diff --git a/.travis.yml b/.travis.yml index dc57e48c61..3b5ebdcaf2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,6 +1,6 @@ language: go go: - - "1.11" + - "1.13" sudo: required services: - docker diff --git a/Dockerfile b/Dockerfile index 11729c0c96..47d057e8cd 100755 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build stage - Use a full build environment to create a static binary -FROM golang:1.11 +FROM golang:1.13 COPY . /go/src/github.com/OpenBazaar/openbazaar-go RUN go build --ldflags '-extldflags "-static"' -o /opt/openbazaard /go/src/github.com/OpenBazaar/openbazaar-go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 868699cf77..5f750518e3 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,6 +1,6 @@ { "ImportPath": "github.com/OpenBazaar/openbazaar-go", - "GoVersion": "go1.11", + "GoVersion": "go1.13", "GodepVersion": "v80", "Deps": [ { @@ -2501,11 +2501,7 @@ }, { "ImportPath": "github.com/OpenBazaar/multiwallet/client/errors", -<<<<<<< HEAD - "Rev": "2b4be2cd7336e77e7ceffe0bce05e8ebec3be287" -======= "Rev": "e2e450015927ced1aa82ded74eba802ec0b3ddf1" ->>>>>>> master }, { "ImportPath": "github.com/OpenBazaar/multiwallet/vendor/github.com/OpenBazaar/wallet-interface", diff --git a/api/jsonapi.go b/api/jsonapi.go index f3802a2e60..c54d5966b1 100644 --- a/api/jsonapi.go +++ b/api/jsonapi.go @@ -3371,7 +3371,7 @@ func (i *jsonAPIHandler) POSTBumpFee(w http.ResponseWriter, r *http.Request) { } var wal wallet.Wallet for _, w := range i.node.Multiwallet { - _, err := w.GetTransaction(*txHash) + _, err := w.GetTransaction(txHash.String()) if err == nil { wal = w break @@ -3381,7 +3381,12 @@ func (i *jsonAPIHandler) POSTBumpFee(w http.ResponseWriter, r *http.Request) { ErrorResponse(w, http.StatusBadRequest, "transaction not found in any wallet") return } - newTxid, err := wal.BumpFee(*txHash) + feeBumper, ok := wal.(wallet.WalletCanBumpFee) + if !ok { + ErrorResponse(w, http.StatusBadRequest, "wallet does not support bumping fees") + 
return + } + newTxid, err := feeBumper.BumpFee(txHash.String()) if err != nil { if err == spvwallet.BumpFeeAlreadyConfirmedError { ErrorResponse(w, http.StatusBadRequest, err.Error()) @@ -3405,7 +3410,7 @@ func (i *jsonAPIHandler) POSTBumpFee(w http.ResponseWriter, r *http.Request) { return } if err := i.node.Datastore.TxMetadata().Put(repo.Metadata{ - Txid: newTxid.String(), + Txid: newTxid, Address: "", Memo: fmt.Sprintf("Fee bump of %s", txid), OrderId: "", @@ -3429,7 +3434,7 @@ func (i *jsonAPIHandler) POSTBumpFee(w http.ResponseWriter, r *http.Request) { ErrorResponse(w, http.StatusInternalServerError, err.Error()) return } - txn, err := wal.GetTransaction(*newTxid) + txn, err := wal.GetTransaction(newTxid) if err != nil { ErrorResponse(w, http.StatusInternalServerError, err.Error()) return @@ -3439,7 +3444,7 @@ func (i *jsonAPIHandler) POSTBumpFee(w http.ResponseWriter, r *http.Request) { t := repo.NewAPITime(txn.Timestamp) resp := &response{ - Txid: newTxid.String(), + Txid: newTxid, ConfirmedBalance: &repo.CurrencyValue{Currency: defn, Amount: &confirmed.Value}, UnconfirmedBalance: &repo.CurrencyValue{Currency: defn, Amount: &unconfirmed.Value}, Amount: amt0, @@ -3468,7 +3473,7 @@ func (i *jsonAPIHandler) GETEstimateFee(w http.ResponseWriter, r *http.Request) var feeLevel wallet.FeeLevel switch strings.ToUpper(fl) { case "PRIORITY": - feeLevel = wallet.PRIORITY + feeLevel = wallet.PRIOIRTY case "NORMAL": feeLevel = wallet.NORMAL case "ECONOMIC": @@ -3526,7 +3531,7 @@ func (i *jsonAPIHandler) GETFees(w http.ResponseWriter, r *http.Request) { if coinType == "fees" { ret := make(map[string]interface{}) for ct, wal := range i.node.Multiwallet { - priority := wal.GetFeePerByte(wallet.PRIORITY) + priority := wal.GetFeePerByte(wallet.PRIOIRTY) normal := wal.GetFeePerByte(wallet.NORMAL) economic := wal.GetFeePerByte(wallet.ECONOMIC) superEconomic := wal.GetFeePerByte(wallet.SUPER_ECONOMIC) @@ -3555,7 +3560,7 @@ func (i *jsonAPIHandler) GETFees(w 
http.ResponseWriter, r *http.Request) { ErrorResponse(w, http.StatusBadRequest, "Unknown wallet type") return } - priority := wal.GetFeePerByte(wallet.PRIORITY) + priority := wal.GetFeePerByte(wallet.PRIOIRTY) normal := wal.GetFeePerByte(wallet.NORMAL) economic := wal.GetFeePerByte(wallet.ECONOMIC) superEconomic := wal.GetFeePerByte(wallet.SUPER_ECONOMIC) @@ -3947,7 +3952,7 @@ func (i *jsonAPIHandler) GETWalletStatus(w http.ResponseWriter, r *http.Request) ret := make(map[string]interface{}) for ct, wal := range i.node.Multiwallet { height, hash := wal.ChainTip() - ret[ct.CurrencyCode()] = status{height, hash.String()} + ret[ct.CurrencyCode()] = status{height, hash} } out, err := json.MarshalIndent(ret, "", " ") if err != nil { @@ -3963,7 +3968,7 @@ func (i *jsonAPIHandler) GETWalletStatus(w http.ResponseWriter, r *http.Request) return } height, hash := wal.ChainTip() - st := status{height, hash.String()} + st := status{height, hash} out, err := json.MarshalIndent(st, "", " ") if err != nil { ErrorResponse(w, http.StatusInternalServerError, err.Error()) diff --git a/core/completion.go b/core/completion.go index 9439868c99..587197c1fd 100644 --- a/core/completion.go +++ b/core/completion.go @@ -177,6 +177,10 @@ func (n *OpenBazaarNode) CompleteOrder(orderRatings *OrderRatings, contract *pb. // Payout order if moderated and not disputed if order.Payment.Method == pb.Order_Payment_MODERATED && contract.DisputeResolution == nil { + escrowWallet, ok := wal.(wallet.EscrowWallet) + if !ok { + return errors.New("wallet does not support escrow") + } var ins []wallet.TransactionInput outValue := new(big.Int) for _, r := range records { @@ -230,7 +234,7 @@ func (n *OpenBazaarNode) CompleteOrder(orderRatings *OrderRatings, contract *pb. 
if !ok { return errors.New("invalid payout fee per byte value") } - buyerSignatures, err := wal.CreateMultisigSignature(ins, []wallet.TransactionOutput{output}, buyerKey, redeemScript, *n) + buyerSignatures, err := escrowWallet.CreateMultisigSignature(ins, []wallet.TransactionOutput{output}, buyerKey, redeemScript, *n) if err != nil { return err } @@ -318,6 +322,10 @@ func (n *OpenBazaarNode) ReleaseFundsAfterTimeout(contract *pb.RicardianContract if err != nil { return err } + escrowWallet, ok := wal.(wallet.EscrowWallet) + if !ok { + return errors.New("wallet does not support escrow") + } defn, err := repo.AllCurrencies().Lookup(order.Payment.AmountCurrency.Code) if err != nil { log.Errorf("Failed ReleaseFundsAfterTimeout(): %s", err.Error()) @@ -334,7 +342,7 @@ func (n *OpenBazaarNode) ReleaseFundsAfterTimeout(contract *pb.RicardianContract return err } - confirms, _, err := wal.GetConfirmations(*hash) + confirms, _, err := wal.GetConfirmations(hash.String()) if err != nil { return err } @@ -382,7 +390,7 @@ func (n *OpenBazaarNode) ReleaseFundsAfterTimeout(contract *pb.RicardianContract if err != nil { return err } - _, err = wal.SweepAddress(txInputs, nil, vendorKey, &redeemScript, wallet.NORMAL) + _, err = escrowWallet.SweepAddress(txInputs, nil, vendorKey, &redeemScript, wallet.NORMAL) if err != nil { return err } diff --git a/core/confirmation.go b/core/confirmation.go index 74d5aa3bab..3c16458d6e 100644 --- a/core/confirmation.go +++ b/core/confirmation.go @@ -127,6 +127,10 @@ func (n *OpenBazaarNode) ConfirmOfflineOrder(oldState pb.OrderState, contract *p } if confirmedContract.BuyerOrder.Payment.Method != pb.Order_Payment_MODERATED { + escrowWallet, ok := wal.(wallet.EscrowWallet) + if !ok { + return errors.New("wallet does not support escrow") + } // Sweep the temp address into our wallet var txInputs []wallet.TransactionInput for _, r := range records { @@ -176,7 +180,7 @@ func (n *OpenBazaarNode) ConfirmOfflineOrder(oldState pb.OrderState, contract *p 
recoverState() return err } - _, err = wal.SweepAddress(txInputs, nil, vendorKey, &redeemScript, wallet.NORMAL) + _, err = escrowWallet.SweepAddress(txInputs, nil, vendorKey, &redeemScript, wallet.NORMAL) if err != nil { recoverState() return err @@ -213,6 +217,10 @@ func (n *OpenBazaarNode) RejectOfflineOrder(contract *pb.RicardianContract, reco } rejectMsg.Timestamp = ts if order.Payment.Method == pb.Order_Payment_MODERATED { + escrowWallet, ok := wal.(wallet.EscrowWallet) + if !ok { + return errors.New("wallet does not support escrow") + } var ins []wallet.TransactionInput outValue := *big.NewInt(0) for _, r := range records { @@ -265,7 +273,7 @@ func (n *OpenBazaarNode) RejectOfflineOrder(contract *pb.RicardianContract, reco if !ok { return errors.New("invalid refund fee value") } - signatures, err := wal.CreateMultisigSignature(ins, []wallet.TransactionOutput{output}, vendorKey, redeemScript, *fee) + signatures, err := escrowWallet.CreateMultisigSignature(ins, []wallet.TransactionOutput{output}, vendorKey, redeemScript, *fee) if err != nil { return fmt.Errorf("generate multisig: %s", err.Error()) } diff --git a/core/disputes.go b/core/disputes.go index f4324229ec..c1c23f7bad 100644 --- a/core/disputes.go +++ b/core/disputes.go @@ -176,7 +176,7 @@ func (n *OpenBazaarNode) verifyEscrowFundsAreDisputeable(contract *pb.RicardianC log.Errorf("Nil NewHashFromStr(%s)", r.Txid) return false } - actualConfirmations, _, err := wal.GetConfirmations(*hash) + actualConfirmations, _, err := wal.GetConfirmations(hash.String()) if err != nil { log.Errorf("Failed GetConfirmations(%s): %s", hash.String(), err.Error()) return false @@ -592,6 +592,10 @@ func (n *OpenBazaarNode) CloseDispute(orderID string, buyerPercentage, vendorPer if err != nil { return fmt.Errorf("currency (%s) not supported by wallet", preferredOrder.Payment.AmountCurrency.Code) } + escrowWallet, ok := wal.(wallet.EscrowWallet) + if !ok { + return errors.New("wallet does not support escrow") + } // Create 
outputs using full value. We will subtract the fee off each output later. outMap := make(map[string]wallet.TransactionOutput) @@ -743,7 +747,7 @@ func (n *OpenBazaarNode) CloseDispute(orderID string, buyerPercentage, vendorPer return err } - sigs, err := wal.CreateMultisigSignature(inputs, outs, moderatorKey, redeemScriptBytes, *big.NewInt(0)) + sigs, err := escrowWallet.CreateMultisigSignature(inputs, outs, moderatorKey, redeemScriptBytes, *big.NewInt(0)) if err != nil { return err } @@ -982,6 +986,11 @@ func (n *OpenBazaarNode) ValidateCaseContract(contract *pb.RicardianContract) [] validationErrors = append(validationErrors, "Contract uses a coin not found in wallet") return validationErrors } + escrowWallet, ok := wal.(wallet.EscrowWallet) + if !ok { + validationErrors = append(validationErrors, "Wallet does not support escrow") + return validationErrors + } chaincode, err := hex.DecodeString(order.Payment.Chaincode) if err != nil { validationErrors = append(validationErrors, "Error validating bitcoin address and redeem script") @@ -1008,7 +1017,7 @@ func (n *OpenBazaarNode) ValidateCaseContract(contract *pb.RicardianContract) [] return validationErrors } timeout, _ := time.ParseDuration(strconv.Itoa(int(contract.VendorListings[0].Metadata.EscrowTimeoutHours)) + "h") - addr, redeemScript, err := wal.GenerateMultisigScript([]hd.ExtendedKey{*buyerKey, *vendorKey, *moderatorKey}, 2, timeout, vendorKey) + addr, redeemScript, err := escrowWallet.GenerateMultisigScript([]hd.ExtendedKey{*buyerKey, *vendorKey, *moderatorKey}, 2, timeout, vendorKey) if err != nil { validationErrors = append(validationErrors, "Error generating multisig script") return validationErrors @@ -1150,6 +1159,10 @@ func (n *OpenBazaarNode) ReleaseFunds(contract *pb.RicardianContract, records [] if err != nil { return err } + escrowWallet, ok := wal.(wallet.EscrowWallet) + if !ok { + return errors.New("wallet does not support escrow") + } // Create outputs var outputs []wallet.TransactionOutput 
@@ -1222,7 +1235,7 @@ func (n *OpenBazaarNode) ReleaseFunds(contract *pb.RicardianContract, records [] return err } - mySigs, err := wal.CreateMultisigSignature(inputs, outputs, signingKey, redeemScriptBytes, *big.NewInt(0)) + mySigs, err := escrowWallet.CreateMultisigSignature(inputs, outputs, signingKey, redeemScriptBytes, *big.NewInt(0)) if err != nil { return err } @@ -1249,7 +1262,7 @@ func (n *OpenBazaarNode) ReleaseFunds(contract *pb.RicardianContract, records [] peerID := order.BuyerID.PeerID // Build, sign, and broadcast transaction - txnID, err := wal.Multisign(inputs, outputs, mySigs, moderatorSigs, redeemScriptBytes, *big.NewInt(0), true) + txnID, err := escrowWallet.Multisign(inputs, outputs, mySigs, moderatorSigs, redeemScriptBytes, *big.NewInt(0), true) if err != nil { return err } diff --git a/core/fulfillment.go b/core/fulfillment.go index 8190ca45ac..2b73deb185 100644 --- a/core/fulfillment.go +++ b/core/fulfillment.go @@ -37,6 +37,10 @@ func (n *OpenBazaarNode) FulfillOrder(fulfillment *pb.OrderFulfillment, contract if err != nil { return err } + escrowWallet, ok := wal.(wallet.EscrowWallet) + if !ok { + return errors.New("wallet does not support escrow") + } currentAddress := wal.CurrentAddress(wallet.EXTERNAL) payout.PayoutAddress = currentAddress.String() f := wal.GetFeePerByte(wallet.NORMAL) @@ -79,7 +83,7 @@ func (n *OpenBazaarNode) FulfillOrder(fulfillment *pb.OrderFulfillment, contract if !ok { return errors.New("invalid payout fee value") } - signatures, err := wal.CreateMultisigSignature(ins, []wallet.TransactionOutput{output}, vendorKey, redeemScript, *fee) + signatures, err := escrowWallet.CreateMultisigSignature(ins, []wallet.TransactionOutput{output}, vendorKey, redeemScript, *fee) if err != nil { return err } diff --git a/core/order.go b/core/order.go index a4734c00fd..74b679c0f8 100644 --- a/core/order.go +++ b/core/order.go @@ -212,6 +212,10 @@ func (n *OpenBazaarNode) Purchase(data *repo.PurchaseData) (orderID string, paym } func 
prepareModeratedOrderContract(data *repo.PurchaseData, n *OpenBazaarNode, contract *pb.RicardianContract, wal wallet.Wallet) (*pb.RicardianContract, error) { + escrowWallet, ok := wal.(wallet.EscrowWallet) + if !ok { + return nil, errors.New("wallet does not support escrow") + } if data.Moderator == n.IpfsNode.Identity.Pretty() { return nil, errors.New("cannot select self as moderator") } @@ -303,7 +307,7 @@ func prepareModeratedOrderContract(data *repo.PurchaseData, n *OpenBazaarNode, c if err != nil { return nil, err } - addr, redeemScript, err := wal.GenerateMultisigScript([]hd.ExtendedKey{*buyerKey, *vendorKey, *moderatorKey}, 2, timeout, vendorKey) + addr, redeemScript, err := escrowWallet.GenerateMultisigScript([]hd.ExtendedKey{*buyerKey, *vendorKey, *moderatorKey}, 2, timeout, vendorKey) if err != nil { return nil, err } @@ -379,6 +383,10 @@ func processOnlineDirectOrder(resp *pb.Message, n *OpenBazaarNode, wal wallet.Wa } func processOfflineDirectOrder(n *OpenBazaarNode, wal wallet.Wallet, contract *pb.RicardianContract, payment *pb.Order_Payment) (string, string, big.Int, error) { + escrowWallet, ok := wal.(wallet.EscrowWallet) + if !ok { + return "", "", big.Int{}, errors.New("wallet does not support escrow") + } // Vendor offline // Change payment code to direct v5Order, err := repo.ToV5Order(contract.BuyerOrder, nil) @@ -414,7 +422,7 @@ func processOfflineDirectOrder(n *OpenBazaarNode, wal wallet.Wallet, contract *p if err != nil { return "", "", *big.NewInt(0), err } - addr, redeemScript, err := wal.GenerateMultisigScript([]hd.ExtendedKey{*buyerKey, *vendorKey}, 1, time.Duration(0), nil) + addr, redeemScript, err := escrowWallet.GenerateMultisigScript([]hd.ExtendedKey{*buyerKey, *vendorKey}, 1, time.Duration(0), nil) if err != nil { return "", "", *big.NewInt(0), err } @@ -1095,6 +1103,10 @@ func (n *OpenBazaarNode) CancelOfflineOrder(contract *pb.RicardianContract, reco if err != nil { return err } + escrowWallet, ok := wal.(wallet.EscrowWallet) + if 
!ok { + return errors.New("wallet does not support escrow") + } // Sweep the temp address into our wallet var utxos []wallet.TransactionInput for _, r := range records { @@ -1141,7 +1153,7 @@ func (n *OpenBazaarNode) CancelOfflineOrder(contract *pb.RicardianContract, reco if err != nil { return err } - _, err = wal.SweepAddress(utxos, &refundAddress, buyerKey, &redeemScript, wallet.NORMAL) + _, err = escrowWallet.SweepAddress(utxos, &refundAddress, buyerKey, &redeemScript, wallet.NORMAL) if err != nil { return err } @@ -1838,6 +1850,10 @@ func (n *OpenBazaarNode) ValidateDirectPaymentAddress(order *pb.Order) error { if err != nil { return err } + escrowWallet, ok := wal.(wallet.EscrowWallet) + if !ok { + return errors.New("wallet does not support escrow") + } mECKey, err := n.MasterPrivateKey.ECPubKey() if err != nil { return err @@ -1850,7 +1866,7 @@ func (n *OpenBazaarNode) ValidateDirectPaymentAddress(order *pb.Order) error { if err != nil { return err } - addr, redeemScript, err := wal.GenerateMultisigScript([]hd.ExtendedKey{*buyerKey, *vendorKey}, 1, time.Duration(0), nil) + addr, redeemScript, err := escrowWallet.GenerateMultisigScript([]hd.ExtendedKey{*buyerKey, *vendorKey}, 1, time.Duration(0), nil) if err != nil { return err } @@ -1870,6 +1886,10 @@ func (n *OpenBazaarNode) ValidateModeratedPaymentAddress(order *pb.Order, timeou if err != nil { return err } + escrowWallet, ok := wal.(wallet.EscrowWallet) + if !ok { + return errors.New("wallet does not support escrow") + } ipnsPath := ipfspath.FromString(order.Payment.Moderator + "/profile.json") profileBytes, err := ipfs.ResolveThenCat(n.IpfsNode, ipnsPath, time.Minute, n.IPNSQuorumSize, true) if err != nil { @@ -1912,7 +1932,7 @@ func (n *OpenBazaarNode) ValidateModeratedPaymentAddress(order *pb.Order, timeou if !bytes.Equal(order.Payment.ModeratorKey, modPub.SerializeCompressed()) { return errors.New("invalid moderator key") } - addr, redeemScript, err := 
wal.GenerateMultisigScript([]hd.ExtendedKey{*buyerKey, *vendorKey, *moderatorKey}, 2, timeout, vendorKey) + addr, redeemScript, err := escrowWallet.GenerateMultisigScript([]hd.ExtendedKey{*buyerKey, *vendorKey, *moderatorKey}, 2, timeout, vendorKey) if err != nil { return err } diff --git a/core/refunds.go b/core/refunds.go index 139f1b5046..4b082554ea 100644 --- a/core/refunds.go +++ b/core/refunds.go @@ -35,6 +35,10 @@ func (n *OpenBazaarNode) RefundOrder(contract *pb.RicardianContract, records []* if err != nil { return err } + escrowWallet, ok := wal.(wallet.EscrowWallet) + if !ok { + return errors.New("wallet does not support escrow") + } if order.Payment.Method == pb.Order_Payment_MODERATED { var ins []wallet.TransactionInput outValue := big.NewInt(0) @@ -76,7 +80,7 @@ func (n *OpenBazaarNode) RefundOrder(contract *pb.RicardianContract, records []* return err } f, _ := new(big.Int).SetString(order.BigRefundFee, 10) - signatures, err := wal.CreateMultisigSignature(ins, []wallet.TransactionOutput{output}, vendorKey, redeemScript, *f) + signatures, err := escrowWallet.CreateMultisigSignature(ins, []wallet.TransactionOutput{output}, vendorKey, redeemScript, *f) if err != nil { return err } @@ -102,7 +106,7 @@ func (n *OpenBazaarNode) RefundOrder(contract *pb.RicardianContract, records []* return err } txinfo := new(pb.Refund_TransactionInfo) - txinfo.Txid = txid.String() + txinfo.Txid = txid txinfo.BigValue = outValue.String() txinfo.ValueCurrency = contract.BuyerOrder.Payment.AmountCurrency refundMsg.RefundTransaction = txinfo diff --git a/core/spend.go b/core/spend.go index 38135aedbf..681f6d05bc 100644 --- a/core/spend.go +++ b/core/spend.go @@ -92,7 +92,7 @@ func (n *OpenBazaarNode) Spend(args *SpendRequest) (*SpendResponse, error) { switch strings.ToUpper(args.FeeLevel) { case "PRIORITY": - feeLevel = wallet.PRIORITY + feeLevel = wallet.PRIOIRTY case "NORMAL": feeLevel = wallet.NORMAL case "ECONOMIC": @@ -115,7 +115,7 @@ func (n *OpenBazaarNode) Spend(args 
*SpendRequest) (*SpendResponse, error) { } } - txn, err := wal.GetTransaction(*txid) + txn, err := wal.GetTransaction(txid) if err != nil { log.Errorf("get txn failed : %v", err.Error()) return nil, fmt.Errorf("failed retrieving new wallet balance: %s", err) @@ -146,7 +146,7 @@ func (n *OpenBazaarNode) Spend(args *SpendRequest) (*SpendResponse, error) { } if err := n.Datastore.TxMetadata().Put(repo.Metadata{ - Txid: txid.String(), + Txid: txid, Address: toAddress, Memo: memo, OrderId: args.OrderID, @@ -163,7 +163,7 @@ func (n *OpenBazaarNode) Spend(args *SpendRequest) (*SpendResponse, error) { } return &SpendResponse{ - Txid: txid.String(), + Txid: txid, ConfirmedBalance: confirmed.Value.String(), UnconfirmedBalance: unconfirmed.Value.String(), Currency: &defn, diff --git a/core/utils.go b/core/utils.go index 3d9de4578b..d00387da9e 100644 --- a/core/utils.go +++ b/core/utils.go @@ -48,11 +48,8 @@ func (n *OpenBazaarNode) BuildTransactionRecords(contract *pb.RicardianContract, return paymentRecords, nil, err } tx.Timestamp = ts - ch, err := chainhash.NewHashFromStr(strings.TrimPrefix(tx.Txid, "0x")) - if err != nil { - return paymentRecords, nil, err - } - confirmations, height, err := wal.GetConfirmations(*ch) + + confirmations, height, err := wal.GetConfirmations(tx.Txid) if err != nil { return paymentRecords, nil, err } @@ -90,10 +87,17 @@ func (n *OpenBazaarNode) BuildTransactionRecords(contract *pb.RicardianContract, refundRecord = new(pb.TransactionRecord) // Direct we need to use the transaction info in the contract's refund object ch, err := chainhash.NewHashFromStr(strings.TrimPrefix(contract.Refund.RefundTransaction.Txid, "0x")) + var txid string if err != nil { - return paymentRecords, refundRecord, err + if strings.HasPrefix(contract.Refund.RefundTransaction.Txid, "bafy") { + txid = contract.Refund.RefundTransaction.Txid + } else { + return paymentRecords, refundRecord, err + } + } else { + txid = ch.String() } - confirmations, height, err := 
wal.GetConfirmations(*ch) + confirmations, height, err := wal.GetConfirmations(txid) if err != nil { return paymentRecords, refundRecord, nil } diff --git a/net/service/handlers.go b/net/service/handlers.go index fe255bf2c1..8ddf009a9a 100644 --- a/net/service/handlers.go +++ b/net/service/handlers.go @@ -731,6 +731,10 @@ func (service *OpenBazaarService) handleReject(p peer.ID, pmes *pb.Message, opti } if order.Payment.Method != pb.Order_Payment_MODERATED { + escrowWallet, ok := wal.(wallet.EscrowWallet) + if !ok { + return nil, errors.New("wallet does not support escrow") + } // Sweep the address into our wallet var txInputs []wallet.TransactionInput for _, r := range records { @@ -774,11 +778,15 @@ func (service *OpenBazaarService) handleReject(p peer.ID, pmes *pb.Message, opti if err != nil { return nil, err } - _, err = wal.SweepAddress(txInputs, &refundAddress, buyerKey, &redeemScript, wallet.NORMAL) + _, err = escrowWallet.SweepAddress(txInputs, &refundAddress, buyerKey, &redeemScript, wallet.NORMAL) if err != nil { return nil, err } } else { + escrowWallet, ok := wal.(wallet.EscrowWallet) + if !ok { + return nil, errors.New("wallet does not support escrow") + } var ins []wallet.TransactionInput outValue := big.NewInt(0) for _, r := range records { @@ -822,7 +830,7 @@ func (service *OpenBazaarService) handleReject(p peer.ID, pmes *pb.Message, opti if !ok { return nil, errors.New("invalid amount") } - buyerSignatures, err := wal.CreateMultisigSignature(ins, []wallet.TransactionOutput{output}, buyerKey, redeemScript, *fee) + buyerSignatures, err := escrowWallet.CreateMultisigSignature(ins, []wallet.TransactionOutput{output}, buyerKey, redeemScript, *fee) if err != nil { return nil, err } @@ -831,7 +839,7 @@ func (service *OpenBazaarService) handleReject(p peer.ID, pmes *pb.Message, opti sig := wallet.Signature{InputIndex: s.InputIndex, Signature: s.Signature} vendorSignatures = append(vendorSignatures, sig) } - _, err = wal.Multisign(ins, 
[]wallet.TransactionOutput{output}, buyerSignatures, vendorSignatures, redeemScript, *fee, true) + _, err = escrowWallet.Multisign(ins, []wallet.TransactionOutput{output}, buyerSignatures, vendorSignatures, redeemScript, *fee, true) if err != nil { return nil, err } @@ -926,6 +934,10 @@ func (service *OpenBazaarService) handleRefund(p peer.ID, pmes *pb.Message, opti } if order.Payment.Method == pb.Order_Payment_MODERATED { + escrowWallet, ok := wal.(wallet.EscrowWallet) + if !ok { + return nil, errors.New("wallet does not support escrow") + } var ins []wallet.TransactionInput outValue := big.NewInt(0) for _, r := range records { @@ -969,7 +981,7 @@ func (service *OpenBazaarService) handleRefund(p peer.ID, pmes *pb.Message, opti if !ok { return nil, errors.New("invalid amount") } - buyerSignatures, err := wal.CreateMultisigSignature(ins, []wallet.TransactionOutput{output}, buyerKey, redeemScript, *fee) + buyerSignatures, err := escrowWallet.CreateMultisigSignature(ins, []wallet.TransactionOutput{output}, buyerKey, redeemScript, *fee) if err != nil { return nil, err } @@ -978,7 +990,7 @@ func (service *OpenBazaarService) handleRefund(p peer.ID, pmes *pb.Message, opti sig := wallet.Signature{InputIndex: s.InputIndex, Signature: s.Signature} vendorSignatures = append(vendorSignatures, sig) } - _, err = wal.Multisign(ins, []wallet.TransactionOutput{output}, buyerSignatures, vendorSignatures, redeemScript, *fee, true) + _, err = escrowWallet.Multisign(ins, []wallet.TransactionOutput{output}, buyerSignatures, vendorSignatures, redeemScript, *fee, true) if err != nil { return nil, err } @@ -1191,6 +1203,10 @@ func (service *OpenBazaarService) handleOrderCompletion(p peer.ID, pmes *pb.Mess return nil, err } if order.Payment.Method == pb.Order_Payment_MODERATED && state != pb.OrderState_DISPUTED && state != pb.OrderState_DECIDED && state != pb.OrderState_RESOLVED && state != pb.OrderState_PAYMENT_FINALIZED { + escrowWallet, ok := wal.(wallet.EscrowWallet) + if !ok { + return 
nil, errors.New("wallet does not support escrow") + } var ins []wallet.TransactionInput outValue := big.NewInt(0) for _, r := range records { @@ -1243,7 +1259,7 @@ func (service *OpenBazaarService) handleOrderCompletion(p peer.ID, pmes *pb.Mess if !ok { return nil, errors.New("invalid amount") } - _, err = wal.Multisign(ins, []wallet.TransactionOutput{output}, buyerSignatures, vendorSignatures, redeemScript, *payoutFee, true) + _, err = escrowWallet.Multisign(ins, []wallet.TransactionOutput{output}, buyerSignatures, vendorSignatures, redeemScript, *payoutFee, true) if err != nil { if err.Error() == "ERROR_INSUFFICIENT_FUNDS" { err0 := service.node.Datastore.Messages().Put( @@ -1974,12 +1990,26 @@ func (service *OpenBazaarService) handleOrderPayment(peer peer.ID, pmes *pb.Mess } chash, err := chainhash.NewHashFromStr(paymentDetails.GetTransactionID()) + var txid string if err != nil { - return nil, err + if _, ok := cid.Decode(paymentDetails.TransactionID); ok == nil { + txid = paymentDetails.TransactionID + } else { + return nil, err + } + } else { + txid = chash.String() } - log.Debugf("retrieving %s transaction %s", paymentDetails.Coin, chash.String()) - txn, err := wal.GetTransaction(*chash) + log.Debugf("retrieving %s transaction %s", paymentDetails.Coin, txid) + var txn wallet.Txn + for i := 0; i < 10; i++ { + txn, err = wal.GetTransaction(txid) + if err == nil { + break + } + time.Sleep(time.Second * 2) + } if err != nil { return nil, err } @@ -2043,6 +2073,13 @@ func (service *OpenBazaarService) handleOrderPayment(peer peer.ID, pmes *pb.Mess } } + if toAddress.String() == "" { + toAddress, err = wal.DecodeAddress(txn.ToAddress) + if err != nil { + log.Error(err) + } + } + outputs := []wallet.TransactionOutput{} for _, o := range txn.Outputs { output := wallet.TransactionOutput{ diff --git a/repo/currency_definition.go b/repo/currency_definition.go index a12b1d9b75..106ed5560b 100644 --- a/repo/currency_definition.go +++ b/repo/currency_definition.go @@ -72,6 
+72,7 @@ var ( "LTC": {Name: "Litecoin", Code: CurrencyCode("LTC"), CurrencyType: Crypto, Divisibility: 8, BlockTime: 150 * time.Second}, "ZEC": {Name: "Zcash", Code: CurrencyCode("ZEC"), CurrencyType: Crypto, Divisibility: 8, BlockTime: DefaultBlockTime}, "ETH": {Name: "Ethereum", Code: CurrencyCode("ETH"), CurrencyType: Crypto, Divisibility: 18, BlockTime: 10 * time.Second}, + "FIL": {Name: "Filecoin", Code: CurrencyCode("FIL"), CurrencyType: Crypto, Divisibility: 18, BlockTime: 10 * time.Second}, } testnetCryptoDefinitions = map[string]CurrencyDefinition{ "TBTC": {Name: "Testnet Bitcoin", Code: CurrencyCode("TBTC"), CurrencyType: Crypto, Divisibility: 8, BlockTime: DefaultBlockTime}, @@ -79,6 +80,7 @@ var ( "TLTC": {Name: "Testnet Litecoin", Code: CurrencyCode("TLTC"), CurrencyType: Crypto, Divisibility: 8, BlockTime: 150 * time.Second}, "TZEC": {Name: "Testnet Zcash", Code: CurrencyCode("TZEC"), CurrencyType: Crypto, Divisibility: 8, BlockTime: DefaultBlockTime}, "TETH": {Name: "Testnet Ethereum", Code: CurrencyCode("TETH"), CurrencyType: Crypto, Divisibility: 18, BlockTime: 10 * time.Second}, + "TFIL": {Name: "Testnet Filecoin", Code: CurrencyCode("TFIL"), CurrencyType: Crypto, Divisibility: 18, BlockTime: 10 * time.Second}, } fiatDefinitions = map[string]CurrencyDefinition{ "AED": {Name: "UAE Dirham", Code: CurrencyCode("AED"), CurrencyType: Fiat, Divisibility: 2}, diff --git a/repo/db/txns.go b/repo/db/txns.go index 522e8da283..4b23cf5a19 100644 --- a/repo/db/txns.go +++ b/repo/db/txns.go @@ -8,7 +8,6 @@ import ( "github.com/OpenBazaar/openbazaar-go/repo" "github.com/OpenBazaar/wallet-interface" - "github.com/btcsuite/btcd/chaincfg/chainhash" ) type TxnsDB struct { @@ -41,7 +40,7 @@ func (t *TxnsDB) Put(raw []byte, txid, value string, height int, timestamp time. 
return nil } -func (t *TxnsDB) Get(txid chainhash.Hash) (wallet.Txn, error) { +func (t *TxnsDB) Get(txid string) (wallet.Txn, error) { t.lock.Lock() defer t.lock.Unlock() var txn wallet.Txn @@ -55,7 +54,7 @@ func (t *TxnsDB) Get(txid chainhash.Hash) (wallet.Txn, error) { var timestamp int var value string var watchOnlyInt int - err = stmt.QueryRow(txid.String(), t.coinType.CurrencyCode()).Scan(&raw, &value, &height, ×tamp, &watchOnlyInt) + err = stmt.QueryRow(txid, t.coinType.CurrencyCode()).Scan(&raw, &value, &height, ×tamp, &watchOnlyInt) if err != nil { return txn, err } @@ -64,7 +63,7 @@ func (t *TxnsDB) Get(txid chainhash.Hash) (wallet.Txn, error) { watchOnly = true } txn = wallet.Txn{ - Txid: txid.String(), + Txid: txid, Value: value, Height: int32(height), Timestamp: time.Unix(int64(timestamp), 0), @@ -117,17 +116,17 @@ func (t *TxnsDB) GetAll(includeWatchOnly bool) ([]wallet.Txn, error) { return ret, nil } -func (t *TxnsDB) Delete(txid *chainhash.Hash) error { +func (t *TxnsDB) Delete(txid string) error { t.lock.Lock() defer t.lock.Unlock() - _, err := t.db.Exec("delete from txns where txid=? and coin=?", txid.String(), t.coinType.CurrencyCode()) + _, err := t.db.Exec("delete from txns where txid=? 
and coin=?", txid, t.coinType.CurrencyCode()) if err != nil { return err } return nil } -func (t *TxnsDB) UpdateHeight(txid chainhash.Hash, height int, timestamp time.Time) error { +func (t *TxnsDB) UpdateHeight(txid string, height int, timestamp time.Time) error { t.lock.Lock() defer t.lock.Unlock() @@ -136,7 +135,7 @@ func (t *TxnsDB) UpdateHeight(txid chainhash.Hash, height int, timestamp time.Ti return fmt.Errorf("prepare txn sql: %s", err.Error()) } defer stmt.Close() - _, err = stmt.Exec(height, int(timestamp.Unix()), txid.String(), t.coinType.CurrencyCode()) + _, err = stmt.Exec(height, int(timestamp.Unix()), txid, t.coinType.CurrencyCode()) if err != nil { return fmt.Errorf("update txns: %s", err.Error()) } diff --git a/repo/db/txns_test.go b/repo/db/txns_test.go index 696451b038..1ed13fdf81 100644 --- a/repo/db/txns_test.go +++ b/repo/db/txns_test.go @@ -100,7 +100,7 @@ func TestTxnsGet(t *testing.T) { if err != nil { t.Error(err) } - txn, err := txdb.Get(tx.TxHash()) + txn, err := txdb.Get(tx.TxHash().String()) if err != nil { t.Error(err) } @@ -173,7 +173,7 @@ func TestDeleteTxns(t *testing.T) { t.Error(err) } txid := tx.TxHash() - err = txdb.Delete(&txid) + err = txdb.Delete(txid.String()) if err != nil { t.Error(err) } @@ -208,11 +208,11 @@ func TestTxnsDB_UpdateHeight(t *testing.T) { if err != nil { t.Error(err) } - err = txdb.UpdateHeight(tx.TxHash(), -1, time.Now()) + err = txdb.UpdateHeight(tx.TxHash().String(), -1, time.Now()) if err != nil { t.Error(err) } - txn, err := txdb.Get(tx.TxHash()) + txn, err := txdb.Get(tx.TxHash().String()) if err != nil { t.Error(err) } diff --git a/schema/configuration.go b/schema/configuration.go index 9bdd5df531..26baa088bd 100644 --- a/schema/configuration.go +++ b/schema/configuration.go @@ -36,6 +36,7 @@ type WalletsConfig struct { LTC *CoinConfig `json:"LTC"` ZEC *CoinConfig `json:"ZEC"` ETH *CoinConfig `json:"ETH"` + FIL *CoinConfig `json:"FIL"` } type CoinConfig struct { @@ -130,6 +131,17 @@ func 
DefaultWalletsConfig() *WalletsConfig { MaxFee: 200, WalletOptions: EthereumDefaultOptions(), }, + FIL: &CoinConfig{ + Type: WalletTypeAPI, + APIPool: CoinPoolFIL, + APITestnetPool: CoinPoolTFIL, + FeeAPI: "", // intentionally blank + LowFeeDefault: 7, + MediumFeeDefault: 15, + HighFeeDefault: 30, + MaxFee: 200, + WalletOptions: nil, + }, } } diff --git a/schema/constants.go b/schema/constants.go index 0c09b7bec5..5bc589f357 100644 --- a/schema/constants.go +++ b/schema/constants.go @@ -100,12 +100,14 @@ const ( CoinAPIOpenBazaarLTC = "https://ltc.api.openbazaar.org/api" CoinAPIOpenBazaarZEC = "https://zec.api.openbazaar.org/api" CoinAPIOpenBazaarETH = "https://mainnet.infura.io" + CoinAPIOpenBazaarFIL = "http://localhost:8080/api" CoinAPIOpenBazaarTBTC = "https://tbtc.api.openbazaar.org/api" CoinAPIOpenBazaarTBCH = "https://tbch.api.openbazaar.org/api" CoinAPIOpenBazaarTLTC = "https://tltc.api.openbazaar.org/api" CoinAPIOpenBazaarTZEC = "https://tzec.api.openbazaar.org/api" CoinAPIOpenBazaarTETH = "https://rinkeby.infura.io" + CoinAPIOpenBazaarTFIL = "http://localhost:8080/api" ) var ( @@ -114,10 +116,12 @@ var ( CoinPoolLTC = []string{CoinAPIOpenBazaarLTC} CoinPoolZEC = []string{CoinAPIOpenBazaarZEC} CoinPoolETH = []string{CoinAPIOpenBazaarETH} + CoinPoolFIL = []string{CoinAPIOpenBazaarFIL} CoinPoolTBTC = []string{CoinAPIOpenBazaarTBTC} CoinPoolTBCH = []string{CoinAPIOpenBazaarTBCH} CoinPoolTLTC = []string{CoinAPIOpenBazaarTLTC} CoinPoolTZEC = []string{CoinAPIOpenBazaarTZEC} CoinPoolTETH = []string{CoinAPIOpenBazaarTETH} + CoinPoolTFIL = []string{CoinAPIOpenBazaarTFIL} ) diff --git a/vendor/github.com/GeertJohan/go.rice/.gitignore b/vendor/github.com/GeertJohan/go.rice/.gitignore new file mode 100644 index 0000000000..a3c98197a0 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/.gitignore @@ -0,0 +1,8 @@ +/example/example +/example/example.exe +/rice/rice +/rice/rice.exe + +*.rice-box.go +*.rice-box.syso +.wercker diff --git 
a/vendor/github.com/GeertJohan/go.rice/.travis.yml b/vendor/github.com/GeertJohan/go.rice/.travis.yml new file mode 100644 index 0000000000..878cdebf0d --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - master + - 1.11.x + - 1.10.x + - 1.9.x + - 1.8.x + +install: + - go get -t ./... + - env +script: + - go build -x ./... + - go test -cover ./... + - go vet ./... diff --git a/vendor/github.com/GeertJohan/go.rice/AUTHORS b/vendor/github.com/GeertJohan/go.rice/AUTHORS new file mode 100644 index 0000000000..20ff8ba6b9 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/AUTHORS @@ -0,0 +1,4 @@ +Geert-Johan Riemer +Paul Maddox +Vincent Petithory + diff --git a/vendor/github.com/GeertJohan/go.rice/LICENSE b/vendor/github.com/GeertJohan/go.rice/LICENSE new file mode 100644 index 0000000000..8b4409d7f8 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013, Geert-Johan Riemer +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/GeertJohan/go.rice/README.md b/vendor/github.com/GeertJohan/go.rice/README.md new file mode 100644 index 0000000000..feef3a1fc3 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/README.md @@ -0,0 +1,142 @@ +# go.rice + +[![Build Status](https://travis-ci.org/GeertJohan/go.rice.png)](https://travis-ci.org/GeertJohan/go.rice) +[![Godoc](https://img.shields.io/badge/godoc-go.rice-blue.svg?style=flat-square)](https://godoc.org/github.com/GeertJohan/go.rice) + +go.rice is a [Go](http://golang.org) package that makes working with resources such as html,js,css,images and templates easy. During development `go.rice` will load required files directly from disk. Upon deployment it's easy to add all resource files to a executable using the `rice` tool, without changing the source code for your package. go.rice provides methods to add resources to a binary in different scenarios. + +## What does it do + +The first thing go.rice does is finding the correct absolute path for your resource files. Say you are executing a binary in your home directory, but your `html-files` are in `$GOPATH/src/yourApplication/html-files`. `go.rice` will lookup the correct path for that directory (relative to the location of yourApplication). All you have to do is include the resources using `rice.FindBox("html-files")`. 
+ +This works fine when the source is available to the machine executing the binary, which is the case when installing the executable with `go get` or `go install`. But it does not work when you wish to provide a single binary without source. This is where the `rice` tool comes in. It analyses source code and finds call's to `rice.FindBox(..)`. Then it adds the required directories to the executable binary, There are two strategies to do this. You can 'embed' the assets by generating go source code and then compile them into the executable binary, or you can 'append' the assets to the executable binary after compiling. In both cases the `rice.FindBox(..)` call detects the embedded or appended resources and load those, instead of looking up files from disk. + +## Installation + +Use `go get` to install the package the `rice` tool. + +```bash +go get github.com/GeertJohan/go.rice +go get github.com/GeertJohan/go.rice/rice +``` + +## Package usage + +Import the package: `import "github.com/GeertJohan/go.rice"` + +Serving a static content folder over HTTP with a rice Box: + +```go +http.Handle("/", http.FileServer(rice.MustFindBox("http-files").HTTPBox())) +http.ListenAndServe(":8080", nil) +``` + +Serve a static content folder over HTTP at a non-root location: + +```go +box := rice.MustFindBox("cssfiles") +cssFileServer := http.StripPrefix("/css/", http.FileServer(box.HTTPBox())) +http.Handle("/css/", cssFileServer) +http.ListenAndServe(":8080", nil) +``` + +Note the *trailing slash* in `/css/` in both the call to +`http.StripPrefix` and `http.Handle`. 
+ +Loading a template: + +```go +// find a rice.Box +templateBox, err := rice.FindBox("example-templates") +if err != nil { + log.Fatal(err) +} +// get file contents as string +templateString, err := templateBox.String("message.tmpl") +if err != nil { + log.Fatal(err) +} +// parse and execute the template +tmplMessage, err := template.New("message").Parse(templateString) +if err != nil { + log.Fatal(err) +} +tmplMessage.Execute(os.Stdout, map[string]string{"Message": "Hello, world!"}) + +``` + +Never call `FindBox()` or `MustFindBox()` from an `init()` function, as there is no guarantee the boxes are loaded at that time. + +### Calling FindBox and MustFindBox + +Always call `FindBox()` or `MustFindBox()` with string literals e.g. `FindBox("example")`. Do not use string constants or variables. This will prevent the rice tool to fail with error `Error: found call to rice.FindBox, but argument must be a string literal.`. + +## Tool usage + +The `rice` tool lets you add the resources to a binary executable so the files are not loaded from the filesystem anymore. This creates a 'standalone' executable. There are multiple strategies to add the resources and assets to a binary, each has pro's and con's but all will work without requiring changes to the way you load the resources. + +### `rice embed-go`: Embed resources by generating Go source code + +Execute this method before building. It generates a single Go source file called *rice-box.go* for each package. The generated go file contains all assets. The Go tool compiles this into the binary. + +The downside with this option is that the generated go source file can become large, which may slow down compilation and requires more memory to compile. + +Execute the following commands: + +```bash +rice embed-go +go build +``` + +*A Note on Symbolic Links*: `embed-go` uses the `os.Walk` function from the standard library. The `os.Walk` function does **not** follow symbolic links. 
When creating a box, be aware that any symbolic links inside your box's directory are not followed. When the box itself is a symbolic link, the rice tool resolves its actual location before adding the contents. + +### `rice embed-syso`: Embed resources by generating a coff .syso file and some .go source code + +** This method is experimental. Do not use for production systems. ** + +Execute this method before building. It generates a COFF .syso file and Go source file. The Go compiler then compiles these files into the binary. + +Execute the following commands: + +```bash +rice embed-syso +go build +``` + +### `rice append`: Append resources to executable as zip file + +This method changes an already built executable. It appends the resources as zip file to the binary. It makes compilation a lot faster. Using the append method works great for adding large assets to an executable binary. + +A downside for appending is that it does not provide a working Seek method. + +Run the following commands to create a standalone executable. + +```bash +go build -o example +rice append --exec example +``` + +## Help information + +Run `rice --help` for information about all flags and subcommands. + +You can use the `--help` flag on each sub-command. For example: `rice append --help`. + +## Order of precedence + +When opening a new box, the `rice.FindBox(..)` tries to locate the resources in the following order: + +- embedded (generated as `rice-box.go`) +- appended (appended to the binary executable after compiling) +- 'live' from filesystem + +## License + +This project is licensed under a Simplified BSD license. Please read the [LICENSE file][license]. + +## Package documentation + +You will find package documentation at [godoc.org/github.com/GeertJohan/go.rice][godoc]. 
+ +[license]: https://github.com/GeertJohan/go.rice/blob/master/LICENSE +[godoc]: http://godoc.org/github.com/GeertJohan/go.rice diff --git a/vendor/github.com/GeertJohan/go.rice/appended.go b/vendor/github.com/GeertJohan/go.rice/appended.go new file mode 100644 index 0000000000..38d78f9221 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/appended.go @@ -0,0 +1,142 @@ +package rice + +import ( + "archive/zip" + "log" + "os" + "path/filepath" + "strings" + "time" + + "github.com/daaku/go.zipexe" +) + +// appendedBox defines an appended box +type appendedBox struct { + Name string // box name + Files map[string]*appendedFile // appended files (*zip.File) by full path + Time time.Time +} + +type appendedFile struct { + zipFile *zip.File + dir bool + dirInfo *appendedDirInfo + children []*appendedFile + content []byte +} + +// appendedBoxes is a public register of appendes boxes +var appendedBoxes = make(map[string]*appendedBox) + +func init() { + // find if exec is appended + thisFile, err := os.Executable() + if err != nil { + return // not appended or cant find self executable + } + thisFile, err = filepath.EvalSymlinks(thisFile) + if err != nil { + return + } + closer, rd, err := zipexe.OpenCloser(thisFile) + if err != nil { + return // not appended + } + defer closer.Close() + + for _, f := range rd.File { + // get box and file name from f.Name + fileParts := strings.SplitN(strings.TrimLeft(filepath.ToSlash(f.Name), "/"), "/", 2) + boxName := fileParts[0] + var fileName string + if len(fileParts) > 1 { + fileName = fileParts[1] + } + + // find box or create new one if doesn't exist + box := appendedBoxes[boxName] + if box == nil { + box = &appendedBox{ + Name: boxName, + Files: make(map[string]*appendedFile), + Time: f.ModTime(), + } + appendedBoxes[boxName] = box + } + + // create and add file to box + af := &appendedFile{ + zipFile: f, + } + if f.Comment == "dir" { + af.dir = true + af.dirInfo = &appendedDirInfo{ + name: filepath.Base(af.zipFile.Name), + 
time: af.zipFile.ModTime(), + } + } else { + // this is a file, we need it's contents so we can create a bytes.Reader when the file is opened + // make a new byteslice + af.content = make([]byte, af.zipFile.FileInfo().Size()) + // ignore reading empty files from zip (empty file still is a valid file to be read though!) + if len(af.content) > 0 { + // open io.ReadCloser + rc, err := af.zipFile.Open() + if err != nil { + af.content = nil // this will cause an error when the file is being opened or seeked (which is good) + // TODO: it's quite blunt to just log this stuff. but this is in init, so rice.Debug can't be changed yet.. + log.Printf("error opening appended file %s: %v", af.zipFile.Name, err) + } else { + _, err = rc.Read(af.content) + rc.Close() + if err != nil { + af.content = nil // this will cause an error when the file is being opened or seeked (which is good) + // TODO: it's quite blunt to just log this stuff. but this is in init, so rice.Debug can't be changed yet.. + log.Printf("error reading data for appended file %s: %v", af.zipFile.Name, err) + } + } + } + } + + // add appendedFile to box file list + box.Files[fileName] = af + + // add to parent dir (if any) + dirName := filepath.Dir(fileName) + if dirName == "." { + dirName = "" + } + if fileName != "" { // don't make box root dir a child of itself + if dir := box.Files[dirName]; dir != nil { + dir.children = append(dir.children, af) + } + } + } +} + +// implements os.FileInfo. 
+// used for Readdir() +type appendedDirInfo struct { + name string + time time.Time +} + +func (adi *appendedDirInfo) Name() string { + return adi.name +} +func (adi *appendedDirInfo) Size() int64 { + return 0 +} +func (adi *appendedDirInfo) Mode() os.FileMode { + return os.ModeDir +} +func (adi *appendedDirInfo) ModTime() time.Time { + return adi.time +} +func (adi *appendedDirInfo) IsDir() bool { + return true +} +func (adi *appendedDirInfo) Sys() interface{} { + return nil +} diff --git a/vendor/github.com/GeertJohan/go.rice/box.go b/vendor/github.com/GeertJohan/go.rice/box.go new file mode 100644 index 0000000000..208b168965 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/box.go @@ -0,0 +1,339 @@ +package rice + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/GeertJohan/go.rice/embedded" +) + +// Box abstracts a directory for resources/files. +// It can either load files from disk, or from embedded code (when `rice --embed` was ran). +type Box struct { + name string + absolutePath string + embed *embedded.EmbeddedBox + appendd *appendedBox +} + +var defaultLocateOrder = []LocateMethod{LocateEmbedded, LocateAppended, LocateFS} + +func findBox(name string, order []LocateMethod) (*Box, error) { + b := &Box{name: name} + + // no support for absolute paths since gopath can be different on different machines. + // therefore, required box must be located relative to package requiring it. 
+ if filepath.IsAbs(name) { + return nil, errors.New("given name/path is absolute") + } + + var err error + for _, method := range order { + switch method { + case LocateEmbedded: + if embed := embedded.EmbeddedBoxes[name]; embed != nil { + b.embed = embed + return b, nil + } + + case LocateAppended: + appendedBoxName := strings.Replace(name, `/`, `-`, -1) + if appendd := appendedBoxes[appendedBoxName]; appendd != nil { + b.appendd = appendd + return b, nil + } + + case LocateFS: + // resolve absolute directory path + err := b.resolveAbsolutePathFromCaller() + if err != nil { + continue + } + // check if absolutePath exists on filesystem + info, err := os.Stat(b.absolutePath) + if err != nil { + continue + } + // check if absolutePath is actually a directory + if !info.IsDir() { + err = errors.New("given name/path is not a directory") + continue + } + return b, nil + case LocateWorkingDirectory: + // resolve absolute directory path + err := b.resolveAbsolutePathFromWorkingDirectory() + if err != nil { + continue + } + // check if absolutePath exists on filesystem + info, err := os.Stat(b.absolutePath) + if err != nil { + continue + } + // check if absolutePath is actually a directory + if !info.IsDir() { + err = errors.New("given name/path is not a directory") + continue + } + return b, nil + } + } + + if err == nil { + err = fmt.Errorf("could not locate box %q", name) + } + + return nil, err +} + +// FindBox returns a Box instance for given name. +// When the given name is a relative path, it's base path will be the calling pkg/cmd's source root. +// When the given name is absolute, it's absolute. derp. +// Make sure the path doesn't contain any sensitive information as it might be placed into generated go source (embedded). +func FindBox(name string) (*Box, error) { + return findBox(name, defaultLocateOrder) +} + +// MustFindBox returns a Box instance for given name, like FindBox does. +// It does not return an error, instead it panics when an error occurs. 
+func MustFindBox(name string) *Box { + box, err := findBox(name, defaultLocateOrder) + if err != nil { + panic(err) + } + return box +} + +// This is injected as a mutable function literal so that we can mock it out in +// tests and return a fixed test file. +var resolveAbsolutePathFromCaller = func(name string, nStackFrames int) (string, error) { + _, callingGoFile, _, ok := runtime.Caller(nStackFrames) + if !ok { + return "", errors.New("couldn't find caller on stack") + } + + // resolve to proper path + pkgDir := filepath.Dir(callingGoFile) + // fix for go cover + const coverPath = "_test/_obj_test" + if !filepath.IsAbs(pkgDir) { + if i := strings.Index(pkgDir, coverPath); i >= 0 { + pkgDir = pkgDir[:i] + pkgDir[i+len(coverPath):] // remove coverPath + pkgDir = filepath.Join(os.Getenv("GOPATH"), "src", pkgDir) // make absolute + } + } + return filepath.Join(pkgDir, name), nil +} + +func (b *Box) resolveAbsolutePathFromCaller() error { + path, err := resolveAbsolutePathFromCaller(b.name, 4) + if err != nil { + return err + } + b.absolutePath = path + return nil + +} + +func (b *Box) resolveAbsolutePathFromWorkingDirectory() error { + path, err := os.Getwd() + if err != nil { + return err + } + b.absolutePath = filepath.Join(path, b.name) + return nil +} + +// IsEmbedded indicates wether this box was embedded into the application +func (b *Box) IsEmbedded() bool { + return b.embed != nil +} + +// IsAppended indicates wether this box was appended to the application +func (b *Box) IsAppended() bool { + return b.appendd != nil +} + +// Time returns how actual the box is. +// When the box is embedded, it's value is saved in the embedding code. +// When the box is live, this methods returns time.Now() +func (b *Box) Time() time.Time { + if b.IsEmbedded() { + return b.embed.Time + } + + if b.IsAppended() { + return b.appendd.Time + } + + return time.Now() +} + +// Open opens a File from the box +// If there is an error, it will be of type *os.PathError. 
+func (b *Box) Open(name string) (*File, error) { + if Debug { + fmt.Printf("Open(%s)\n", name) + } + + if b.IsEmbedded() { + if Debug { + fmt.Println("Box is embedded") + } + + // trim prefix (paths are relative to box) + name = strings.TrimLeft(name, "/") + if Debug { + fmt.Printf("Trying %s\n", name) + } + + // search for file + ef := b.embed.Files[name] + if ef == nil { + if Debug { + fmt.Println("Didn't find file in embed") + } + // file not found, try dir + ed := b.embed.Dirs[name] + if ed == nil { + if Debug { + fmt.Println("Didn't find dir in embed") + } + // dir not found, error out + return nil, &os.PathError{ + Op: "open", + Path: name, + Err: os.ErrNotExist, + } + } + if Debug { + fmt.Println("Found dir. Returning virtual dir") + } + vd := newVirtualDir(ed) + return &File{virtualD: vd}, nil + } + + // box is embedded + if Debug { + fmt.Println("Found file. Returning virtual file") + } + vf := newVirtualFile(ef) + return &File{virtualF: vf}, nil + } + + if b.IsAppended() { + // trim prefix (paths are relative to box) + name = strings.TrimLeft(name, "/") + + // search for file + appendedFile := b.appendd.Files[name] + if appendedFile == nil { + return nil, &os.PathError{ + Op: "open", + Path: name, + Err: os.ErrNotExist, + } + } + + // create new file + f := &File{ + appendedF: appendedFile, + } + + // if this file is a directory, we want to be able to read and seek + if !appendedFile.dir { + // looks like malformed data in zip, error now + if appendedFile.content == nil { + return nil, &os.PathError{ + Op: "open", + Path: "name", + Err: errors.New("error reading data from zip file"), + } + } + // create new bytes.Reader + f.appendedFileReader = bytes.NewReader(appendedFile.content) + } + + // all done + return f, nil + } + + // perform os open + if Debug { + fmt.Printf("Using os.Open(%s)", filepath.Join(b.absolutePath, name)) + } + file, err := os.Open(filepath.Join(b.absolutePath, name)) + if err != nil { + return nil, err + } + return &File{realF: 
file}, nil +} + +// Bytes returns the content of the file with given name as []byte. +func (b *Box) Bytes(name string) ([]byte, error) { + file, err := b.Open(name) + if err != nil { + return nil, err + } + defer file.Close() + + content, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } + + return content, nil +} + +// MustBytes returns the content of the file with given name as []byte. +// panic's on error. +func (b *Box) MustBytes(name string) []byte { + bts, err := b.Bytes(name) + if err != nil { + panic(err) + } + return bts +} + +// String returns the content of the file with given name as string. +func (b *Box) String(name string) (string, error) { + // check if box is embedded, optimized fast path + if b.IsEmbedded() { + // find file in embed + ef := b.embed.Files[name] + if ef == nil { + return "", os.ErrNotExist + } + // return as string + return ef.Content, nil + } + + bts, err := b.Bytes(name) + if err != nil { + return "", err + } + return string(bts), nil +} + +// MustString returns the content of the file with given name as string. +// panic's on error. +func (b *Box) MustString(name string) string { + str, err := b.String(name) + if err != nil { + panic(err) + } + return str +} + +// Name returns the name of the box +func (b *Box) Name() string { + return b.name +} diff --git a/vendor/github.com/GeertJohan/go.rice/config.go b/vendor/github.com/GeertJohan/go.rice/config.go new file mode 100644 index 0000000000..45eb398fce --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/config.go @@ -0,0 +1,39 @@ +package rice + +// LocateMethod defines how a box is located. +type LocateMethod int + +const ( + LocateFS = LocateMethod(iota) // Locate on the filesystem according to package path. + LocateAppended // Locate boxes appended to the executable. + LocateEmbedded // Locate embedded boxes. + LocateWorkingDirectory // Locate on the binary working directory +) + +// Config allows customizing the box lookup behavior. 
+type Config struct { + // LocateOrder defines the priority order that boxes are searched for. By + // default, the package global FindBox searches for embedded boxes first, + // then appended boxes, and then finally boxes on the filesystem. That + // search order may be customized by provided the ordered list here. Leaving + // out a particular method will omit that from the search space. For + // example, []LocateMethod{LocateEmbedded, LocateAppended} will never search + // the filesystem for boxes. + LocateOrder []LocateMethod +} + +// FindBox searches for boxes using the LocateOrder of the config. +func (c *Config) FindBox(boxName string) (*Box, error) { + return findBox(boxName, c.LocateOrder) +} + +// MustFindBox searches for boxes using the LocateOrder of the config, like +// FindBox does. It does not return an error, instead it panics when an error +// occurs. +func (c *Config) MustFindBox(boxName string) *Box { + box, err := findBox(boxName, c.LocateOrder) + if err != nil { + panic(err) + } + return box +} diff --git a/vendor/github.com/GeertJohan/go.rice/debug.go b/vendor/github.com/GeertJohan/go.rice/debug.go new file mode 100644 index 0000000000..2e68c842e5 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/debug.go @@ -0,0 +1,4 @@ +package rice + +// Debug can be set to true to enable debugging. 
+var Debug = false diff --git a/vendor/github.com/GeertJohan/go.rice/embedded.go b/vendor/github.com/GeertJohan/go.rice/embedded.go new file mode 100644 index 0000000000..4f03fe1fea --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/embedded.go @@ -0,0 +1,90 @@ +package rice + +import ( + "os" + "time" + + "github.com/GeertJohan/go.rice/embedded" +) + +// re-type to make exported methods invisible to user (godoc) +// they're not required for the user +// embeddedDirInfo implements os.FileInfo +type embeddedDirInfo embedded.EmbeddedDir + +// Name returns the base name of the directory +// (implementing os.FileInfo) +func (ed *embeddedDirInfo) Name() string { + return ed.Filename +} + +// Size always returns 0 +// (implementing os.FileInfo) +func (ed *embeddedDirInfo) Size() int64 { + return 0 +} + +// Mode returns the file mode bits +// (implementing os.FileInfo) +func (ed *embeddedDirInfo) Mode() os.FileMode { + return os.FileMode(0555 | os.ModeDir) // dr-xr-xr-x +} + +// ModTime returns the modification time +// (implementing os.FileInfo) +func (ed *embeddedDirInfo) ModTime() time.Time { + return ed.DirModTime +} + +// IsDir returns the abbreviation for Mode().IsDir() (always true) +// (implementing os.FileInfo) +func (ed *embeddedDirInfo) IsDir() bool { + return true +} + +// Sys returns the underlying data source (always nil) +// (implementing os.FileInfo) +func (ed *embeddedDirInfo) Sys() interface{} { + return nil +} + +// re-type to make exported methods invisible to user (godoc) +// they're not required for the user +// embeddedFileInfo implements os.FileInfo +type embeddedFileInfo embedded.EmbeddedFile + +// Name returns the base name of the file +// (implementing os.FileInfo) +func (ef *embeddedFileInfo) Name() string { + return ef.Filename +} + +// Size returns the length in bytes for regular files; system-dependent for others +// (implementing os.FileInfo) +func (ef *embeddedFileInfo) Size() int64 { + return int64(len(ef.Content)) +} + +// Mode 
returns the file mode bits +// (implementing os.FileInfo) +func (ef *embeddedFileInfo) Mode() os.FileMode { + return os.FileMode(0555) // r-xr-xr-x +} + +// ModTime returns the modification time +// (implementing os.FileInfo) +func (ef *embeddedFileInfo) ModTime() time.Time { + return ef.FileModTime +} + +// IsDir returns the abbreviation for Mode().IsDir() (always false) +// (implementing os.FileInfo) +func (ef *embeddedFileInfo) IsDir() bool { + return false +} + +// Sys returns the underlying data source (always nil) +// (implementing os.FileInfo) +func (ef *embeddedFileInfo) Sys() interface{} { + return nil +} diff --git a/vendor/github.com/GeertJohan/go.rice/embedded/embedded.go b/vendor/github.com/GeertJohan/go.rice/embedded/embedded.go new file mode 100644 index 0000000000..a83dd2ddd2 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/embedded/embedded.go @@ -0,0 +1,83 @@ +// Package embedded defines embedded data types that are shared between the go.rice package and generated code. 
+package embedded + +import ( + "fmt" + "path/filepath" + "strings" + "time" +) + +const ( + EmbedTypeGo = 0 + EmbedTypeSyso = 1 +) + +// EmbeddedBox defines an embedded box +type EmbeddedBox struct { + Name string // box name + Time time.Time // embed time + EmbedType int // kind of embedding + Files map[string]*EmbeddedFile // ALL embedded files by full path + Dirs map[string]*EmbeddedDir // ALL embedded dirs by full path +} + +// Link creates the ChildDirs and ChildFiles links in all EmbeddedDir's +func (e *EmbeddedBox) Link() { + for _, ed := range e.Dirs { + ed.ChildDirs = make([]*EmbeddedDir, 0) + ed.ChildFiles = make([]*EmbeddedFile, 0) + } + for path, ed := range e.Dirs { + // skip for root, it'll create a recursion + if path == "" { + continue + } + parentDirpath, _ := filepath.Split(path) + if strings.HasSuffix(parentDirpath, "/") { + parentDirpath = parentDirpath[:len(parentDirpath)-1] + } + parentDir := e.Dirs[parentDirpath] + if parentDir == nil { + panic("parentDir `" + parentDirpath + "` is missing in embedded box") + } + parentDir.ChildDirs = append(parentDir.ChildDirs, ed) + } + for path, ef := range e.Files { + dirpath, _ := filepath.Split(path) + if strings.HasSuffix(dirpath, "/") { + dirpath = dirpath[:len(dirpath)-1] + } + dir := e.Dirs[dirpath] + if dir == nil { + panic("dir `" + dirpath + "` is missing in embedded box") + } + dir.ChildFiles = append(dir.ChildFiles, ef) + } +} + +// EmbeddedDir is instanced in the code generated by the rice tool and contains all necicary information about an embedded file +type EmbeddedDir struct { + Filename string + DirModTime time.Time + ChildDirs []*EmbeddedDir // direct childs, as returned by virtualDir.Readdir() + ChildFiles []*EmbeddedFile // direct childs, as returned by virtualDir.Readdir() +} + +// EmbeddedFile is instanced in the code generated by the rice tool and contains all necicary information about an embedded file +type EmbeddedFile struct { + Filename string // filename + FileModTime 
time.Time + Content string +} + +// EmbeddedBoxes is a public register of embedded boxes +var EmbeddedBoxes = make(map[string]*EmbeddedBox) + +// RegisterEmbeddedBox registers an EmbeddedBox +func RegisterEmbeddedBox(name string, box *EmbeddedBox) { + if _, exists := EmbeddedBoxes[name]; exists { + panic(fmt.Sprintf("EmbeddedBox with name `%s` exists already", name)) + } + EmbeddedBoxes[name] = box +} diff --git a/vendor/github.com/GeertJohan/go.rice/example/example-files/file.txt b/vendor/github.com/GeertJohan/go.rice/example/example-files/file.txt new file mode 100644 index 0000000000..315457845b --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/example/example-files/file.txt @@ -0,0 +1,2 @@ +test content +break \ No newline at end of file diff --git a/vendor/github.com/GeertJohan/go.rice/example/example-files/img/doge.jpg b/vendor/github.com/GeertJohan/go.rice/example/example-files/img/doge.jpg new file mode 100644 index 0000000000..6660dc9b2f Binary files /dev/null and b/vendor/github.com/GeertJohan/go.rice/example/example-files/img/doge.jpg differ diff --git a/vendor/github.com/GeertJohan/go.rice/example/example-templates/message.tmpl b/vendor/github.com/GeertJohan/go.rice/example/example-templates/message.tmpl new file mode 100644 index 0000000000..4b7638dc9e --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/example/example-templates/message.tmpl @@ -0,0 +1 @@ +I have a message for you: {{.Message}} diff --git a/vendor/github.com/GeertJohan/go.rice/example/example.go b/vendor/github.com/GeertJohan/go.rice/example/example.go new file mode 100644 index 0000000000..68f189f30b --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/example/example.go @@ -0,0 +1,69 @@ +package main + +import ( + "encoding/hex" + "fmt" + "log" + "net/http" + "os" + "text/template" + + "github.com/GeertJohan/go.rice" + "github.com/davecgh/go-spew/spew" +) + +func main() { + conf := rice.Config{ + LocateOrder: []rice.LocateMethod{rice.LocateEmbedded, 
rice.LocateAppended, rice.LocateFS}, + } + box, err := conf.FindBox("example-files") + if err != nil { + log.Fatalf("error opening rice.Box: %s\n", err) + } + // spew.Dump(box) + + contentString, err := box.String("file.txt") + if err != nil { + log.Fatalf("could not read file contents as string: %s\n", err) + } + log.Printf("Read some file contents as string:\n%s\n", contentString) + + contentBytes, err := box.Bytes("file.txt") + if err != nil { + log.Fatalf("could not read file contents as byteSlice: %s\n", err) + } + log.Printf("Read some file contents as byteSlice:\n%s\n", hex.Dump(contentBytes)) + + file, err := box.Open("file.txt") + if err != nil { + log.Fatalf("could not open file: %s\n", err) + } + spew.Dump(file) + + // find/create a rice.Box + templateBox, err := rice.FindBox("example-templates") + if err != nil { + log.Fatal(err) + } + // get file contents as string + templateString, err := templateBox.String("message.tmpl") + if err != nil { + log.Fatal(err) + } + // parse and execute the template + tmplMessage, err := template.New("message").Parse(templateString) + if err != nil { + log.Fatal(err) + } + tmplMessage.Execute(os.Stdout, map[string]string{"Message": "Hello, world!"}) + + http.Handle("/", http.FileServer(box.HTTPBox())) + go func() { + fmt.Println("Serving files on :8080, press ctrl-C to exit") + err := http.ListenAndServe(":8080", nil) + if err != nil { + log.Fatalf("error serving files: %v", err) + } + }() + select {} +} diff --git a/vendor/github.com/GeertJohan/go.rice/file.go b/vendor/github.com/GeertJohan/go.rice/file.go new file mode 100644 index 0000000000..bcdd3759a3 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/file.go @@ -0,0 +1,171 @@ +package rice + +import ( + "bytes" + "errors" + "os" + "path/filepath" +) + +// File implements the io.Reader, io.Seeker, io.Closer and http.File interfaces +type File struct { + // File abstracts file methods so the user doesn't see the difference between rice.virtualFile, 
rice.virtualDir and os.File + // TODO: maybe use internal File interface and four implementations: *os.File, appendedFile, virtualFile, virtualDir + + // real file on disk + realF *os.File + + // when embedded (go) + virtualF *virtualFile + virtualD *virtualDir + + // when appended (zip) + appendedF *appendedFile + appendedFileReader *bytes.Reader + // TODO: is appendedFileReader subject of races? Might need a lock here.. +} + +// Close is like (*os.File).Close() +// Visit http://golang.org/pkg/os/#File.Close for more information +func (f *File) Close() error { + if f.appendedF != nil { + if f.appendedFileReader == nil { + return errors.New("already closed") + } + f.appendedFileReader = nil + return nil + } + if f.virtualF != nil { + return f.virtualF.close() + } + if f.virtualD != nil { + return f.virtualD.close() + } + return f.realF.Close() +} + +// Stat is like (*os.File).Stat() +// Visit http://golang.org/pkg/os/#File.Stat for more information +func (f *File) Stat() (os.FileInfo, error) { + if f.appendedF != nil { + if f.appendedF.dir { + return f.appendedF.dirInfo, nil + } + if f.appendedFileReader == nil { + return nil, errors.New("file is closed") + } + return f.appendedF.zipFile.FileInfo(), nil + } + if f.virtualF != nil { + return f.virtualF.stat() + } + if f.virtualD != nil { + return f.virtualD.stat() + } + return f.realF.Stat() +} + +// Readdir is like (*os.File).Readdir() +// Visit http://golang.org/pkg/os/#File.Readdir for more information +func (f *File) Readdir(count int) ([]os.FileInfo, error) { + if f.appendedF != nil { + if f.appendedF.dir { + fi := make([]os.FileInfo, 0, len(f.appendedF.children)) + for _, childAppendedFile := range f.appendedF.children { + if childAppendedFile.dir { + fi = append(fi, childAppendedFile.dirInfo) + } else { + fi = append(fi, childAppendedFile.zipFile.FileInfo()) + } + } + return fi, nil + } + //++ TODO: is os.ErrInvalid the correct error for Readdir on file? 
+ return nil, os.ErrInvalid + } + if f.virtualF != nil { + return f.virtualF.readdir(count) + } + if f.virtualD != nil { + return f.virtualD.readdir(count) + } + return f.realF.Readdir(count) +} + +// Readdirnames is like (*os.File).Readdirnames() +// Visit http://golang.org/pkg/os/#File.Readdirnames for more information +func (f *File) Readdirnames(count int) ([]string, error) { + if f.appendedF != nil { + if f.appendedF.dir { + names := make([]string, 0, len(f.appendedF.children)) + for _, childAppendedFile := range f.appendedF.children { + if childAppendedFile.dir { + names = append(names, childAppendedFile.dirInfo.name) + } else { + names = append(names, childAppendedFile.zipFile.FileInfo().Name()) + } + } + return names, nil + } + // os.ErrInvalid to match the os.SyscallError (readdirent: invalid argument) that os.File returns + return nil, os.ErrInvalid + } + if f.virtualF != nil { + return f.virtualF.readdirnames(count) + } + if f.virtualD != nil { + return f.virtualD.readdirnames(count) + } + return f.realF.Readdirnames(count) +} + +// Read is like (*os.File).Read() +// Visit http://golang.org/pkg/os/#File.Read for more information +func (f *File) Read(bts []byte) (int, error) { + if f.appendedF != nil { + if f.appendedFileReader == nil { + return 0, &os.PathError{ + Op: "read", + Path: filepath.Base(f.appendedF.zipFile.Name), + Err: errors.New("file is closed"), + } + } + if f.appendedF.dir { + return 0, &os.PathError{ + Op: "read", + Path: filepath.Base(f.appendedF.zipFile.Name), + Err: errors.New("is a directory"), + } + } + return f.appendedFileReader.Read(bts) + } + if f.virtualF != nil { + return f.virtualF.read(bts) + } + if f.virtualD != nil { + return f.virtualD.read(bts) + } + return f.realF.Read(bts) +} + +// Seek is like (*os.File).Seek() +// Visit http://golang.org/pkg/os/#File.Seek for more information +func (f *File) Seek(offset int64, whence int) (int64, error) { + if f.appendedF != nil { + if f.appendedFileReader == nil { + return 0, 
&os.PathError{ + Op: "seek", + Path: filepath.Base(f.appendedF.zipFile.Name), + Err: errors.New("file is closed"), + } + } + return f.appendedFileReader.Seek(offset, whence) + } + if f.virtualF != nil { + return f.virtualF.seek(offset, whence) + } + if f.virtualD != nil { + return f.virtualD.seek(offset, whence) + } + return f.realF.Seek(offset, whence) +} diff --git a/vendor/github.com/GeertJohan/go.rice/go.mod b/vendor/github.com/GeertJohan/go.rice/go.mod new file mode 100644 index 0000000000..28ec8cc882 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/go.mod @@ -0,0 +1,13 @@ +module github.com/GeertJohan/go.rice + +go 1.12 + +require ( + github.com/GeertJohan/go.incremental v1.0.0 + github.com/akavel/rsrc v0.8.0 + github.com/daaku/go.zipexe v1.0.0 + github.com/davecgh/go-spew v1.1.1 + github.com/jessevdk/go-flags v1.4.0 + github.com/nkovacs/streamquote v1.0.0 + github.com/valyala/fasttemplate v1.0.1 +) diff --git a/vendor/github.com/GeertJohan/go.rice/go.sum b/vendor/github.com/GeertJohan/go.rice/go.sum new file mode 100644 index 0000000000..513cc3a38c --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/go.sum @@ -0,0 +1,16 @@ +github.com/GeertJohan/go.incremental v1.0.0 h1:7AH+pY1XUgQE4Y1HcXYaMqAI0m9yrFqo/jt0CW30vsg= +github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= +github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw= +github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= +github.com/daaku/go.zipexe v1.0.0 h1:VSOgZtH418pH9L16hC/JrgSNJbbAL26pj7lmD1+CGdY= +github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= +github.com/jessevdk/go-flags v1.4.0/go.mod 
h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/nkovacs/streamquote v1.0.0 h1:PmVIV08Zlx2lZK5fFZlMZ04eHcDTIFJCv/5/0twVUow= +github.com/nkovacs/streamquote v1.0.0/go.mod h1:BN+NaZ2CmdKqUuTUXUEm9j95B2TRbpOWpxbJYzzgUsc= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= diff --git a/vendor/github.com/GeertJohan/go.rice/http.go b/vendor/github.com/GeertJohan/go.rice/http.go new file mode 100644 index 0000000000..3a61f0e12d --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/http.go @@ -0,0 +1,21 @@ +package rice + +import ( + "net/http" +) + +// HTTPBox implements http.FileSystem which allows the use of Box with a http.FileServer. +// e.g.: http.Handle("/", http.FileServer(rice.MustFindBox("http-files").HTTPBox())) +type HTTPBox struct { + *Box +} + +// HTTPBox creates a new HTTPBox from an existing Box +func (b *Box) HTTPBox() *HTTPBox { + return &HTTPBox{b} +} + +// Open returns a File using the http.File interface +func (hb *HTTPBox) Open(name string) (http.File, error) { + return hb.Box.Open(name) +} diff --git a/vendor/github.com/GeertJohan/go.rice/rice/append.go b/vendor/github.com/GeertJohan/go.rice/rice/append.go new file mode 100644 index 0000000000..80ab109300 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/rice/append.go @@ -0,0 +1,159 @@ +package main + +import ( + "archive/zip" + "fmt" + "go/build" + "io" + "os" + "path/filepath" + "strings" + "time" + + zipexe "github.com/daaku/go.zipexe" +) + +func operationAppend(pkgs []*build.Package) { + // create tmp zipfile + tmpZipfileName := filepath.Join(os.TempDir(), fmt.Sprintf("ricebox-%d-%s.zip", time.Now().Unix(), randomString(10))) + verbosef("Will create tmp zipfile: 
%s\n", tmpZipfileName) + tmpZipfile, err := os.Create(tmpZipfileName) + if err != nil { + fmt.Printf("Error creating tmp zipfile: %s\n", err) + os.Exit(1) + } + defer func() { + tmpZipfile.Close() + os.Remove(tmpZipfileName) + }() + + // find abs path for binary file + binfileName, err := filepath.Abs(flags.Append.Executable) + if err != nil { + fmt.Printf("Error finding absolute path for executable to append: %s\n", err) + os.Exit(1) + } + verbosef("Will append to file: %s\n", binfileName) + + // check that command doesn't already have zip appended + if rd, _ := zipexe.Open(binfileName); rd != nil { + fmt.Printf("Cannot append to already appended executable. Please remove %s and build a fresh one.\n", binfileName) + os.Exit(1) + } + + // open binfile + binfile, err := os.OpenFile(binfileName, os.O_WRONLY, os.ModeAppend) + if err != nil { + fmt.Printf("Error: unable to open executable file: %s\n", err) + os.Exit(1) + } + defer binfile.Close() + + binfileInfo, err := binfile.Stat() + if err != nil { + fmt.Printf("Error: unable to stat executable file: %s\n", err) + os.Exit(1) + } + + // create zip.Writer + zipWriter := zip.NewWriter(tmpZipfile) + + // write the zip offset into the zip data + zipWriter.SetOffset(binfileInfo.Size()) + + for _, pkg := range pkgs { + // find boxes for this command + boxMap := findBoxes(pkg) + + // notify user when no calls to rice.FindBox are made (is this an error and therefore os.Exit(1) ? 
+ if len(boxMap) == 0 { + fmt.Printf("no calls to rice.FindBox() or rice.MustFindBox() found in import path `%s`\n", pkg.ImportPath) + continue + } + + verbosef("\n") + + for boxname := range boxMap { + appendedBoxName := strings.Replace(boxname, `/`, `-`, -1) + + // walk box path's and insert files + boxPath := filepath.Clean(filepath.Join(pkg.Dir, boxname)) + filepath.Walk(boxPath, func(path string, info os.FileInfo, err error) error { + if info == nil { + fmt.Printf("Error: box \"%s\" not found on disk\n", path) + os.Exit(1) + } + // create zipFilename + zipFileName := filepath.Join(appendedBoxName, strings.TrimPrefix(path, boxPath)) + // write directories as empty file with comment "dir" + if info.IsDir() { + header := &zip.FileHeader{ + Name: zipFileName, + Comment: "dir", + } + header.SetModTime(info.ModTime()) + _, err := zipWriter.CreateHeader(header) + if err != nil { + fmt.Printf("Error creating dir in tmp zip: %s\n", err) + os.Exit(1) + } + return nil + } + + // create zipFileWriter + zipFileHeader, err := zip.FileInfoHeader(info) + if err != nil { + fmt.Printf("Error creating zip FileHeader: %v\n", err) + os.Exit(1) + } + zipFileHeader.Name = zipFileName + zipFileWriter, err := zipWriter.CreateHeader(zipFileHeader) + if err != nil { + fmt.Printf("Error creating file in tmp zip: %s\n", err) + os.Exit(1) + } + srcFile, err := os.Open(path) + if err != nil { + fmt.Printf("Error opening file to append: %s\n", err) + os.Exit(1) + } + _, err = io.Copy(zipFileWriter, srcFile) + if err != nil { + fmt.Printf("Error copying file contents to zip: %s\n", err) + os.Exit(1) + } + srcFile.Close() + + return nil + }) + } + } + + err = zipWriter.Close() + if err != nil { + fmt.Printf("Error closing tmp zipfile: %s\n", err) + os.Exit(1) + } + + err = tmpZipfile.Sync() + if err != nil { + fmt.Printf("Error syncing tmp zipfile: %s\n", err) + os.Exit(1) + } + _, err = tmpZipfile.Seek(0, 0) + if err != nil { + fmt.Printf("Error seeking tmp zipfile: %s\n", err) + os.Exit(1) + 
} + _, err = binfile.Seek(0, 2) + if err != nil { + fmt.Printf("Error seeking bin file: %s\n", err) + os.Exit(1) + } + + _, err = io.Copy(binfile, tmpZipfile) + if err != nil { + fmt.Printf("Error appending zipfile to executable: %s\n", err) + os.Exit(1) + } +} diff --git a/vendor/github.com/GeertJohan/go.rice/rice/clean.go b/vendor/github.com/GeertJohan/go.rice/rice/clean.go new file mode 100644 index 0000000000..8d7027feb5 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/rice/clean.go @@ -0,0 +1,30 @@ +package main + +import ( + "fmt" + "go/build" + "os" + "path/filepath" +) + +func operationClean(pkg *build.Package) { + filepath.Walk(pkg.Dir, func(filename string, info os.FileInfo, err error) error { + if err != nil { + fmt.Printf("error walking pkg dir to clean files: %v\n", err) + os.Exit(1) + } + if info.IsDir() { + return nil + } + verbosef("checking file '%s'\n", filename) + if generated(filename) { + err := os.Remove(filename) + if err != nil { + fmt.Printf("error removing file (%s): %s\n", filename, err) + os.Exit(-1) + } + verbosef("removed file '%s'\n", filename) + } + return nil + }) +} diff --git a/vendor/github.com/GeertJohan/go.rice/rice/embed-go.go b/vendor/github.com/GeertJohan/go.rice/rice/embed-go.go new file mode 100644 index 0000000000..64fb27b89b --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/rice/embed-go.go @@ -0,0 +1,182 @@ +package main + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "go/build" + "go/format" + "io" + "log" + "os" + "path/filepath" + "strings" +) + +const boxFilename = "rice-box.go" + +// errEmptyBox is returned by writeBoxesGo when no calls to rice.FindBox +// are found in the package. 
+var errEmptyBox = errors.New("no calls to rice.FindBox() found") + +func writeBoxesGo(pkg *build.Package, out io.Writer) error { + boxMap := findBoxes(pkg) + + if len(boxMap) == 0 { + return errEmptyBox + } + + verbosef("\n") + out.Write([]byte("// Code generated by rice embed-go; DO NOT EDIT.\n")) + + var boxes []*boxDataType + + for boxname := range boxMap { + // find path and filename for this box + boxPath := filepath.Join(pkg.Dir, boxname) + + // Check to see if the path for the box is a symbolic link. If so, simply + // box what the symbolic link points to. Note: the filepath.Walk function + // will NOT follow any nested symbolic links. This only handles the case + // where the root of the box is a symbolic link. + symPath, serr := os.Readlink(boxPath) + if serr == nil { + boxPath = symPath + } + + // verbose info + verbosef("embedding box '%s' to '%s'\n", boxname, boxFilename) + + // read box metadata + boxInfo, ierr := os.Stat(boxPath) + if ierr != nil { + return fmt.Errorf("unable to access box at %s", boxPath) + } + + // create box datastructure (used by template) + box := &boxDataType{ + BoxName: boxname, + UnixNow: boxInfo.ModTime().Unix(), + Files: make([]*fileDataType, 0), + Dirs: make(map[string]*dirDataType), + } + + if !boxInfo.IsDir() { + return fmt.Errorf("box %s must point to a directory but points to %s instead", + boxname, boxPath) + } + + // fill box datastructure with file data + err := filepath.Walk(boxPath, func(path string, info os.FileInfo, err error) error { + if err != nil { + return fmt.Errorf("error walking box: %s", err) + } + + filename := strings.TrimPrefix(path, boxPath) + filename = strings.Replace(filename, "\\", "/", -1) + filename = strings.TrimPrefix(filename, "/") + if info.IsDir() { + dirData := &dirDataType{ + Identifier: "dir" + nextIdentifier(), + FileName: filename, + ModTime: info.ModTime().Unix(), + ChildFiles: make([]*fileDataType, 0), + ChildDirs: make([]*dirDataType, 0), + } + verbosef("\tincludes dir: '%s'\n", 
dirData.FileName) + box.Dirs[dirData.FileName] = dirData + + // add tree entry (skip for root, it'll create a recursion) + if dirData.FileName != "" { + pathParts := strings.Split(dirData.FileName, "/") + parentDir := box.Dirs[strings.Join(pathParts[:len(pathParts)-1], "/")] + parentDir.ChildDirs = append(parentDir.ChildDirs, dirData) + } + } else if !generated(filename) { + fileData := &fileDataType{ + Identifier: "file" + nextIdentifier(), + FileName: filename, + ModTime: info.ModTime().Unix(), + } + verbosef("\tincludes file: '%s'\n", fileData.FileName) + + // Instead of injecting content, inject placeholder for fasttemplate. + // This allows us to stream the content into the final file, + // and it also avoids running gofmt on a very large source code. + fileData.Path = path + box.Files = append(box.Files, fileData) + + // add tree entry + pathParts := strings.Split(fileData.FileName, "/") + parentDir := box.Dirs[strings.Join(pathParts[:len(pathParts)-1], "/")] + if parentDir == nil { + return fmt.Errorf("parent of %s is not within the box", path) + } + parentDir.ChildFiles = append(parentDir.ChildFiles, fileData) + } + return nil + }) + if err != nil { + return fmt.Errorf("failed in filepath walk: %v", err) + } + boxes = append(boxes, box) + + } + + embedSourceUnformated := bytes.NewBuffer(make([]byte, 0)) + + // execute template to buffer + err := tmplEmbeddedBox.Execute( + embedSourceUnformated, + embedFileDataType{pkg.Name, boxes}, + ) + if err != nil { + return fmt.Errorf("error writing embedded box to file (template execute): %s", err) + } + + // format the source code + embedSource, err := format.Source(embedSourceUnformated.Bytes()) + if err != nil { + return fmt.Errorf("error formatting embedSource: %s", err) + } + + // write source to file + bufWriter := bufio.NewWriterSize(out, 100*1024) + err = embeddedBoxFasttemplate(bufWriter, string(embedSource)) + if err != nil { + return fmt.Errorf("error writing embedSource to file: %s\n", err) + } + err = 
bufWriter.Flush() + if err != nil { + return fmt.Errorf("error writing embedSource to file: %s", err) + } + return nil +} + +func operationEmbedGo(pkg *build.Package) { + // create go file for box + boxFile, err := os.Create(filepath.Join(pkg.Dir, boxFilename)) + if err != nil { + log.Printf("error creating embedded box file: %s\n", err) + os.Exit(1) + } + + err = writeBoxesGo(pkg, boxFile) + boxFile.Close() + if err != nil { + // don't leave an invalid go file in the package directory. + if errRemove := os.Remove(boxFile.Name()); errRemove != nil { + log.Printf("error while removing file: %s\n", errRemove) + } + if err != errEmptyBox { + log.Printf("error creating embedded box file: %s\n", err) + os.Exit(1) + } else { + // notify user when no calls to rice.FindBox are made, + // but don't fail, since it's useful to be able to run + // go.rice unconditionally. + log.Println(errEmptyBox) + } + } +} diff --git a/vendor/github.com/GeertJohan/go.rice/rice/embed-go_test.go b/vendor/github.com/GeertJohan/go.rice/rice/embed-go_test.go new file mode 100644 index 0000000000..b29c73a675 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/rice/embed-go_test.go @@ -0,0 +1,135 @@ +package main + +import ( + "bytes" + "path/filepath" + "testing" +) + +func TestEmbedGo(t *testing.T) { + sourceFiles := []sourceFile{ + { + "boxes.go", + []byte(`package main + +import ( + "github.com/GeertJohan/go.rice" +) + +func main() { + rice.MustFindBox("foo") +} +`), + }, + { + "foo/test1.txt", + []byte(`This is test 1`), + }, + { + "foo/test2.txt", + []byte(`This is test 2`), + }, + { + "foo/bar/test1.txt", + []byte(`This is test 1 in bar`), + }, + { + "foo/bar/baz/test1.txt", + []byte(`This is test 1 in bar/baz`), + }, + { + "foo/bar/baz/backtick`.txt", + []byte(`Backtick filename`), + }, + { + "foo/bar/baz/\"quote\".txt", + []byte(`double quoted filename`), + }, + { + "foo/bar/baz/'quote'.txt", + []byte(`single quoted filename`), + }, + { + "foo/`/`/`.txt", + []byte(`Backticks 
everywhere!`), + }, + { + "foo/new\nline", + []byte("File with newline in name. Yes, this is possible."), + }, + { + "foo/fast{%template%}", + []byte("Fasttemplate"), + }, + { + "foo/fast{%template", + []byte("Fasttemplate open"), + }, + { + "foo/fast%}template", + []byte("Fasttemplate close"), + }, + { + "foo/fast{%dir%}/test.txt", + []byte("Fasttemplate directory"), + }, + { + "foo/fast{%dir/test.txt", + []byte("Fasttemplate directory open"), + }, + { + "foo/fast%}dir/test.txt", + []byte("Fasttemplate directory close"), + }, + { + "foo/fast{$%template%$}", + []byte("Fasttemplate double escaping"), + }, + } + withIgnoredFiles := append(sourceFiles, sourceFile{"foo/rice-box.go", []byte("package main\nfunc init() {\n}")}, sourceFile{"foo/_amd64.rice-box.syso", []byte{}}) + pkg, cleanup, err := setUpTestPkg("foobar", withIgnoredFiles) + defer cleanup() + if err != nil { + t.Error(err) + return + } + + var buffer bytes.Buffer + + err = writeBoxesGo(pkg, &buffer) + if err != nil { + t.Error(err) + return + } + + t.Logf("Generated file: \n%s", buffer.String()) + + validateBoxFile(t, filepath.Join(pkg.Dir, "rice-box.go"), &buffer, sourceFiles) +} + +func TestEmbedGoEmpty(t *testing.T) { + sourceFiles := []sourceFile{ + { + "boxes.go", + []byte(`package main + +func main() { +} +`), + }, + } + pkg, cleanup, err := setUpTestPkg("foobar", sourceFiles) + defer cleanup() + if err != nil { + t.Error(err) + return + } + + var buffer bytes.Buffer + + err = writeBoxesGo(pkg, &buffer) + if err != errEmptyBox { + t.Errorf("expected errEmptyBox, got %v", err) + return + } +} diff --git a/vendor/github.com/GeertJohan/go.rice/rice/embed-syso.go b/vendor/github.com/GeertJohan/go.rice/rice/embed-syso.go new file mode 100644 index 0000000000..85ba1adf81 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/rice/embed-syso.go @@ -0,0 +1,206 @@ +package main + +import ( + "bytes" + "encoding/gob" + "fmt" + "go/build" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strings" + 
"text/template" + + "github.com/GeertJohan/go.rice/embedded" + "github.com/akavel/rsrc/coff" +) + +const sysoBoxSuffix = ".rice-box.syso" + +type sizedReader struct { + *bytes.Reader +} + +func (s sizedReader) Size() int64 { + return int64(s.Len()) +} + +var tmplEmbeddedSysoHelper *template.Template + +func init() { + var err error + tmplEmbeddedSysoHelper, err = template.New("embeddedSysoHelper").Parse(`package {{.Package}} +// ############# GENERATED CODE ##################### +// ## This file was generated by the rice tool. +// ## Do not edit unless you know what you're doing. +// ################################################## + +// extern char _bricebox_{{.Symname}}[], _ericebox_{{.Symname}}; +// int get_{{.Symname}}_length() { +// return &_ericebox_{{.Symname}} - _bricebox_{{.Symname}}; +// } +import "C" +import ( + "bytes" + "encoding/gob" + "github.com/GeertJohan/go.rice/embedded" + "unsafe" +) + +func init() { + ptr := unsafe.Pointer(&C._bricebox_{{.Symname}}) + bts := C.GoBytes(ptr, C.get_{{.Symname}}_length()) + embeddedBox := &embedded.EmbeddedBox{} + err := gob.NewDecoder(bytes.NewReader(bts)).Decode(embeddedBox) + if err != nil { + panic("error decoding embedded box: "+err.Error()) + } + embeddedBox.Link() + embedded.RegisterEmbeddedBox(embeddedBox.Name, embeddedBox) +}`) + if err != nil { + panic("could not parse template embeddedSysoHelper: " + err.Error()) + } +} + +type embeddedSysoHelperData struct { + Package string + Symname string +} + +func operationEmbedSyso(pkg *build.Package) { + + regexpSynameReplacer := regexp.MustCompile(`[^a-z0-9_]`) + + boxMap := findBoxes(pkg) + + // notify user when no calls to rice.FindBox are made (is this an error and therefore os.Exit(1) ? 
+ if len(boxMap) == 0 { + fmt.Println("no calls to rice.FindBox() found") + return + } + + verbosef("\n") + + for boxname := range boxMap { + // find path and filename for this box + boxPath := filepath.Join(pkg.Dir, boxname) + boxFilename := strings.Replace(boxname, "/", "-", -1) + boxFilename = strings.Replace(boxFilename, "..", "back", -1) + boxFilename = strings.Replace(boxFilename, ".", "-", -1) + + // verbose info + verbosef("embedding box '%s'\n", boxname) + verbosef("\tto file %s\n", boxFilename) + + // read box metadata + boxInfo, ierr := os.Stat(boxPath) + if ierr != nil { + fmt.Printf("Error: unable to access box at %s\n", boxPath) + os.Exit(1) + } + + // create box datastructure (used by template) + box := &embedded.EmbeddedBox{ + Name: boxname, + Time: boxInfo.ModTime(), + EmbedType: embedded.EmbedTypeSyso, + Files: make(map[string]*embedded.EmbeddedFile), + Dirs: make(map[string]*embedded.EmbeddedDir), + } + + // fill box datastructure with file data + filepath.Walk(boxPath, func(path string, info os.FileInfo, err error) error { + if err != nil { + fmt.Printf("error walking box: %s\n", err) + os.Exit(1) + } + + filename := strings.TrimPrefix(path, boxPath) + filename = strings.Replace(filename, "\\", "/", -1) + filename = strings.TrimPrefix(filename, "/") + if info.IsDir() { + embeddedDir := &embedded.EmbeddedDir{ + Filename: filename, + DirModTime: info.ModTime(), + } + verbosef("\tincludes dir: '%s'\n", embeddedDir.Filename) + box.Dirs[embeddedDir.Filename] = embeddedDir + + // add tree entry (skip for root, it'll create a recursion) + if embeddedDir.Filename != "" { + pathParts := strings.Split(embeddedDir.Filename, "/") + parentDir := box.Dirs[strings.Join(pathParts[:len(pathParts)-1], "/")] + parentDir.ChildDirs = append(parentDir.ChildDirs, embeddedDir) + } + } else if !generated(filename) { + embeddedFile := &embedded.EmbeddedFile{ + Filename: filename, + FileModTime: info.ModTime(), + Content: "", + } + verbosef("\tincludes file: '%s'\n", 
embeddedFile.Filename) + contentBytes, err := ioutil.ReadFile(path) + if err != nil { + fmt.Printf("error reading file content while walking box: %s\n", err) + os.Exit(1) + } + embeddedFile.Content = string(contentBytes) + box.Files[embeddedFile.Filename] = embeddedFile + } + return nil + }) + + // encode embedded box to gob file + boxGobBuf := &bytes.Buffer{} + err := gob.NewEncoder(boxGobBuf).Encode(box) + if err != nil { + fmt.Printf("error encoding box to gob: %v\n", err) + os.Exit(1) + } + + verbosef("gob-encoded embeddedBox is %d bytes large\n", boxGobBuf.Len()) + + // write coff + symname := regexpSynameReplacer.ReplaceAllString(boxname, "_") + createCoffSyso(boxname, symname, "386", boxGobBuf.Bytes()) + createCoffSyso(boxname, symname, "amd64", boxGobBuf.Bytes()) + + // write go + sysoHelperData := embeddedSysoHelperData{ + Package: pkg.Name, + Symname: symname, + } + fileSysoHelper, err := os.Create(boxFilename + ".rice-box.go") + if err != nil { + fmt.Printf("error creating syso helper: %v\n", err) + os.Exit(1) + } + err = tmplEmbeddedSysoHelper.Execute(fileSysoHelper, sysoHelperData) + if err != nil { + fmt.Printf("error executing tmplEmbeddedSysoHelper: %v\n", err) + os.Exit(1) + } + } +} + +func createCoffSyso(boxFilename string, symname string, arch string, data []byte) { + boxCoff := coff.NewRDATA() + switch arch { + case "386": + case "amd64": + boxCoff.FileHeader.Machine = 0x8664 + default: + panic("invalid arch") + } + boxCoff.AddData("_bricebox_"+symname, sizedReader{bytes.NewReader(data)}) + boxCoff.AddData("_ericebox_"+symname, io.NewSectionReader(strings.NewReader("\000\000"), 0, 2)) // TODO: why? 
copied from rsrc, which copied it from as-generated + boxCoff.Freeze() + err := writeCoff(boxCoff, boxFilename+"_"+arch+sysoBoxSuffix) + if err != nil { + fmt.Printf("error writing %s coff/.syso: %v\n", arch, err) + os.Exit(1) + } +} diff --git a/vendor/github.com/GeertJohan/go.rice/rice/find.go b/vendor/github.com/GeertJohan/go.rice/rice/find.go new file mode 100644 index 0000000000..80f5d0868e --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/rice/find.go @@ -0,0 +1,148 @@ +package main + +import ( + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "os" + "path/filepath" + "strings" +) + +func badArgument(fileset *token.FileSet, p token.Pos) { + pos := fileset.Position(p) + filename := pos.Filename + base, err := os.Getwd() + if err == nil { + rpath, perr := filepath.Rel(base, pos.Filename) + if perr == nil { + filename = rpath + } + } + msg := fmt.Sprintf("%s:%d: Error: found call to rice.FindBox, "+ + "but argument must be a string literal.\n", filename, pos.Line) + fmt.Println(msg) + os.Exit(1) +} + +func findBoxes(pkg *build.Package) map[string]bool { + // create map of boxes to embed + var boxMap = make(map[string]bool) + + // create one list of files for this package + filenames := make([]string, 0, len(pkg.GoFiles)+len(pkg.CgoFiles)) + filenames = append(filenames, pkg.GoFiles...) + filenames = append(filenames, pkg.CgoFiles...) + + // loop over files, search for rice.FindBox(..) 
calls + for _, filename := range filenames { + // find full filepath + fullpath := filepath.Join(pkg.Dir, filename) + if strings.HasSuffix(filename, "rice-box.go") { + // Ignore *.rice-box.go files + verbosef("skipping file %q\n", fullpath) + continue + } + verbosef("scanning file %q\n", fullpath) + + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, fullpath, nil, 0) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + var riceIsImported bool + ricePkgName := "rice" + for _, imp := range f.Imports { + if strings.HasSuffix(imp.Path.Value, "go.rice\"") { + if imp.Name != nil { + ricePkgName = imp.Name.Name + } + riceIsImported = true + break + } + } + if !riceIsImported { + // Rice wasn't imported, so we won't find a box. + continue + } + if ricePkgName == "_" { + // Rice pkg is unnamed, so we won't find a box. + continue + } + + // Inspect AST, looking for calls to (Must)?FindBox. + // First parameter of the func must be a basic literal. + // Identifiers won't be resolved. + var nextIdentIsBoxFunc bool + var nextBasicLitParamIsBoxName bool + var boxCall token.Pos + var validVariablesForBoxes = make(map[string]bool) + + ast.Inspect(f, func(node ast.Node) bool { + if node == nil { + return false + } + switch x := node.(type) { + // this case fixes the var := func() style assignments, not assignments to vars declared separately from the assignment. 
+ case *ast.AssignStmt: + var assign = node.(*ast.AssignStmt) + name, found := assign.Lhs[0].(*ast.Ident) + if found { + composite, first := assign.Rhs[0].(*ast.CompositeLit) + if first { + riceSelector, second := composite.Type.(*ast.SelectorExpr) + + if second { + callCorrect := riceSelector.Sel.Name == "Config" + packageName, third := riceSelector.X.(*ast.Ident) + + if third && callCorrect && packageName.Name == ricePkgName { + validVariablesForBoxes[name.Name] = true + verbosef("\tfound variable, saving to scan for boxes: %q\n", name.Name) + } + } + } + } + case *ast.Ident: + if nextIdentIsBoxFunc || ricePkgName == "." { + nextIdentIsBoxFunc = false + if x.Name == "FindBox" || x.Name == "MustFindBox" { + nextBasicLitParamIsBoxName = true + boxCall = x.Pos() + } + } else { + if x.Name == ricePkgName || validVariablesForBoxes[x.Name] { + nextIdentIsBoxFunc = true + } + } + case *ast.BasicLit: + if nextBasicLitParamIsBoxName { + if x.Kind == token.STRING { + nextBasicLitParamIsBoxName = false + // trim "" or `` + name := x.Value[1 : len(x.Value)-1] + boxMap[name] = true + verbosef("\tfound box %q\n", name) + } else { + badArgument(fset, boxCall) + } + } + + default: + if nextIdentIsBoxFunc { + nextIdentIsBoxFunc = false + } + if nextBasicLitParamIsBoxName { + badArgument(fset, boxCall) + } + } + return true + }) + } + + return boxMap +} diff --git a/vendor/github.com/GeertJohan/go.rice/rice/find_test.go b/vendor/github.com/GeertJohan/go.rice/rice/find_test.go new file mode 100644 index 0000000000..e8f9deef5e --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/rice/find_test.go @@ -0,0 +1,268 @@ +package main + +import ( + "fmt" + "testing" +) + +func expectBoxes(expected []string, actual map[string]bool) error { + if len(expected) != len(actual) { + return fmt.Errorf("expected %v, got %v", expected, actual) + } + for _, box := range expected { + if _, ok := actual[box]; !ok { + return fmt.Errorf("expected %v, got %v", expected, actual) + } + } + return nil +} 
+ +func TestFindOneBox(t *testing.T) { + pkg, cleanup, err := setUpTestPkg("foobar", []sourceFile{ + { + "boxes.go", + []byte(`package main + +import ( + "github.com/GeertJohan/go.rice" +) + +func main() { + rice.MustFindBox("foo") +} +`), + }, + }) + defer cleanup() + if err != nil { + t.Error(err) + return + } + + expectedBoxes := []string{"foo"} + boxMap := findBoxes(pkg) + if err := expectBoxes(expectedBoxes, boxMap); err != nil { + t.Error(err) + } +} + +func TestFindOneBoxViaVariable(t *testing.T) { + + pkg, cleanup, err := setUpTestPkg("foobar", []sourceFile{ + { + "boxes.go", + []byte(`package main + +import ( + "github.com/GeertJohan/go.rice" +) + +func main() { + conf := rice.Config{ + LocateOrder: []rice.LocateMethod{rice.LocateEmbedded, rice.LocateAppended, rice.LocateFS}, + } + conf.MustFindBox("foo") +} +`), + }, + }) + defer cleanup() + if err != nil { + t.Error(err) + return + } + + expectedBoxes := []string{"foo"} + boxMap := findBoxes(pkg) + if err := expectBoxes(expectedBoxes, boxMap); err != nil { + t.Error(err) + } +} + +func TestFindMultipleBoxes(t *testing.T) { + pkg, cleanup, err := setUpTestPkg("foobar", []sourceFile{ + { + "boxes.go", + []byte(`package main + +import ( + "github.com/GeertJohan/go.rice" +) + +func main() { + rice.MustFindBox("foo") + rice.MustFindBox("bar") +} +`), + }, + }) + defer cleanup() + if err != nil { + t.Error(err) + return + } + + expectedBoxes := []string{"foo", "bar"} + boxMap := findBoxes(pkg) + if err := expectBoxes(expectedBoxes, boxMap); err != nil { + t.Error(err) + } +} + +func TestNoBoxFoundIfRiceNotImported(t *testing.T) { + pkg, cleanup, err := setUpTestPkg("foobar", []sourceFile{ + { + "boxes.go", + []byte(`package main +type fakerice struct {} + +func (fr fakerice) FindBox(s string) { +} + +func main() { + rice := fakerice{} + rice.FindBox("foo") +} +`), + }, + }) + defer cleanup() + if err != nil { + t.Error(err) + return + } + + boxMap := findBoxes(pkg) + if _, ok := boxMap["foo"]; ok { + 
t.Errorf("Unexpected box %q was found", "foo") + } +} + +func TestUnrelatedBoxesAreNotFound(t *testing.T) { + pkg, cleanup, err := setUpTestPkg("foobar", []sourceFile{ + { + "boxes.go", + []byte(`package foobar + +import ( + _ "github.com/GeertJohan/go.rice" +) + +type fakerice struct {} + +func (fr fakerice) FindBox(s string) { +} + +func FindBox(s string) { + +} + +func LoadBoxes() { + rice := fakerice{} + rice.FindBox("foo") + + FindBox("bar") +} +`), + }, + }) + defer cleanup() + if err != nil { + t.Error(err) + return + } + + boxMap := findBoxes(pkg) + for _, box := range []string{"foo", "bar"} { + if _, ok := boxMap[box]; ok { + t.Errorf("Unexpected box %q was found", box) + } + } +} + +func TestMixGoodAndBadBoxes(t *testing.T) { + pkg, cleanup, err := setUpTestPkg("foobar", []sourceFile{ + { + "boxes1.go", + []byte(`package foobar + +import ( + _ "github.com/GeertJohan/go.rice" +) + +type fakerice struct {} + +func (fr fakerice) FindBox(s string) { +} + +func FindBox(s string) { + +} + +func LoadBoxes1() { + rice := fakerice{} + rice.FindBox("foo") + + FindBox("bar") +} +`), + }, + { + "boxes2.go", + []byte(`package foobar + +import ( + noodles "github.com/GeertJohan/go.rice" +) + +func LoadBoxes2() { + FindBox("baz") + noodles.FindBox("veggies") +} +`), + }, + { + "boxes3.go", + []byte(`package foobar + +import ( + "github.com/GeertJohan/go.rice" +) + +func LoadBoxes3() { + rice.FindBox("fish") +} +`), + }, + { + "boxes4.go", + []byte(`package foobar + +import ( + . 
"github.com/GeertJohan/go.rice" +) + +func LoadBoxes3() { + MustFindBox("chicken") +} +`), + }, + }) + defer cleanup() + if err != nil { + t.Error(err) + return + } + + boxMap := findBoxes(pkg) + for _, box := range []string{"foo", "bar", "baz"} { + if _, ok := boxMap[box]; ok { + t.Errorf("Unexpected box %q was found", box) + } + } + for _, box := range []string{"veggies", "fish", "chicken"} { + if _, ok := boxMap[box]; !ok { + t.Errorf("Expected box %q not found", box) + } + } +} diff --git a/vendor/github.com/GeertJohan/go.rice/rice/flags.go b/vendor/github.com/GeertJohan/go.rice/rice/flags.go new file mode 100644 index 0000000000..96620b93f9 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/rice/flags.go @@ -0,0 +1,84 @@ +package main + +import ( + "fmt" + "go/build" + "os" + + goflags "github.com/jessevdk/go-flags" // rename import to `goflags` (file scope) so we can use `var flags` (package scope) +) + +// flags +var flags struct { + MemProfile string `long:"memprofile" description:"Write memory profile to this file"` + CpuProfile string `long:"cpuprofile" description:"Write cpu profile to this file"` + Verbose bool `long:"verbose" short:"v" description:"Show verbose debug information"` + ImportPaths []string `long:"import-path" short:"i" description:"Import path(s) to use. Using PWD when left empty. Specify multiple times for more import paths to append"` + + Append struct { + Executable string `long:"exec" description:"Executable to append" required:"true"` + } `command:"append"` + + EmbedGo struct{} `command:"embed-go" alias:"embed"` + EmbedSyso struct{} `command:"embed-syso"` + Clean struct{} `command:"clean"` + + Tags []string `long:"tags" description:"Tags to use with the implicit go build"` +} + +// flags parser +var flagsParser *goflags.Parser + +// initFlags parses the given flags. 
+// when the user asks for help (-h or --help): the application exists with status 0 +// when unexpected flags is given: the application exits with status 1 +func parseArguments() { + // create flags parser in global var, for flagsParser.Active.Name (operation) + flagsParser = goflags.NewParser(&flags, goflags.Default) + + // parse flags + args, err := flagsParser.Parse() + if err != nil { + // assert the err to be a flags.Error + flagError := err.(*goflags.Error) + if flagError.Type == goflags.ErrHelp { + // user asked for help on flags. + // program can exit successfully + os.Exit(0) + } + if flagError.Type == goflags.ErrUnknownFlag { + fmt.Println("Use --help to view available options.") + os.Exit(1) + } + if flagError.Type == goflags.ErrRequired { + os.Exit(1) + } + fmt.Printf("Error parsing flags: %s\n", err) + os.Exit(1) + } + + // error on left-over arguments + if len(args) > 0 { + fmt.Printf("Unexpected arguments: %s\nUse --help to view available options.", args) + os.Exit(1) + } + + // default ImportPath to pwd when not set + if len(flags.ImportPaths) == 0 { + pwd, err := os.Getwd() + if err != nil { + fmt.Printf("error getting pwd: %s\n", err) + os.Exit(1) + } + verbosef("using pwd as import path\n") + // find non-absolute path for this pwd + pkg, err := build.ImportDir(pwd, build.FindOnly) + if err != nil { + fmt.Printf("error using current directory as import path: %s\n", err) + os.Exit(1) + } + flags.ImportPaths = append(flags.ImportPaths, pkg.ImportPath) + verbosef("using import paths: %s\n", flags.ImportPaths) + return + } +} diff --git a/vendor/github.com/GeertJohan/go.rice/rice/helpers_test.go b/vendor/github.com/GeertJohan/go.rice/rice/helpers_test.go new file mode 100644 index 0000000000..53989ed251 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/rice/helpers_test.go @@ -0,0 +1,646 @@ +package main + +import ( + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strconv" + 
"strings" + "testing" +) + +type sourceFile struct { + Name string + Contents []byte +} + +type registeredDir struct { + Filename string + ModTime int + ChildFiles []*registeredFile + ChildDirs []*registeredDir +} + +type registeredFile struct { + Filename string + ModTime int + Content string +} + +type registeredBox struct { + Name string + Time int + // key is path + Dirs map[string]*registeredDir + // key is path + Files map[string]*registeredFile +} + +func setUpTestPkg(pkgName string, files []sourceFile) (*build.Package, func(), error) { + temp, err := ioutil.TempDir("", "go.rice-test") + if err != nil { + return nil, func() {}, err + } + cleanup := func() { + os.RemoveAll(temp) + } + dir := filepath.Join(temp, pkgName) + if err := os.Mkdir(dir, 0770); err != nil { + return nil, cleanup, err + } + for _, f := range files { + fullPath := filepath.Join(dir, f.Name) + if err := os.MkdirAll(filepath.Dir(fullPath), 0770); err != nil { + return nil, cleanup, err + } + if err := ioutil.WriteFile(fullPath, f.Contents, 0660); err != nil { + return nil, cleanup, err + } + } + pkg, err := build.ImportDir(dir, 0) + return pkg, cleanup, err +} + +// isSimpleSelector returns true if expr is pkgName.ident +func isSimpleSelector(pkgName, ident string, expr ast.Expr) bool { + if sel, ok := expr.(*ast.SelectorExpr); ok { + if pkgIdent, ok := sel.X.(*ast.Ident); ok && pkgIdent.Name == pkgName && sel.Sel != nil && sel.Sel.Name == ident { + return true + } + } + return false +} + +func isIdent(ident string, expr ast.Expr) bool { + if expr, ok := expr.(*ast.Ident); ok && expr.Name == ident { + return true + } + return false +} + +func getIdentName(expr ast.Expr) (string, bool) { + if expr, ok := expr.(*ast.Ident); ok { + return expr.Name, true + } + return "", false +} + +func getKey(expr *ast.KeyValueExpr) string { + if ident, ok := expr.Key.(*ast.Ident); ok { + return ident.Name + } + return "" +} + +// parseModTime parses a time.Unix call, and returns the unix time. 
+func parseModTime(expr ast.Expr) (int, error) { + if expr, ok := expr.(*ast.CallExpr); ok { + if !isSimpleSelector("time", "Unix", expr.Fun) { + return 0, fmt.Errorf("ModTime is not time.Unix: %#v", expr.Fun) + } + if len(expr.Args) == 0 { + return 0, fmt.Errorf("not enough args to time.Unix") + } + arg0 := expr.Args[0] + if lit, ok := arg0.(*ast.BasicLit); ok && lit.Kind == token.INT { + return strconv.Atoi(lit.Value) + } + } + return 0, fmt.Errorf("not time.Unix: %#v", expr) +} + +func parseString(expr ast.Expr) (string, error) { + if expr, ok := expr.(*ast.CallExpr); ok && isIdent("string", expr.Fun) && len(expr.Args) == 1 { + return parseString(expr.Args[0]) + } + if lit, ok := expr.(*ast.BasicLit); ok && lit.Kind == token.STRING { + return strconv.Unquote(lit.Value) + } + return "", fmt.Errorf("not string: %#v", expr) +} + +// parseDir parses an embedded.EmbeddedDir literal. +// It can be either a variable name or a composite literal. +// Returns nil if the literal is not embedded.EmbeddedDir. 
+func parseDir(expr ast.Expr, dirs map[string]*registeredDir, files map[string]*registeredFile) (*registeredDir, []error) { + + if varName, ok := getIdentName(expr); ok { + dir, ok := dirs[varName] + if !ok { + return nil, []error{fmt.Errorf("unknown variable %v", varName)} + } + return dir, nil + } + + lit, ok := expr.(*ast.CompositeLit) + if !ok { + return nil, []error{fmt.Errorf("dir is not a composite literal: %#v", expr)} + } + + var errors []error + if !isSimpleSelector("embedded", "EmbeddedDir", lit.Type) { + return nil, nil + } + ret := ®isteredDir{} + for _, el := range lit.Elts { + if el, ok := el.(*ast.KeyValueExpr); ok { + key := getKey(el) + if key == "" { + continue + } + switch key { + case "DirModTime": + var err error + ret.ModTime, err = parseModTime(el.Value) + if err != nil { + errors = append(errors, fmt.Errorf("DirModTime %s", err)) + } + case "Filename": + var err error + ret.Filename, err = parseString(el.Value) + if err != nil { + errors = append(errors, fmt.Errorf("Filename %s", err)) + } + case "ChildDirs": + var errors2 []error + ret.ChildDirs, errors2 = parseDirsSlice(el.Value, dirs, files) + errors = append(errors, errors2...) + case "ChildFiles": + var errors2 []error + ret.ChildFiles, errors2 = parseFilesSlice(el.Value, files) + errors = append(errors, errors2...) + default: + errors = append(errors, fmt.Errorf("Unknown field: %v: %#v", key, el.Value)) + } + } + } + return ret, errors +} + +// parseFile parses an embedded.EmbeddedFile literal. +// It can be either a variable name or a composite literal. +// Returns nil if the literal is not embedded.EmbeddedFile. 
+func parseFile(expr ast.Expr, files map[string]*registeredFile) (*registeredFile, []error) { + if varName, ok := getIdentName(expr); ok { + file, ok := files[varName] + if !ok { + return nil, []error{fmt.Errorf("unknown variable %v", varName)} + } + return file, nil + } + + lit, ok := expr.(*ast.CompositeLit) + if !ok { + return nil, []error{fmt.Errorf("file is not a composite literal: %#v", expr)} + } + + var errors []error + if !isSimpleSelector("embedded", "EmbeddedFile", lit.Type) { + return nil, nil + } + ret := ®isteredFile{} + for _, el := range lit.Elts { + if el, ok := el.(*ast.KeyValueExpr); ok { + key := getKey(el) + if key == "" { + continue + } + switch key { + case "FileModTime": + var err error + ret.ModTime, err = parseModTime(el.Value) + if err != nil { + errors = append(errors, fmt.Errorf("DirModTime %s", err)) + } + case "Filename": + var err error + ret.Filename, err = parseString(el.Value) + if err != nil { + errors = append(errors, fmt.Errorf("Filename %s", err)) + } + case "Content": + var err error + ret.Content, err = parseString(el.Value) + if err != nil { + errors = append(errors, fmt.Errorf("Content %s", err)) + } + default: + errors = append(errors, fmt.Errorf("Unknown field: %v: %#v", key, el.Value)) + } + } + } + return ret, errors +} + +func parseRegistration(lit *ast.CompositeLit, dirs map[string]*registeredDir, files map[string]*registeredFile) (*registeredBox, []error) { + var errors []error + if !isSimpleSelector("embedded", "EmbeddedBox", lit.Type) { + return nil, nil + } + ret := ®isteredBox{ + Dirs: make(map[string]*registeredDir), + Files: make(map[string]*registeredFile), + } + for _, el := range lit.Elts { + if el, ok := el.(*ast.KeyValueExpr); ok { + key := getKey(el) + if key == "" { + continue + } + switch key { + case "Time": + var err error + ret.Time, err = parseModTime(el.Value) + if err != nil { + errors = append(errors, fmt.Errorf("Time %s", err)) + } + case "Name": + var err error + ret.Name, err = 
parseString(el.Value) + if err != nil { + errors = append(errors, fmt.Errorf("Name %s", err)) + } + case "Dirs": + var errors2 []error + ret.Dirs, errors2 = parseDirsMap(el.Value, dirs, files) + errors = append(errors, errors2...) + case "Files": + var errors2 []error + ret.Files, errors2 = parseFilesMap(el.Value, files) + errors = append(errors, errors2...) + default: + errors = append(errors, fmt.Errorf("Unknown field: %v: %#v", key, el.Value)) + } + } + } + return ret, errors +} + +func parseDirsSlice(expr ast.Expr, dirs map[string]*registeredDir, files map[string]*registeredFile) (childDirs []*registeredDir, errors []error) { + valid := false + lit, ok := expr.(*ast.CompositeLit) + if ok { + if arrType, ok := lit.Type.(*ast.ArrayType); ok { + if star, ok := arrType.Elt.(*ast.StarExpr); ok { + if isSimpleSelector("embedded", "EmbeddedDir", star.X) { + valid = true + } + } + } + } + + if !valid { + return nil, []error{fmt.Errorf("not a []*embedded.EmbeddedDir: %#v", expr)} + } + for _, el := range lit.Elts { + child, childErrors := parseDir(el, dirs, files) + errors = append(errors, childErrors...) + childDirs = append(childDirs, child) + } + return +} + +func parseFilesSlice(expr ast.Expr, files map[string]*registeredFile) (childFiles []*registeredFile, errors []error) { + valid := false + lit, ok := expr.(*ast.CompositeLit) + if ok { + if arrType, ok := lit.Type.(*ast.ArrayType); ok { + if star, ok := arrType.Elt.(*ast.StarExpr); ok { + if isSimpleSelector("embedded", "EmbeddedFile", star.X) { + valid = true + } + } + } + } + + if !valid { + return nil, []error{fmt.Errorf("not a []*embedded.EmbeddedFile: %#v", expr)} + } + for _, el := range lit.Elts { + child, childErrors := parseFile(el, files) + errors = append(errors, childErrors...) 
+ childFiles = append(childFiles, child) + } + return +} + +func parseDirsMap(expr ast.Expr, dirs map[string]*registeredDir, files map[string]*registeredFile) (childDirs map[string]*registeredDir, errors []error) { + valid := false + lit, ok := expr.(*ast.CompositeLit) + if ok { + if mapType, ok := lit.Type.(*ast.MapType); ok { + if star, ok := mapType.Value.(*ast.StarExpr); ok { + if isSimpleSelector("embedded", "EmbeddedDir", star.X) && isIdent("string", mapType.Key) { + valid = true + } + } + } + } + + if !valid { + return nil, []error{fmt.Errorf("not a map[string]*embedded.EmbeddedDir: %#v", expr)} + } + childDirs = make(map[string]*registeredDir) + for _, el := range lit.Elts { + kv, ok := el.(*ast.KeyValueExpr) + if !ok { + errors = append(errors, fmt.Errorf("not a KeyValueExpr: %#v", el)) + continue + } + key, err := parseString(kv.Key) + if err != nil { + errors = append(errors, fmt.Errorf("key %s", err)) + continue + } + + child, childErrors := parseDir(kv.Value, dirs, files) + errors = append(errors, childErrors...) 
+ childDirs[key] = child + } + return +} + +func parseFilesMap(expr ast.Expr, files map[string]*registeredFile) (childFiles map[string]*registeredFile, errors []error) { + valid := false + lit, ok := expr.(*ast.CompositeLit) + if ok { + if mapType, ok := lit.Type.(*ast.MapType); ok { + if star, ok := mapType.Value.(*ast.StarExpr); ok { + if isSimpleSelector("embedded", "EmbeddedFile", star.X) && isIdent("string", mapType.Key) { + valid = true + } + } + } + } + + if !valid { + return nil, []error{fmt.Errorf("not a map[string]*embedded.EmbeddedFile: %#v", expr)} + } + childFiles = make(map[string]*registeredFile) + for _, el := range lit.Elts { + kv, ok := el.(*ast.KeyValueExpr) + if !ok { + errors = append(errors, fmt.Errorf("not a KeyValueExpr: %#v", el)) + continue + } + key, err := parseString(kv.Key) + if err != nil { + errors = append(errors, fmt.Errorf("key %s", err)) + continue + } + + child, childErrors := parseFile(kv.Value, files) + errors = append(errors, childErrors...) + childFiles[key] = child + } + return +} + +// unpoint returns the expression expr points to +// if expr is a & unary expression. 
+func unpoint(expr ast.Expr) ast.Expr { + if expr, ok := expr.(*ast.UnaryExpr); ok { + if expr.Op == token.AND { + return expr.X + } + } + return expr +} + +func validateBoxFile(t *testing.T, filename string, src io.Reader, sourceFiles []sourceFile) { + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, filename, src, 0) + if err != nil { + t.Error(err) + return + } + + var initFunc *ast.FuncDecl + for _, decl := range f.Decls { + if decl, ok := decl.(*ast.FuncDecl); ok && decl.Name != nil && decl.Name.Name == "init" { + initFunc = decl + break + } + } + if initFunc == nil { + t.Fatal("init function not found in generated file") + } + if initFunc.Body == nil { + t.Fatal("init function has no body in generated file") + } + var registrations []*ast.CallExpr + directories := make(map[string]*registeredDir) + files := make(map[string]*registeredFile) + _ = directories + _ = files + for _, stmt := range initFunc.Body.List { + if stmt, ok := stmt.(*ast.ExprStmt); ok { + if call, ok := stmt.X.(*ast.CallExpr); ok { + registrations = append(registrations, call) + } + continue + } + if stmt, ok := stmt.(*ast.AssignStmt); ok { + for i, rhs := range stmt.Rhs { + // Rhs can be EmbeddedDir or EmbeddedFile. 
+ var literal *ast.CompositeLit + literal, ok := unpoint(rhs).(*ast.CompositeLit) + if !ok { + continue + } + if lhs, ok := stmt.Lhs[i].(*ast.Ident); ok { + // variable + edir, direrrs := parseDir(literal, directories, files) + efile, fileerrs := parseFile(literal, files) + abort := false + for _, err := range direrrs { + t.Error("error while parsing dir: ", err) + abort = true + } + for _, err := range fileerrs { + t.Error("error while parsing file: ", err) + abort = true + } + if abort { + return + } + + if edir == nil && efile == nil { + continue + } + if edir != nil { + directories[lhs.Name] = edir + } else { + files[lhs.Name] = efile + } + } else if lhs, ok := stmt.Lhs[i].(*ast.SelectorExpr); ok { + selName, ok := getIdentName(lhs.Sel) + if !ok || selName != "ChildDirs" { + continue + } + varName, ok := getIdentName(lhs.X) + if !ok { + t.Fatalf("cannot parse ChildDirs assignment: %#v", lhs) + } + dir, ok := directories[varName] + if !ok { + t.Fatalf("variable %v not found", varName) + } + + var errors []error + dir.ChildDirs, errors = parseDirsSlice(rhs, directories, files) + + abort := false + for _, err := range errors { + t.Errorf("error parsing child dirs: %s", err) + abort = true + } + if abort { + return + } + } + } + } + } + if len(registrations) == 0 { + t.Fatal("could not find registration of embedded box") + } + + boxes := make(map[string]*registeredBox) + + for _, call := range registrations { + if isSimpleSelector("embedded", "RegisterEmbeddedBox", call.Fun) { + if len(call.Args) != 2 { + t.Fatalf("incorrect arguments to embedded.RegisterEmbeddedBox: %#v", call.Args) + } + boxArg := unpoint(call.Args[1]) + name, err := parseString(call.Args[0]) + if err != nil { + t.Fatalf("first argument to embedded.RegisterEmbeddedBox incorrect: %s", err) + } + boxLit, ok := boxArg.(*ast.CompositeLit) + if !ok { + t.Fatalf("second argument to embedded.RegisterEmbeddedBox is not a composite literal: %#v", boxArg) + } + abort := false + box, errors := 
parseRegistration(boxLit, directories, files) + for _, err := range errors { + t.Error("error while parsing box: ", err) + abort = true + } + if abort { + return + } + if box == nil { + t.Fatalf("second argument to embedded.RegisterEmbeddedBox is not an embedded.EmbeddedBox: %#v", boxArg) + } + if box.Name != name { + t.Fatalf("first argument to embedded.RegisterEmbeddedBox is not the same as the name in the second argument: %v, %#v", name, boxArg) + } + boxes[name] = box + } + } + + // Validate that all boxes are present. + if _, ok := boxes["foo"]; !ok { + t.Error("box \"foo\" not found") + } + for _, box := range boxes { + validateBox(t, box, sourceFiles) + } +} + +func validateBox(t *testing.T, box *registeredBox, files []sourceFile) { + dirsToBeChecked := make(map[string]struct{}) + filesToBeChecked := make(map[string]string) + for _, file := range files { + if !strings.HasPrefix(file.Name, box.Name) { + continue + } + pathParts := strings.Split(file.Name, "/") + dirs := pathParts[:len(pathParts)-1] + dirPath := "" + for _, dir := range dirs { + if dir != box.Name { + dirPath = path.Join(dirPath, dir) + } + dirsToBeChecked[dirPath] = struct{}{} + } + filesToBeChecked[path.Join(dirPath, pathParts[len(pathParts)-1])] = string(file.Contents) + } + + if len(box.Files) != len(filesToBeChecked) { + t.Errorf("box %v has incorrect number of files; expected %v, got %v", box.Name, len(filesToBeChecked), len(box.Files)) + } + + if len(box.Dirs) != len(dirsToBeChecked) { + t.Errorf("box %v has incorrect number of dirs; expected %v, got %v", box.Name, len(dirsToBeChecked), len(box.Dirs)) + } + + for name, content := range filesToBeChecked { + f, ok := box.Files[name] + if !ok { + t.Errorf("file %v not present in box %v", name, box.Name) + continue + } + if f.Filename != name { + t.Errorf("box %v: filename mismatch: key: %v; Filename: %v", box.Name, name, f.Filename) + } + if f.Content != content { + t.Errorf("box %v: file %v content does not match: got %v, expected %v", 
box.Name, name, f.Content, content) + } + dirPath, _ := path.Split(name) + dirPath = strings.TrimSuffix(dirPath, "/") + dir, ok := box.Dirs[dirPath] + if !ok { + t.Errorf("directory %v not present in box %v", dirPath, box.Name) + continue + } + found := false + for _, file := range dir.ChildFiles { + if file == f { + found = true + } + } + if !found { + t.Errorf("file %v not found in directory %v in box %v", name, dirPath, box.Name) + continue + } + } + for name := range dirsToBeChecked { + d, ok := box.Dirs[name] + if !ok { + t.Errorf("directory %v not present in box %v", name, box.Name) + continue + } + if d.Filename != name { + t.Errorf("box %v: filename mismatch: key: %v; Filename: %v", box.Name, name, d.Filename) + } + if name != "" { + dirPath, _ := path.Split(name) + dirPath = strings.TrimSuffix(dirPath, "/") + dir, ok := box.Dirs[dirPath] + if !ok { + t.Errorf("directory %v not present in box %v", dirPath, box.Name) + continue + } + found := false + for _, dir := range dir.ChildDirs { + if dir == d { + found = true + } + } + if !found { + t.Errorf("directory %v not found in directory %v in box %v", name, dirPath, box.Name) + continue + } + } + } +} diff --git a/vendor/github.com/GeertJohan/go.rice/rice/identifier.go b/vendor/github.com/GeertJohan/go.rice/rice/identifier.go new file mode 100644 index 0000000000..445ee7daa4 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/rice/identifier.go @@ -0,0 +1,14 @@ +package main + +import ( + "strconv" + + "github.com/GeertJohan/go.incremental" +) + +var identifierCount incremental.Uint64 + +func nextIdentifier() string { + num := identifierCount.Next() + return strconv.FormatUint(num, 36) // 0123456789abcdefghijklmnopqrstuvwxyz +} diff --git a/vendor/github.com/GeertJohan/go.rice/rice/main.go b/vendor/github.com/GeertJohan/go.rice/rice/main.go new file mode 100644 index 0000000000..b9979a93a0 --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/rice/main.go @@ -0,0 +1,88 @@ +package main + +import ( + 
"fmt" + "go/build" + "log" + "os" + "runtime/pprof" +) + +func main() { + // parser arguments + parseArguments() + + if flags.CpuProfile != "" { + f, err := os.Create(flags.CpuProfile) + if err != nil { + log.Fatal(err) + } + pprof.StartCPUProfile(f) + defer pprof.StopCPUProfile() + } + + // find package for path + var pkgs []*build.Package + for _, importPath := range flags.ImportPaths { + pkg := pkgForPath(importPath) + pkg.AllTags = flags.Tags + pkgs = append(pkgs, pkg) + } + + // switch on the operation to perform + switch flagsParser.Active.Name { + case "embed", "embed-go": + for _, pkg := range pkgs { + operationEmbedGo(pkg) + } + case "embed-syso": + log.Println("WARNING: embedding .syso is experimental..") + for _, pkg := range pkgs { + operationEmbedSyso(pkg) + } + case "append": + operationAppend(pkgs) + case "clean": + for _, pkg := range pkgs { + operationClean(pkg) + } + } + + // all done + verbosef("\n") + verbosef("rice finished successfully\n") + + if flags.MemProfile != "" { + f, err := os.Create(flags.MemProfile) + if err != nil { + log.Fatal(err) + } + pprof.WriteHeapProfile(f) + f.Close() + } +} + +// helper function to get *build.Package for given path +func pkgForPath(path string) *build.Package { + // get pwd for relative imports + pwd, err := os.Getwd() + if err != nil { + fmt.Printf("error getting pwd (required for relative imports): %s\n", err) + os.Exit(1) + } + + // read full package information + pkg, err := build.Import(path, pwd, 0) + if err != nil { + fmt.Printf("error reading package: %s\n", err) + os.Exit(1) + } + + return pkg +} + +func verbosef(format string, stuff ...interface{}) { + if flags.Verbose { + log.Printf(format, stuff...) 
+ } +} diff --git a/vendor/github.com/GeertJohan/go.rice/rice/templates.go b/vendor/github.com/GeertJohan/go.rice/rice/templates.go new file mode 100644 index 0000000000..7381db7d1e --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/rice/templates.go @@ -0,0 +1,177 @@ +package main + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + "text/template" + + "github.com/nkovacs/streamquote" + "github.com/valyala/fasttemplate" +) + +var ( + tmplEmbeddedBox *template.Template + tagEscaper, tagUnescaper *strings.Replacer +) + +const ( + unescapeTag = "unescape:" + injectTag = "injectfile:" +) + +func init() { + var err error + + // $ is used as the escaping character, + // because it has no special meaning in go strings, + // so it won't be changed by strconv.Quote. + replacements := []string{"$", "$$", "{%", "{$%", "%}", "%$}"} + reverseReplacements := make([]string, len(replacements)) + l := len(reverseReplacements) - 1 + for i := range replacements { + reverseReplacements[l-i] = replacements[i] + } + tagEscaper = strings.NewReplacer(replacements...) + tagUnescaper = strings.NewReplacer(reverseReplacements...) 
+ + // parse embedded box template + tmplEmbeddedBox, err = template.New("embeddedBox").Funcs(template.FuncMap{ + "tagescape": func(s string) string { + return fmt.Sprintf("{%%%v%v%%}", unescapeTag, tagEscaper.Replace(s)) + }, + "injectfile": func(s string) string { + return fmt.Sprintf("{%%%v%v%%}", injectTag, tagEscaper.Replace(s)) + }, + }).Parse(`package {{.Package}} + +import ( + "time" + + "github.com/GeertJohan/go.rice/embedded" +) + +{{range .Boxes}} +func init() { + + // define files + {{range .Files}}{{.Identifier}} := &embedded.EmbeddedFile{ + Filename: {{.FileName | tagescape | printf "%q"}}, + FileModTime: time.Unix({{.ModTime}}, 0), + + Content: string({{.Path | injectfile | printf "%q"}}), + } + {{end}} + + // define dirs + {{range .Dirs}}{{.Identifier}} := &embedded.EmbeddedDir{ + Filename: {{.FileName | tagescape | printf "%q"}}, + DirModTime: time.Unix({{.ModTime}}, 0), + ChildFiles: []*embedded.EmbeddedFile{ + {{range .ChildFiles}}{{.Identifier}}, // {{.FileName | tagescape | printf "%q"}} + {{end}} + }, + } + {{end}} + + // link ChildDirs + {{range .Dirs}}{{.Identifier}}.ChildDirs = []*embedded.EmbeddedDir{ + {{range .ChildDirs}}{{.Identifier}}, // {{.FileName | tagescape | printf "%q"}} + {{end}} + } + {{end}} + + // register embeddedBox + embedded.RegisterEmbeddedBox(` + "`" + `{{.BoxName}}` + "`" + `, &embedded.EmbeddedBox{ + Name: ` + "`" + `{{.BoxName}}` + "`" + `, + Time: time.Unix({{.UnixNow}}, 0), + Dirs: map[string]*embedded.EmbeddedDir{ + {{range .Dirs}}{{.FileName | tagescape | printf "%q"}}: {{.Identifier}}, + {{end}} + }, + Files: map[string]*embedded.EmbeddedFile{ + {{range .Files}}{{.FileName | tagescape | printf "%q"}}: {{.Identifier}}, + {{end}} + }, + }) +} +{{end}}`) + if err != nil { + fmt.Printf("error parsing embedded box template: %s\n", err) + os.Exit(-1) + } +} + +// embeddedBoxFasttemplate will inject file contents and unescape {% and %}. 
+func embeddedBoxFasttemplate(w io.Writer, src string) error { + ft, err := fasttemplate.NewTemplate(src, "{%", "%}") + if err != nil { + return fmt.Errorf("error compiling fasttemplate: %s\n", err) + } + + converter := streamquote.New() + + _, err = ft.ExecuteFunc(w, func(w io.Writer, tag string) (int, error) { + if strings.HasPrefix(tag, unescapeTag) { + tag = strings.TrimPrefix(tag, unescapeTag) + return w.Write([]byte(tagUnescaper.Replace(tag))) + } + if !strings.HasPrefix(tag, injectTag) { + return 0, fmt.Errorf("invalid fasttemplate tag: %v", tag) + } + tag = strings.TrimPrefix(tag, injectTag) + + fileName, err := strconv.Unquote("\"" + tag + "\"") + if err != nil { + return 0, fmt.Errorf("error unquoting filename %v: %v\n", tag, err) + } + f, err := os.Open(tagUnescaper.Replace(fileName)) + if err != nil { + return 0, fmt.Errorf("error opening file %v: %v\n", fileName, err) + } + + n, err := converter.Convert(f, w) + + f.Close() + if err != nil { + return n, fmt.Errorf("error converting file %v: %v\n", fileName, err) + } + + return n, nil + }) + if err != nil { + return fmt.Errorf("error executing fasttemplate: %s\n", err) + } + + return nil +} + +type embedFileDataType struct { + Package string + Boxes []*boxDataType +} + +type boxDataType struct { + BoxName string + UnixNow int64 + Files []*fileDataType + Dirs map[string]*dirDataType +} + +type fileDataType struct { + Identifier string + FileName string + Path string + ModTime int64 +} + +type dirDataType struct { + Identifier string + FileName string + Content []byte + ModTime int64 + ChildDirs []*dirDataType + ChildFiles []*fileDataType +} diff --git a/vendor/github.com/GeertJohan/go.rice/rice/util.go b/vendor/github.com/GeertJohan/go.rice/rice/util.go new file mode 100644 index 0000000000..863dca852d --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/rice/util.go @@ -0,0 +1,31 @@ +package main + +import ( + "math/rand" + "path/filepath" + "strings" + "time" +) + +// generated tests if a filename 
was generated by rice +func generated(filename string) bool { + return filepath.Base(filename) == boxFilename || + strings.HasSuffix(filename, "."+boxFilename) || + strings.HasSuffix(filename, sysoBoxSuffix) +} + +// randomString generates a pseudo-random alpha-numeric string with given length. +func randomString(length int) string { + rand.Seed(time.Now().UnixNano()) + k := make([]rune, length) + for i := 0; i < length; i++ { + c := rand.Intn(35) + if c < 10 { + c += 48 // numbers (0-9) (0+48 == 48 == '0', 9+48 == 57 == '9') + } else { + c += 87 // lower case alphabets (a-z) (10+87 == 97 == 'a', 35+87 == 122 = 'z') + } + k[i] = rune(c) + } + return string(k) +} diff --git a/vendor/github.com/GeertJohan/go.rice/rice/writecoff.go b/vendor/github.com/GeertJohan/go.rice/rice/writecoff.go new file mode 100644 index 0000000000..0c12c0ffcb --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/rice/writecoff.go @@ -0,0 +1,42 @@ +package main + +import ( + "fmt" + "os" + "reflect" + + "github.com/akavel/rsrc/binutil" + "github.com/akavel/rsrc/coff" +) + +// copied from github.com/akavel/rsrc +// LICENSE: MIT +// Copyright 2013-2014 The rsrc Authors. 
(https://github.com/akavel/rsrc/blob/master/AUTHORS) +func writeCoff(coff *coff.Coff, fnameout string) error { + out, err := os.Create(fnameout) + if err != nil { + return err + } + defer out.Close() + w := binutil.Writer{W: out} + + // write the resulting file to disk + binutil.Walk(coff, func(v reflect.Value, path string) error { + if binutil.Plain(v.Kind()) { + w.WriteLE(v.Interface()) + return nil + } + vv, ok := v.Interface().(binutil.SizedReader) + if ok { + w.WriteFromSized(vv) + return binutil.WALK_SKIP + } + return nil + }) + + if w.Err != nil { + return fmt.Errorf("Error writing output file: %s", w.Err) + } + + return nil +} diff --git a/vendor/github.com/GeertJohan/go.rice/sort.go b/vendor/github.com/GeertJohan/go.rice/sort.go new file mode 100644 index 0000000000..cd83c658ed --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/sort.go @@ -0,0 +1,19 @@ +package rice + +import "os" + +// SortByName allows an array of os.FileInfo objects +// to be easily sorted by filename using sort.Sort(SortByName(array)) +type SortByName []os.FileInfo + +func (f SortByName) Len() int { return len(f) } +func (f SortByName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } +func (f SortByName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } + +// SortByModified allows an array of os.FileInfo objects +// to be easily sorted by modified date using sort.Sort(SortByModified(array)) +type SortByModified []os.FileInfo + +func (f SortByModified) Len() int { return len(f) } +func (f SortByModified) Less(i, j int) bool { return f[i].ModTime().Unix() > f[j].ModTime().Unix() } +func (f SortByModified) Swap(i, j int) { f[i], f[j] = f[j], f[i] } diff --git a/vendor/github.com/GeertJohan/go.rice/virtual.go b/vendor/github.com/GeertJohan/go.rice/virtual.go new file mode 100644 index 0000000000..b175849b7e --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/virtual.go @@ -0,0 +1,304 @@ +package rice + +import ( + "errors" + "io" + "os" + "path/filepath" + "sort" + + 
"github.com/GeertJohan/go.rice/embedded" +) + +//++ TODO: IDEA: merge virtualFile and virtualDir, this decreases work done by rice.File + +// virtualFile is a 'stateful' virtual file. +// virtualFile wraps an *EmbeddedFile for a call to Box.Open() and virtualizes 'read cursor' (offset) and 'closing'. +// virtualFile is only internally visible and should be exposed through rice.File +type virtualFile struct { + *embedded.EmbeddedFile // the actual embedded file, embedded to obtain methods + offset int64 // read position on the virtual file + closed bool // closed when true +} + +// create a new virtualFile for given EmbeddedFile +func newVirtualFile(ef *embedded.EmbeddedFile) *virtualFile { + vf := &virtualFile{ + EmbeddedFile: ef, + offset: 0, + closed: false, + } + return vf +} + +//++ TODO check for nil pointers in all these methods. When so: return os.PathError with Err: os.ErrInvalid + +func (vf *virtualFile) close() error { + if vf.closed { + return &os.PathError{ + Op: "close", + Path: vf.EmbeddedFile.Filename, + Err: errors.New("already closed"), + } + } + vf.EmbeddedFile = nil + vf.closed = true + return nil +} + +func (vf *virtualFile) stat() (os.FileInfo, error) { + if vf.closed { + return nil, &os.PathError{ + Op: "stat", + Path: vf.EmbeddedFile.Filename, + Err: errors.New("bad file descriptor"), + } + } + return (*embeddedFileInfo)(vf.EmbeddedFile), nil +} + +func (vf *virtualFile) readdir(count int) ([]os.FileInfo, error) { + if vf.closed { + return nil, &os.PathError{ + Op: "readdir", + Path: vf.EmbeddedFile.Filename, + Err: errors.New("bad file descriptor"), + } + } + return nil, os.ErrInvalid +} + +func (vf *virtualFile) readdirnames(count int) ([]string, error) { + if vf.closed { + return nil, &os.PathError{ + Op: "readdirnames", + Path: vf.EmbeddedFile.Filename, + Err: errors.New("bad file descriptor"), + } + } + return nil, os.ErrInvalid +} + +func (vf *virtualFile) read(bts []byte) (int, error) { + if vf.closed { + return 0, &os.PathError{ + Op: 
"read", + Path: vf.EmbeddedFile.Filename, + Err: errors.New("bad file descriptor"), + } + } + + end := vf.offset + int64(len(bts)) + + if end >= int64(len(vf.Content)) { + // end of file, so return what we have + EOF + n := copy(bts, vf.Content[vf.offset:]) + vf.offset = 0 + return n, io.EOF + } + + n := copy(bts, vf.Content[vf.offset:end]) + vf.offset += int64(n) + return n, nil + +} + +func (vf *virtualFile) seek(offset int64, whence int) (int64, error) { + if vf.closed { + return 0, &os.PathError{ + Op: "seek", + Path: vf.EmbeddedFile.Filename, + Err: errors.New("bad file descriptor"), + } + } + var e error + + //++ TODO: check if this is correct implementation for seek + switch whence { + case os.SEEK_SET: + //++ check if new offset isn't out of bounds, set e when it is, then break out of switch + vf.offset = offset + case os.SEEK_CUR: + //++ check if new offset isn't out of bounds, set e when it is, then break out of switch + vf.offset += offset + case os.SEEK_END: + //++ check if new offset isn't out of bounds, set e when it is, then break out of switch + vf.offset = int64(len(vf.EmbeddedFile.Content)) - offset + } + + if e != nil { + return 0, &os.PathError{ + Op: "seek", + Path: vf.Filename, + Err: e, + } + } + + return vf.offset, nil +} + +// virtualDir is a 'stateful' virtual directory. +// virtualDir wraps an *EmbeddedDir for a call to Box.Open() and virtualizes 'closing'. +// virtualDir is only internally visible and should be exposed through rice.File +type virtualDir struct { + *embedded.EmbeddedDir + offset int // readdir position on the directory + closed bool +} + +// create a new virtualDir for given EmbeddedDir +func newVirtualDir(ed *embedded.EmbeddedDir) *virtualDir { + vd := &virtualDir{ + EmbeddedDir: ed, + offset: 0, + closed: false, + } + return vd +} + +func (vd *virtualDir) close() error { + //++ TODO: needs sync mutex? 
+ if vd.closed { + return &os.PathError{ + Op: "close", + Path: vd.EmbeddedDir.Filename, + Err: errors.New("already closed"), + } + } + vd.closed = true + return nil +} + +func (vd *virtualDir) stat() (os.FileInfo, error) { + if vd.closed { + return nil, &os.PathError{ + Op: "stat", + Path: vd.EmbeddedDir.Filename, + Err: errors.New("bad file descriptor"), + } + } + return (*embeddedDirInfo)(vd.EmbeddedDir), nil +} + +func (vd *virtualDir) readdir(n int) ([]os.FileInfo, error) { + + if vd.closed { + return nil, &os.PathError{ + Op: "readdir", + Path: vd.EmbeddedDir.Filename, + Err: errors.New("bad file descriptor"), + } + } + + // Build up the array of our contents + var files []os.FileInfo + + // Add the child directories + for _, child := range vd.ChildDirs { + child.Filename = filepath.Base(child.Filename) + files = append(files, (*embeddedDirInfo)(child)) + } + + // Add the child files + for _, child := range vd.ChildFiles { + child.Filename = filepath.Base(child.Filename) + files = append(files, (*embeddedFileInfo)(child)) + } + + // Sort it by filename (lexical order) + sort.Sort(SortByName(files)) + + // Return all contents if that's what is requested + if n <= 0 { + vd.offset = 0 + return files, nil + } + + // If user has requested past the end of our list + // return what we can and send an EOF + if vd.offset+n >= len(files) { + offset := vd.offset + vd.offset = 0 + return files[offset:], io.EOF + } + + offset := vd.offset + vd.offset += n + return files[offset : offset+n], nil + +} + +func (vd *virtualDir) readdirnames(n int) ([]string, error) { + + if vd.closed { + return nil, &os.PathError{ + Op: "readdir", + Path: vd.EmbeddedDir.Filename, + Err: errors.New("bad file descriptor"), + } + } + + // Build up the array of our contents + var files []string + + // Add the child directories + for _, child := range vd.ChildDirs { + files = append(files, filepath.Base(child.Filename)) + } + + // Add the child files + for _, child := range vd.ChildFiles { + files 
= append(files, filepath.Base(child.Filename)) + } + + // Sort it by filename (lexical order) + sort.Strings(files) + + // Return all contents if that's what is requested + if n <= 0 { + vd.offset = 0 + return files, nil + } + + // If user has requested past the end of our list + // return what we can and send an EOF + if vd.offset+n >= len(files) { + offset := vd.offset + vd.offset = 0 + return files[offset:], io.EOF + } + + offset := vd.offset + vd.offset += n + return files[offset : offset+n], nil +} + +func (vd *virtualDir) read(bts []byte) (int, error) { + if vd.closed { + return 0, &os.PathError{ + Op: "read", + Path: vd.EmbeddedDir.Filename, + Err: errors.New("bad file descriptor"), + } + } + return 0, &os.PathError{ + Op: "read", + Path: vd.EmbeddedDir.Filename, + Err: errors.New("is a directory"), + } +} + +func (vd *virtualDir) seek(offset int64, whence int) (int64, error) { + if vd.closed { + return 0, &os.PathError{ + Op: "seek", + Path: vd.EmbeddedDir.Filename, + Err: errors.New("bad file descriptor"), + } + } + return 0, &os.PathError{ + Op: "seek", + Path: vd.Filename, + Err: errors.New("is a directory"), + } +} diff --git a/vendor/github.com/GeertJohan/go.rice/walk.go b/vendor/github.com/GeertJohan/go.rice/walk.go new file mode 100644 index 0000000000..ee08ed5f0d --- /dev/null +++ b/vendor/github.com/GeertJohan/go.rice/walk.go @@ -0,0 +1,122 @@ +package rice + +import ( + "os" + "path/filepath" + "sort" + "strings" +) + +// Walk is like filepath.Walk() +// Visit http://golang.org/pkg/path/filepath/#Walk for more information +func (b *Box) Walk(path string, walkFn filepath.WalkFunc) error { + + pathFile, err := b.Open(path) + if err != nil { + return err + } + defer pathFile.Close() + + pathInfo, err := pathFile.Stat() + if err != nil { + return err + } + + if b.IsAppended() || b.IsEmbedded() { + return b.walk(path, pathInfo, walkFn) + } + + // We don't have any embedded or appended box so use live filesystem mode + return 
filepath.Walk(filepath.Join(b.absolutePath, path), func(path string, info os.FileInfo, err error) error { + + // Strip out the box name from the returned paths + path = strings.TrimPrefix(path, b.absolutePath+string(os.PathSeparator)) + return walkFn(path, info, err) + + }) + +} + +// walk recursively descends path. +// See walk() in $GOROOT/src/pkg/path/filepath/path.go +func (b *Box) walk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + + err := walkFn(path, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + + if !info.IsDir() { + return nil + } + + names, err := b.readDirNames(path) + if err != nil { + return walkFn(path, info, err) + } + + for _, name := range names { + + filename := filepath.ToSlash(filepath.Join(path, name)) + fileObject, err := b.Open(filename) + if err != nil { + return err + } + defer fileObject.Close() + + fileInfo, err := fileObject.Stat() + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = b.walk(filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + + return nil + +} + +// readDirNames reads the directory named by path and returns a sorted list of directory entries. 
+// See readDirNames() in $GOROOT/pkg/path/filepath/path.go +func (b *Box) readDirNames(path string) ([]string, error) { + + f, err := b.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + stat, err := f.Stat() + if err != nil { + return nil, err + } + + if !stat.IsDir() { + return nil, nil + } + + infos, err := f.Readdir(0) + if err != nil { + return nil, err + } + + var names []string + + for _, info := range infos { + names = append(names, info.Name()) + } + + sort.Strings(names) + return names, nil + +} diff --git a/vendor/github.com/OpenBazaar/go-ethwallet/wallet/erc20_wallet.go b/vendor/github.com/OpenBazaar/go-ethwallet/wallet/erc20_wallet.go index d842fa7bb2..0a98f395c7 100644 --- a/vendor/github.com/OpenBazaar/go-ethwallet/wallet/erc20_wallet.go +++ b/vendor/github.com/OpenBazaar/go-ethwallet/wallet/erc20_wallet.go @@ -451,8 +451,8 @@ func (wallet *ERC20Wallet) callListeners(txnCB wi.TransactionCallback) { } // BumpFee - Bump the fee for the given transaction -func (wallet *ERC20Wallet) BumpFee(txid chainhash.Hash) (*chainhash.Hash, error) { - return chainhash.NewHashFromStr(txid.String()) +func (wallet *ERC20Wallet) BumpFee(txid chainhash.Hash) (string, error) { + return txid.String(), nil } // EstimateFee - Calculates the estimated size of the transaction and returns the total fee for the given feePerByte @@ -476,8 +476,8 @@ func (wallet *ERC20Wallet) EstimateSpendFee(amount int64, feeLevel wi.FeeLevel) } // SweepAddress - Build and broadcast a transaction that sweeps all coins from an address. 
If it is a p2sh multisig, the redeemScript must be included -func (wallet *ERC20Wallet) SweepAddress(utxos []wi.TransactionInput, address *btcutil.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (*chainhash.Hash, error) { - return chainhash.NewHashFromStr("") +func (wallet *ERC20Wallet) SweepAddress(utxos []wi.TransactionInput, address *btcutil.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (string, error) { + return "", nil } // ExchangeRates - return the exchangerates diff --git a/vendor/github.com/OpenBazaar/go-ethwallet/wallet/wallet.go b/vendor/github.com/OpenBazaar/go-ethwallet/wallet/wallet.go index 3698cbfb28..f6d437ae1d 100644 --- a/vendor/github.com/OpenBazaar/go-ethwallet/wallet/wallet.go +++ b/vendor/github.com/OpenBazaar/go-ethwallet/wallet/wallet.go @@ -590,8 +590,8 @@ func (wallet *EthereumWallet) Transactions() ([]wi.Txn, error) { } // GetTransaction - Get info on a specific transaction -func (wallet *EthereumWallet) GetTransaction(txid chainhash.Hash) (wi.Txn, error) { - tx, _, err := wallet.client.GetTransaction(common.HexToHash(util.EnsureCorrectPrefix(txid.String()))) +func (wallet *EthereumWallet) GetTransaction(txid string) (wi.Txn, error) { + tx, _, err := wallet.client.GetTransaction(common.HexToHash(util.EnsureCorrectPrefix(txid))) if err != nil { return wi.Txn{}, err } @@ -651,17 +651,12 @@ func (wallet *EthereumWallet) GetTransaction(txid chainhash.Hash) (wi.Txn, error } // ChainTip - Get the height and best hash of the blockchain -func (wallet *EthereumWallet) ChainTip() (uint32, chainhash.Hash) { +func (wallet *EthereumWallet) ChainTip() (uint32, string) { num, hash, err := wallet.client.GetLatestBlock() if err != nil { - return 0, *emptyChainHash + return 0, "" } - h, err := util.CreateChainHash(hash.Hex()) - if err != nil { - log.Error(err.Error()) - h = emptyChainHash - } - return num, *h + return num, hash.String() } // GetFeePerByte - Get the current fee per byte @@ -677,14 
+672,14 @@ func (wallet *EthereumWallet) GetFeePerByte(feeLevel wi.FeeLevel) big.Int { ret, _ = big.NewFloat(est.Average * 100000000).Int(nil) case wi.ECONOMIC, wi.SUPER_ECONOMIC: ret, _ = big.NewFloat(est.SafeLow * 100000000).Int(nil) - case wi.PRIORITY, wi.FEE_BUMP: + case wi.PRIOIRTY, wi.FEE_BUMP: ret, _ = big.NewFloat(est.Fast * 100000000).Int(nil) } return *ret } // Spend - Send ether to an external wallet -func (wallet *EthereumWallet) Spend(amount big.Int, addr btcutil.Address, feeLevel wi.FeeLevel, referenceID string, spendAll bool) (*chainhash.Hash, error) { +func (wallet *EthereumWallet) Spend(amount big.Int, addr btcutil.Address, feeLevel wi.FeeLevel, referenceID string, spendAll bool) (string, error) { var ( hash common.Hash h *chainhash.Hash @@ -705,7 +700,7 @@ func (wallet *EthereumWallet) Spend(amount big.Int, addr btcutil.Address, feeLev // check if the addr is a multisig addr scripts, err := wallet.db.WatchedScripts().GetAll() if err != nil { - return nil, err + return "", err } isScript := false addrEth := common.HexToAddress(addr.String()) @@ -723,7 +718,7 @@ func (wallet *EthereumWallet) Spend(amount big.Int, addr btcutil.Address, feeLev if isScript { ethScript, err := DeserializeEthScript(redeemScript) if err != nil { - return nil, err + return "", err } _, scrHash, err := GenScriptHash(ethScript) if err != nil { @@ -734,22 +729,22 @@ func (wallet *EthereumWallet) Spend(amount big.Int, addr btcutil.Address, feeLev hash, _, err = wallet.callAddTransaction(ethScript, &amount, feeLevel) if err != nil { log.Errorf("error call add txn: %v", err) - return nil, wi.ErrInsufficientFunds + return "", wi.ErrInsufficientFunds } } else { if !wallet.balanceCheck(feeLevel, amount) { - return nil, wi.ErrInsufficientFunds + return "", wi.ErrInsufficientFunds } hash, err = wallet.Transfer(util.EnsureCorrectPrefix(addr.String()), &amount, spendAll, wallet.GetFeePerByte(feeLevel)) } if err != nil { - return nil, err + return "", err } // txn is pending nonce, err 
= wallet.client.GetTxnNonce(util.EnsureCorrectPrefix(hash.Hex())) if err != nil { - return nil, err + return "", err } } if err == nil { @@ -773,7 +768,7 @@ func (wallet *EthereumWallet) Spend(amount big.Int, addr btcutil.Address, feeLev log.Error(err0.Error()) } } - return h, nil + return h.String(), nil } func (wallet *EthereumWallet) createTxnCallback(txID, orderID string, toAddress btcutil.Address, value big.Int, bTime time.Time, withInput bool, height int64) wi.TransactionCallback { @@ -836,7 +831,7 @@ func (wallet *EthereumWallet) checkTxnRcpt(hash *common.Hash, data []byte) (*com if err != nil { return nil, err } - err = wallet.db.Txns().Delete(chash) + err = wallet.db.Txns().Delete(chash.String()) if err != nil { log.Errorf("err deleting the pending txn : %v", err) } @@ -859,8 +854,8 @@ func (wallet *EthereumWallet) checkTxnRcpt(hash *common.Hash, data []byte) (*com } // BumpFee - Bump the fee for the given transaction -func (wallet *EthereumWallet) BumpFee(txid chainhash.Hash) (*chainhash.Hash, error) { - return util.CreateChainHash(txid.String()) +func (wallet *EthereumWallet) BumpFee(txid string) (string, error) { + return txid, nil } // EstimateFee - Calculates the estimated size of the transaction and returns the total fee for the given feePerByte @@ -908,7 +903,7 @@ func (wallet *EthereumWallet) EstimateSpendFee(amount big.Int, feeLevel wi.FeeLe } // SweepAddress - Build and broadcast a transaction that sweeps all coins from an address. 
If it is a p2sh multisig, the redeemScript must be included -func (wallet *EthereumWallet) SweepAddress(utxos []wi.TransactionInput, address *btcutil.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (*chainhash.Hash, error) { +func (wallet *EthereumWallet) SweepAddress(utxos []wi.TransactionInput, address *btcutil.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (string, error) { outs := []wi.TransactionOutput{} for i, in := range utxos { @@ -923,16 +918,16 @@ func (wallet *EthereumWallet) SweepAddress(utxos []wi.TransactionInput, address sigs, err := wallet.CreateMultisigSignature([]wi.TransactionInput{}, outs, key, *redeemScript, *big.NewInt(1)) if err != nil { - return nil, err + return "", err } data, err := wallet.Multisign([]wi.TransactionInput{}, outs, sigs, []wi.Signature{}, *redeemScript, *big.NewInt(1), false) if err != nil { - return nil, err + return "", err } hash := common.BytesToHash(data) - return util.CreateChainHash(hash.Hex()) + return hash.Hex(), nil } // ExchangeRates - return the exchangerates @@ -1447,11 +1442,11 @@ func (wallet *EthereumWallet) ReSyncBlockchain(fromTime time.Time) { } // GetConfirmations - Return the number of confirmations and the height for a transaction -func (wallet *EthereumWallet) GetConfirmations(txid chainhash.Hash) (confirms, atHeight uint32, err error) { +func (wallet *EthereumWallet) GetConfirmations(txid string) (confirms, atHeight uint32, err error) { // TODO: etherscan api is being used // when mainnet is activated we may need a way to set the // url correctly - done 6 April 2019 - hash := common.HexToHash(util.EnsureCorrectPrefix(txid.String())) + hash := common.HexToHash(util.EnsureCorrectPrefix(txid)) network := etherscan.Rinkby if strings.Contains(wallet.client.url, "mainnet") { network = etherscan.Mainnet diff --git a/vendor/github.com/OpenBazaar/multiwallet/Dockerfile.dev b/vendor/github.com/OpenBazaar/multiwallet/Dockerfile.dev deleted file mode 100755 
index 2dc03c4878..0000000000 --- a/vendor/github.com/OpenBazaar/multiwallet/Dockerfile.dev +++ /dev/null @@ -1,10 +0,0 @@ -FROM golang:1.11 -VOLUME /var/lib/openbazaar - -WORKDIR /go/src/github.com/OpenBazaar/multiwallet -RUN curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh && \ - go get -u github.com/derekparker/delve/cmd/dlv - -COPY . . - -ENTRYPOINT ["/bin/bash"] diff --git a/vendor/github.com/OpenBazaar/multiwallet/Gopkg.lock b/vendor/github.com/OpenBazaar/multiwallet/Gopkg.lock deleted file mode 100644 index bb6ed82073..0000000000 --- a/vendor/github.com/OpenBazaar/multiwallet/Gopkg.lock +++ /dev/null @@ -1,845 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - branch = "master" - digest = "1:9f438ced0b98101978ebd63588a0e671406334aa2c711940351507e396b3de8d" - name = "github.com/OpenBazaar/go-ethwallet" - packages = [ - "util", - "wallet", - ] - pruneopts = "UT" - revision = "0d5cc6fff3a2f43ab3013c75ff6af14e2acc27df" - -[[projects]] - branch = "master" - digest = "1:12249c9e5740e17587799c25425a8058cb942da38e8226998cccc99a16dd9fe5" - name = "github.com/OpenBazaar/golang-socketio" - packages = [ - ".", - "protocol", - "transport", - ] - pruneopts = "UT" - revision = "4147b5f0d29491b7cacd6bf58d825f54fe2b24af" - -[[projects]] - digest = "1:37000ab67cf1d629a1611fb6f415d985b56449db3a537e215fec14499439914f" - name = "github.com/OpenBazaar/openbazaar-go" - packages = ["util"] - pruneopts = "UT" - revision = "2c8da24bc3ad1bf8827874e7bcb5151e240955bb" - version = "v0.13.8" - -[[projects]] - branch = "ethereum-master" - digest = "1:266d74b30258b3f6cff7c970b3dc7f9f3d61c8bed3ab16da0232e5860eb0ce45" - name = "github.com/OpenBazaar/spvwallet" - packages = [ - ".", - "exchangerates", - ] - pruneopts = "UT" - revision = "10951cd851492bdf52b4f7b408adcb6752312b23" - -[[projects]] - branch = "ethereum-master" - digest = "1:c411c1051b3ab7c823786bf82887580f2d93898be2504e145a67419afef5ba2d" - 
name = "github.com/OpenBazaar/wallet-interface" - packages = ["."] - pruneopts = "UT" - revision = "cbbb40466dcfe7d299f685fd0aa9a4fe9ea49147" - -[[projects]] - digest = "1:ef98291cf6c2dd0f53949a1899e9a58d3159ad44d20f74b62467bd2a807b01ce" - name = "github.com/VictoriaMetrics/fastcache" - packages = ["."] - pruneopts = "UT" - revision = "4d94f266cd3cecbcd97eaebee9e3d6d8cf918643" - version = "v1.4.6" - -[[projects]] - branch = "master" - digest = "1:7d191fd0c54ff370eaf6116a14dafe2a328df487baea280699f597aae858d00d" - name = "github.com/aristanetworks/goarista" - packages = ["monotime"] - pruneopts = "UT" - revision = "5d8d36c240c9af0ccde364594dd5fae756790b63" - -[[projects]] - digest = "1:0f98f59e9a2f4070d66f0c9c39561f68fcd1dc837b22a852d28d0003aebd1b1e" - name = "github.com/boltdb/bolt" - packages = ["."] - pruneopts = "UT" - revision = "2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8" - version = "v1.3.1" - -[[projects]] - digest = "1:38e337477887a8935559e3042ce53f14fcc24fd66635b57f423965c8297ccc90" - name = "github.com/btcsuite/btcd" - packages = [ - "addrmgr", - "blockchain", - "btcec", - "chaincfg", - "chaincfg/chainhash", - "connmgr", - "database", - "peer", - "txscript", - "wire", - ] - pruneopts = "UT" - revision = "f3ec13030e4e828869954472cbc51ac36bee5c1d" - version = "v0.20.1-beta" - -[[projects]] - branch = "master" - digest = "1:30d4a548e09bca4a0c77317c58e7407e2a65c15325e944f9c08a7b7992f8a59e" - name = "github.com/btcsuite/btclog" - packages = ["."] - pruneopts = "UT" - revision = "84c8d2346e9fc8c7b947e243b9c24e6df9fd206a" - -[[projects]] - branch = "master" - digest = "1:0faf30bd4ac78188ebeb913680b3dec83dccc62050f92fb2da3940d55e6a7977" - name = "github.com/btcsuite/btcutil" - packages = [ - ".", - "base58", - "bech32", - "bloom", - "coinset", - "hdkeychain", - "txsort", - ] - pruneopts = "UT" - revision = "e17c9730c422e7c745002430f2782b948b59c1c2" - -[[projects]] - digest = "1:7ffc24d91a12c173c18fe9ada86a05fc476f8943f4acffeddc6ec87a4f32bdef" - name = 
"github.com/btcsuite/btcwallet" - packages = [ - "wallet/txauthor", - "wallet/txrules", - "wallet/txsizes", - ] - pruneopts = "UT" - revision = "b19df70dddb66b27902f48cc48e69741909ef2e9" - version = "v0.11.0" - -[[projects]] - branch = "master" - digest = "1:1e6b2f7aa98b082c30a1303c29a702c369b2ec6d86b74a599bc8bbe2333db299" - name = "github.com/btcsuite/go-socks" - packages = ["socks"] - pruneopts = "UT" - revision = "4720035b7bfd2a9bb130b1c184f8bbe41b6f0d0f" - -[[projects]] - branch = "master" - digest = "1:49ad1acb33bb5b40c0d197321d3cf9ee9a29eb02f4765ab7c316e08983eb7559" - name = "github.com/btcsuite/golangcrypto" - packages = ["ripemd160"] - pruneopts = "UT" - revision = "53f62d9b43e87a6c56975cf862af7edf33a8d0df" - -[[projects]] - digest = "1:91fc3f4d1842584d1342364193106e80d1d532bbd1668fbd7c61627f01d0111f" - name = "github.com/btcsuite/goleveldb" - packages = [ - "leveldb/errors", - "leveldb/storage", - "leveldb/util", - ] - pruneopts = "UT" - revision = "3fd0373267b6461dbefe91cef614278064d05465" - version = "v1.0.0" - -[[projects]] - digest = "1:61406f6571eeb97717bdfaac37fa0bc5260621c4cbf3ce7635e9828dcbb5258a" - name = "github.com/cespare/xxhash" - packages = ["."] - pruneopts = "UT" - revision = "d7df74196a9e781ede915320c11c378c1b2f3a1f" - version = "v2.1.1" - -[[projects]] - branch = "master" - digest = "1:9d61b5ca59d3db0b1f1c1e9f5930b4f7c7fd954f54b70c1d83802b8805db918f" - name = "github.com/cevaris/ordered_map" - packages = ["."] - pruneopts = "UT" - revision = "3adeae072e730f1919a936e13b4923706d3f60fe" - -[[projects]] - branch = "master" - digest = "1:36236e7063db3314f32b60885ef7ddf8abab65cb055f3a21d118f7e19148cfa3" - name = "github.com/cpacia/bchutil" - packages = ["."] - pruneopts = "UT" - revision = "b126f6a35b6c2968c0877cb4d2ac5dcf67682d27" - -[[projects]] - digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "UT" - revision = 
"8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - -[[projects]] - digest = "1:1e9a0ec4f7e852123fefad9aadd7647eed1e9fd3716118e99a4b3dc463705c82" - name = "github.com/dchest/siphash" - packages = ["."] - pruneopts = "UT" - revision = "34f201214d993633bb24f418ba11736ab8b55aa7" - version = "v1.2.1" - -[[projects]] - digest = "1:e47d51dab652d26c3fba6f8cba403f922d02757a82abdc77e90df7948daf296e" - name = "github.com/deckarep/golang-set" - packages = ["."] - pruneopts = "UT" - revision = "cbaa98ba5575e67703b32b4b19f73c91f3c4159e" - version = "v1.7.1" - -[[projects]] - digest = "1:edb569dd02419a41ddd98768cc0e7aec922ef19dae139731e5ca750afcf6f4c5" - name = "github.com/edsrzf/mmap-go" - packages = ["."] - pruneopts = "UT" - revision = "188cc3b666ba704534fa4f96e9e61f21f1e1ba7c" - version = "v1.0.0" - -[[projects]] - digest = "1:2b2daa41f40acd66f4aa0ae213bfd286096663c999a26deb773127b7864c6bd0" - name = "github.com/elastic/gosigar" - packages = [ - ".", - "sys/windows", - ] - pruneopts = "UT" - revision = "7aef3366157f2bfdf3e068f73ce7193573e88e0c" - version = "v0.10.5" - -[[projects]] - digest = "1:41d4c366cbfdb3623d8f6fd866b142d238bbfc3a812712278f9fcb42e8644964" - name = "github.com/ethereum/go-ethereum" - packages = [ - ".", - "accounts", - "accounts/abi", - "accounts/abi/bind", - "accounts/external", - "accounts/keystore", - "accounts/scwallet", - "accounts/usbwallet", - "accounts/usbwallet/trezor", - "common", - "common/bitutil", - "common/hexutil", - "common/math", - "common/mclock", - "common/prque", - "consensus", - "consensus/clique", - "consensus/ethash", - "consensus/misc", - "core", - "core/bloombits", - "core/rawdb", - "core/state", - "core/types", - "core/vm", - "crypto", - "crypto/blake2b", - "crypto/bn256", - "crypto/bn256/cloudflare", - "crypto/bn256/google", - "crypto/ecies", - "crypto/secp256k1", - "eth/downloader", - "ethclient", - "ethdb", - "ethdb/leveldb", - "ethdb/memorydb", - "event", - "internal/ethapi", - "log", - "metrics", - "p2p", - 
"p2p/discover", - "p2p/discv5", - "p2p/enode", - "p2p/enr", - "p2p/nat", - "p2p/netutil", - "params", - "rlp", - "rpc", - "signer/core", - "signer/storage", - "trie", - ] - pruneopts = "T" - revision = "017449971e1e9e220efcd97d3313a0e27f47003b" - version = "v1.9.9" - -[[projects]] - branch = "master" - digest = "1:fedce3f87da3944ec15789ff4d1d17e77554148626d079a9ffd2cae6112fdc8b" - name = "github.com/gballet/go-libpcsclite" - packages = ["."] - pruneopts = "UT" - revision = "4678299bea08415f0ca8bd71da9610625cc86e86" - -[[projects]] - digest = "1:967f26d236f25cce1fcc98b88e2ea526a556fe3f9cbf1d6cb404aa72b2b858a9" - name = "github.com/gcash/bchd" - packages = [ - "bchec", - "chaincfg", - "chaincfg/chainhash", - "txscript", - "wire", - ] - pruneopts = "UT" - revision = "4e8fe019ad33cead8f4d58642394e990f855d0a3" - version = "v0.15.2" - -[[projects]] - branch = "master" - digest = "1:b1053b781e9090dab5d3e916eb04c8d85b63a7f6911007c2cd1dd82fb22f7f6a" - name = "github.com/gcash/bchlog" - packages = ["."] - pruneopts = "UT" - revision = "b4f036f92fa66c88eec458f4531ff14ff87704d6" - -[[projects]] - branch = "master" - digest = "1:b1c06051d563f82aa7ec6cb3f759d51301936cc0426933c7c567d8f2fd004c59" - name = "github.com/gcash/bchutil" - packages = [ - ".", - "base58", - ] - pruneopts = "UT" - revision = "98e73ec336ba521482403cf7cda69281170e50e0" - -[[projects]] - digest = "1:586ea76dbd0374d6fb649a91d70d652b7fe0ccffb8910a77468e7702e7901f3d" - name = "github.com/go-stack/stack" - packages = ["."] - pruneopts = "UT" - revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a" - version = "v1.8.0" - -[[projects]] - branch = "master" - digest = "1:228f39dbc93e88d95a024f45d5beea0a64cd33e89bdfb841a1669abb74f8b1e9" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "protoc-gen-go/descriptor", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp", - ] - pruneopts = "UT" - revision = "4e55bbcbfaa105a596caba5bbc20d392806beda9" - -[[projects]] - digest = 
"1:e4f5819333ac698d294fe04dbf640f84719658d5c7ce195b10060cc37292ce79" - name = "github.com/golang/snappy" - packages = ["."] - pruneopts = "UT" - revision = "2a8bb927dd31d8daada140a5d09578521ce5c36a" - version = "v0.0.1" - -[[projects]] - digest = "1:582b704bebaa06b48c29b0cec224a6058a09c86883aaddabde889cd1a5f73e1b" - name = "github.com/google/uuid" - packages = ["."] - pruneopts = "UT" - revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4" - version = "v1.1.1" - -[[projects]] - branch = "master" - digest = "1:e62657cca9badaa308d86e7716083e4c5933bb78e30a17743fc67f50be26f6f4" - name = "github.com/gorilla/websocket" - packages = ["."] - pruneopts = "UT" - revision = "c3e18be99d19e6b3e8f1559eea2c161a665c4b6b" - -[[projects]] - digest = "1:e631368e174090a276fc00b48283f92ac4ccfbbb1945bcfcee083f5f9210dc00" - name = "github.com/hashicorp/golang-lru" - packages = [ - ".", - "simplelru", - ] - pruneopts = "UT" - revision = "14eae340515388ca95aa8e7b86f0de668e981f54" - version = "v0.5.4" - -[[projects]] - digest = "1:c00cc6d95a674b4b923ac069d364445043bc67836e9bd8aeff8440cfbe6a2cc7" - name = "github.com/huin/goupnp" - packages = [ - ".", - "dcps/internetgateway1", - "dcps/internetgateway2", - "httpu", - "scpd", - "soap", - "ssdp", - ] - pruneopts = "UT" - revision = "656e61dfadd241c7cbdd22a023fa81ecb6860ea8" - version = "v1.0.0" - -[[projects]] - digest = "1:94d189f7124eba234224e1a3d28b943d826d480cf71cc71d53c2eac8132f31ed" - name = "github.com/hunterlong/tokenbalance" - packages = ["."] - pruneopts = "UT" - revision = "1fcaffaac40cf0559ccba1276d90757bdf1284e9" - version = "v1.72" - -[[projects]] - digest = "1:71193da2829127d2cd7d2045175a65ef04d79176de5f1ebb185d331daa53b5c9" - name = "github.com/jackpal/go-nat-pmp" - packages = ["."] - pruneopts = "UT" - revision = "059203efa1edd7130293a583541b8308e7c640c4" - version = "v1.0.2" - -[[projects]] - branch = "master" - digest = "1:459271b8268fe541549b299f65160b1df5abe9ffef0426cc38607f771dbc6bb4" - name = 
"github.com/jessevdk/go-flags" - packages = ["."] - pruneopts = "UT" - revision = "c0795c8afcf41dd1d786bebce68636c199b3bb45" - -[[projects]] - branch = "master" - digest = "1:f275e994e11f9bec072885d81a8aaa1a95bdd0ebca4cd78f1d37d3d84f88f3b8" - name = "github.com/karalabe/usb" - packages = ["."] - pruneopts = "T" - revision = "911d15fe12a9c411cf5d0dd5635231c759399bed" - -[[projects]] - branch = "master" - digest = "1:5a580af993fe973c9c96ca9ed10f64e979a85eab3b3f56e50003fefb91b10db9" - name = "github.com/ltcsuite/ltcd" - packages = [ - "btcec", - "chaincfg", - "chaincfg/chainhash", - "txscript", - "wire", - ] - pruneopts = "UT" - revision = "92166e4124994fcca26545ee95d5532749593596" - -[[projects]] - branch = "master" - digest = "1:7e604729bde3f3f9f01454a2e13b99e475ec725794ae5b9d4f8a62ccd8608493" - name = "github.com/ltcsuite/ltcutil" - packages = [ - ".", - "base58", - "bech32", - ] - pruneopts = "UT" - revision = "6bec450ea6ad382fc379160f355562b64382366c" - -[[projects]] - branch = "master" - digest = "1:a302d142a103687a0dc12e2c1fffc4128011b6ed27dbc969c549799b23f57b8d" - name = "github.com/ltcsuite/ltcwallet" - packages = ["wallet/txrules"] - pruneopts = "UT" - revision = "fc621f0f45c334831b2dda5ae8b85cf0185fe114" - -[[projects]] - digest = "1:eb1bffab7260bf5ddc95fc2c41d4bfee1a4f5fe18194b3946fe8a9e9121a282f" - name = "github.com/mattn/go-runewidth" - packages = ["."] - pruneopts = "UT" - revision = "a4df4ddbff020e131056d91f580a1cdcd806e3ae" - version = "v0.0.8" - -[[projects]] - branch = "master" - digest = "1:130cefe87d7eeefc824978dcb78e35672d4c49a11f25c153fbf0cfd952756fa3" - name = "github.com/minio/blake2b-simd" - packages = ["."] - pruneopts = "UT" - revision = "3f5f724cb5b182a5c278d6d3d55b40e7f8c2efb4" - -[[projects]] - digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" - name = "github.com/mitchellh/go-homedir" - packages = ["."] - pruneopts = "UT" - revision = "af06845cf3004701891bf4fdb884bfe4920b3727" - version = "v1.1.0" - 
-[[projects]] - digest = "1:00e810a1a88ae7bccca5a31de78ddb0db5ce5cc27dcd5744aa452db3e65f2a9a" - name = "github.com/nanmu42/etherscan-api" - packages = ["."] - pruneopts = "UT" - revision = "586884d258b6b8b22d5bd039e270d33572888f54" - version = "v1.1.0" - -[[projects]] - digest = "1:b8261a46d75566ebf5b4fb6bb762f54f47e6633e0995118393afc80bb1f428f5" - name = "github.com/olekukonko/tablewriter" - packages = ["."] - pruneopts = "UT" - revision = "876dd0e0227ec99c0243b639b92139915b65331a" - version = "v0.0.4" - -[[projects]] - digest = "1:5b3b29ce0e569f62935d9541dff2e16cc09df981ebde48e82259076a73a3d0c7" - name = "github.com/op/go-logging" - packages = ["."] - pruneopts = "UT" - revision = "b2cb9fa56473e98db8caba80237377e83fe44db5" - version = "v1" - -[[projects]] - digest = "1:e5d0bd87abc2781d14e274807a470acd180f0499f8bf5bb18606e9ec22ad9de9" - name = "github.com/pborman/uuid" - packages = ["."] - pruneopts = "UT" - revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1" - version = "v1.2" - -[[projects]] - digest = "1:9e1d37b58d17113ec3cb5608ac0382313c5b59470b94ed97d0976e69c7022314" - name = "github.com/pkg/errors" - packages = ["."] - pruneopts = "UT" - revision = "614d223910a179a466c1767a985424175c39b465" - version = "v0.9.1" - -[[projects]] - digest = "1:19a227084137c73d7a519ff90acc9fa69855c2ba134bb9c7dfe94e9ad2949c64" - name = "github.com/prometheus/tsdb" - packages = ["fileutil"] - pruneopts = "UT" - revision = "7762249358193da791ec62e72b080d908f96e776" - version = "v0.10.0" - -[[projects]] - digest = "1:31d83d1b1c288073c91abadee3caec87de2a1fb5dbe589039264a802e67a26b8" - name = "github.com/rjeczalik/notify" - packages = ["."] - pruneopts = "UT" - revision = "69d839f37b13a8cb7a78366f7633a4071cb43be7" - version = "v0.9.2" - -[[projects]] - digest = "1:c5dfe46811af7e2eff7c11fc84b6c841520338613c056f659f262d5a4fb42fa8" - name = "github.com/rs/cors" - packages = ["."] - pruneopts = "UT" - revision = "db0fe48135e83b5812a5a31be0eea66984b1b521" - version = "v1.7.0" - 
-[[projects]] - digest = "1:81e02c4edb639c80559c0650f9401d3e2dcc3256d1fa215382bb7c83c1db9126" - name = "github.com/shopspring/decimal" - packages = ["."] - pruneopts = "UT" - revision = "cd690d0c9e2447b1ef2a129a6b7b49077da89b8e" - version = "1.1.0" - -[[projects]] - branch = "develop" - digest = "1:6deccaba5762377091f2e5b26dba70e630e01edb3f95d1a6a59d9b098bd4358f" - name = "github.com/status-im/keycard-go" - packages = ["derivationpath"] - pruneopts = "UT" - revision = "f38e9a19958eb492359ace5d068a7ce42e7824f8" - -[[projects]] - digest = "1:266e2f508feb9a9a765bfeb74d116a88514248b2f8428788dcce574bd026b9c0" - name = "github.com/steakknife/bloomfilter" - packages = ["."] - pruneopts = "UT" - revision = "99ee86d9200fcc2ffde62f508329bd6627c0a307" - version = "1.0.4" - -[[projects]] - digest = "1:5ca4bdccd72e66aaba5b52f9c4a21f1021102f0919432fe138ad5d48abf06833" - name = "github.com/steakknife/hamming" - packages = ["."] - pruneopts = "UT" - revision = "003c143a81c25ea5e263d692919c611c7122ae6b" - version = "0.2.5" - -[[projects]] - digest = "1:c345767003e0d53971e0f409a42b875dab3fee7ec269557c863ebbb194341420" - name = "github.com/syndtr/goleveldb" - packages = [ - "leveldb", - "leveldb/cache", - "leveldb/comparer", - "leveldb/errors", - "leveldb/filter", - "leveldb/iterator", - "leveldb/journal", - "leveldb/memdb", - "leveldb/opt", - "leveldb/storage", - "leveldb/table", - "leveldb/util", - ] - pruneopts = "UT" - revision = "758128399b1df3a87e92df6c26c1d2063da8fabe" - -[[projects]] - digest = "1:91b40a2adb6b4ccd51b1dfb306edfa76139e1599b01666346085472b386f5447" - name = "github.com/tyler-smith/go-bip39" - packages = [ - ".", - "wordlists", - ] - pruneopts = "UT" - revision = "5e3853c3f4e1a44df487c7efeb064ee8b43755de" - version = "1.0.2" - -[[projects]] - branch = "master" - digest = "1:7dca0da64f5937af74f21618cdb812c8f16a7d042316dd5bf2f1dfd086be3fc6" - name = "github.com/wsddn/go-ecdh" - packages = ["."] - pruneopts = "UT" - revision = 
"48726bab92085232373de4ec5c51ce7b441c63a0" - -[[projects]] - branch = "master" - digest = "1:b90cae04932efcf7747730936b3ed1d52a913a97e7a658869bed2539b7a956df" - name = "golang.org/x/crypto" - packages = [ - "curve25519", - "pbkdf2", - "ripemd160", - "scrypt", - "sha3", - "ssh/terminal", - ] - pruneopts = "UT" - revision = "6d4e4cb37c7d6416dfea8472e751c7b6615267a6" - -[[projects]] - branch = "master" - digest = "1:de276a36cf59627923c9aca0ed1425b0dd49a90a0e4542abb97de61cc8f625e6" - name = "golang.org/x/net" - packages = [ - "context", - "html", - "html/atom", - "html/charset", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - "internal/socks", - "internal/timeseries", - "proxy", - "trace", - ] - pruneopts = "UT" - revision = "6afb5195e5aab057fda82e27171243402346b0ad" - -[[projects]] - branch = "master" - digest = "1:6c49651edbcfad5c1ed228e827b5dbeefe656f8b71852093d9f4c470c71eb21a" - name = "golang.org/x/sys" - packages = [ - "cpu", - "unix", - "windows", - ] - pruneopts = "UT" - revision = "b77594299b429d05028403d72b68172959c7dad5" - -[[projects]] - digest = "1:28deae5fe892797ff37a317b5bcda96d11d1c90dadd89f1337651df3bc4c586e" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "encoding", - "encoding/charmap", - "encoding/htmlindex", - "encoding/internal", - "encoding/internal/identifier", - "encoding/japanese", - "encoding/korean", - "encoding/simplifiedchinese", - "encoding/traditionalchinese", - "encoding/unicode", - "internal/colltab", - "internal/gen", - "internal/language", - "internal/language/compact", - "internal/tag", - "internal/triegen", - "internal/ucd", - "internal/utf8internal", - "language", - "runes", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - ] - pruneopts = "UT" - revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" - version = "v0.3.2" - -[[projects]] - branch = "master" - digest = 
"1:583a0c80f5e3a9343d33aea4aead1e1afcc0043db66fdf961ddd1fe8cd3a4faf" - name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] - pruneopts = "UT" - revision = "ca5a22157cba8746e7aa978de1b1ac4085150840" - -[[projects]] - branch = "master" - digest = "1:fe5ba00f68cf67a31f8dca3a0942fd76612868e2e915c5a70a6e3926ba2ac258" - name = "google.golang.org/grpc" - packages = [ - ".", - "attributes", - "backoff", - "balancer", - "balancer/base", - "balancer/roundrobin", - "binarylog/grpc_binarylog_v1", - "codes", - "connectivity", - "credentials", - "credentials/internal", - "encoding", - "encoding/proto", - "grpclog", - "internal", - "internal/backoff", - "internal/balancerload", - "internal/binarylog", - "internal/buffer", - "internal/channelz", - "internal/envconfig", - "internal/grpcrand", - "internal/grpcsync", - "internal/resolver/dns", - "internal/resolver/passthrough", - "internal/syscall", - "internal/transport", - "keepalive", - "metadata", - "naming", - "peer", - "reflection", - "reflection/grpc_reflection_v1alpha", - "resolver", - "serviceconfig", - "stats", - "status", - "tap", - ] - pruneopts = "UT" - revision = "3311b9ea68a8c6c1554506af68fcbd9fe3e1c74f" - -[[projects]] - branch = "v1" - digest = "1:b8621abaa059e2a06b87dada1b8439ddc53b71e7b7794346cf3cc92c6fad9f0d" - name = "gopkg.in/jarcoal/httpmock.v1" - packages = ["."] - pruneopts = "UT" - revision = "a728a90ba3c33b3752e8a8ad8c0409cb78a62287" - -[[projects]] - branch = "v2" - digest = "1:3d3f9391ab615be8655ae0d686a1564f3fec413979bb1aaf018bac1ec1bb1cc7" - name = "gopkg.in/natefinch/npipe.v2" - packages = ["."] - pruneopts = "UT" - revision = "c1b8fa8bdccecb0b8db834ee0b92fdbcfa606dd6" - -[[projects]] - digest = "1:b75b3deb2bce8bc079e16bb2aecfe01eb80098f5650f9e93e5643ca8b7b73737" - name = "gopkg.in/yaml.v2" - packages = ["."] - pruneopts = "UT" - revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce" - version = "v2.2.7" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - 
input-imports = [ - "github.com/OpenBazaar/go-ethwallet/wallet", - "github.com/OpenBazaar/golang-socketio", - "github.com/OpenBazaar/golang-socketio/protocol", - "github.com/OpenBazaar/golang-socketio/transport", - "github.com/OpenBazaar/spvwallet", - "github.com/OpenBazaar/spvwallet/exchangerates", - "github.com/OpenBazaar/wallet-interface", - "github.com/btcsuite/btcd/blockchain", - "github.com/btcsuite/btcd/btcec", - "github.com/btcsuite/btcd/chaincfg", - "github.com/btcsuite/btcd/chaincfg/chainhash", - "github.com/btcsuite/btcd/txscript", - "github.com/btcsuite/btcd/wire", - "github.com/btcsuite/btcutil", - "github.com/btcsuite/btcutil/base58", - "github.com/btcsuite/btcutil/bech32", - "github.com/btcsuite/btcutil/coinset", - "github.com/btcsuite/btcutil/hdkeychain", - "github.com/btcsuite/btcutil/txsort", - "github.com/btcsuite/btcwallet/wallet/txauthor", - "github.com/btcsuite/btcwallet/wallet/txrules", - "github.com/btcsuite/golangcrypto/ripemd160", - "github.com/cpacia/bchutil", - "github.com/gcash/bchd/chaincfg/chainhash", - "github.com/gcash/bchd/txscript", - "github.com/gcash/bchd/wire", - "github.com/golang/protobuf/proto", - "github.com/golang/protobuf/ptypes/timestamp", - "github.com/gorilla/websocket", - "github.com/jessevdk/go-flags", - "github.com/ltcsuite/ltcd/chaincfg", - "github.com/ltcsuite/ltcd/chaincfg/chainhash", - "github.com/ltcsuite/ltcutil", - "github.com/ltcsuite/ltcutil/base58", - "github.com/ltcsuite/ltcwallet/wallet/txrules", - "github.com/minio/blake2b-simd", - "github.com/op/go-logging", - "github.com/tyler-smith/go-bip39", - "golang.org/x/crypto/ripemd160", - "golang.org/x/net/context", - "golang.org/x/net/proxy", - "google.golang.org/grpc", - "google.golang.org/grpc/reflection", - "gopkg.in/jarcoal/httpmock.v1", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/OpenBazaar/multiwallet/Gopkg.toml b/vendor/github.com/OpenBazaar/multiwallet/Gopkg.toml deleted file mode 100644 index 
4f0c6849ff..0000000000 --- a/vendor/github.com/OpenBazaar/multiwallet/Gopkg.toml +++ /dev/null @@ -1,126 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - - -[[constraint]] - branch = "master" - name = "github.com/OpenBazaar/golang-socketio" - -[[constraint]] - branch = "ethereum-master" - name = "github.com/OpenBazaar/spvwallet" - -[[constraint]] - branch = "ethereum-master" - name = "github.com/OpenBazaar/wallet-interface" - -[[constraint]] - version = "v0.20.1-beta" - name = "github.com/btcsuite/btcd" - -[[constraint]] - branch = "master" - name = "github.com/btcsuite/btcutil" - -[[constraint]] - version = "v0.11.0" - name = "github.com/btcsuite/btcwallet" - -[[constraint]] - branch = "master" - name = "github.com/btcsuite/golangcrypto" - -[[constraint]] - branch = "master" - name = "github.com/cpacia/bchutil" - -[[constraint]] - branch = "master" - name = "github.com/golang/protobuf" - -[[constraint]] - branch = "master" - name = "github.com/gorilla/websocket" - -[[constraint]] - branch = "master" - name = "github.com/jessevdk/go-flags" - -[[constraint]] - branch = "master" - name = "github.com/ltcsuite/ltcd" - -[[constraint]] - branch = "master" - name = "github.com/ltcsuite/ltcutil" - -[[constraint]] - branch = "master" - name = "github.com/ltcsuite/ltcwallet" - -[[constraint]] - branch = "master" - name = "github.com/minio/blake2b-simd" - -[[constraint]] - version = "v1.0" - name = 
"github.com/op/go-logging" - -[[constraint]] - version = "v1.0.2" - name = "github.com/tyler-smith/go-bip39" - -[[constraint]] - branch = "master" - name = "golang.org/x/crypto" - -[[constraint]] - branch = "master" - name = "golang.org/x/net" - -[[constraint]] - branch = "master" - name = "google.golang.org/grpc" - -[[constraint]] - branch = "v1" - name = "gopkg.in/jarcoal/httpmock.v1" - -[[override]] - revision = "758128399b1df3a87e92df6c26c1d2063da8fabe" - name = "github.com/syndtr/goleveldb" - -[prune] - go-tests = true - unused-packages = true - -[[prune.project]] - name = "github.com/ethereum/go-ethereum" - unused-packages = false - -[[prune.project]] - name = "github.com/karalabe/usb" - unused-packages = false diff --git a/vendor/github.com/OpenBazaar/multiwallet/api/pb/api.pb.go b/vendor/github.com/OpenBazaar/multiwallet/api/pb/api.pb.go new file mode 100644 index 0000000000..c903b33f60 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/api/pb/api.pb.go @@ -0,0 +1,2602 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: api.proto + +package pb + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type CoinType int32 + +const ( + CoinType_BITCOIN CoinType = 0 + CoinType_BITCOIN_CASH CoinType = 1 + CoinType_ZCASH CoinType = 2 + CoinType_LITECOIN CoinType = 3 + CoinType_ETHEREUM CoinType = 4 +) + +var CoinType_name = map[int32]string{ + 0: "BITCOIN", + 1: "BITCOIN_CASH", + 2: "ZCASH", + 3: "LITECOIN", + 4: "ETHEREUM", +} +var CoinType_value = map[string]int32{ + "BITCOIN": 0, + "BITCOIN_CASH": 1, + "ZCASH": 2, + "LITECOIN": 3, + "ETHEREUM": 4, +} + +func (x CoinType) String() string { + return proto.EnumName(CoinType_name, int32(x)) +} +func (CoinType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{0} +} + +type KeyPurpose int32 + +const ( + KeyPurpose_INTERNAL KeyPurpose = 0 + KeyPurpose_EXTERNAL KeyPurpose = 1 +) + +var KeyPurpose_name = map[int32]string{ + 0: "INTERNAL", + 1: "EXTERNAL", +} +var KeyPurpose_value = map[string]int32{ + "INTERNAL": 0, + "EXTERNAL": 1, +} + +func (x KeyPurpose) String() string { + return proto.EnumName(KeyPurpose_name, int32(x)) +} +func (KeyPurpose) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{1} +} + +type FeeLevel int32 + +const ( + FeeLevel_ECONOMIC FeeLevel = 0 + FeeLevel_NORMAL FeeLevel = 1 + FeeLevel_PRIORITY FeeLevel = 2 +) + +var FeeLevel_name = map[int32]string{ + 0: "ECONOMIC", + 1: "NORMAL", + 2: "PRIORITY", +} +var FeeLevel_value = map[string]int32{ + "ECONOMIC": 0, + "NORMAL": 1, + "PRIORITY": 2, +} + +func (x FeeLevel) String() string { + return proto.EnumName(FeeLevel_name, int32(x)) +} +func (FeeLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{2} +} + +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func 
(*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{0} +} +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (dst *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(dst, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +type CoinSelection struct { + Coin CoinType `protobuf:"varint,1,opt,name=coin,proto3,enum=pb.CoinType" json:"coin,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CoinSelection) Reset() { *m = CoinSelection{} } +func (m *CoinSelection) String() string { return proto.CompactTextString(m) } +func (*CoinSelection) ProtoMessage() {} +func (*CoinSelection) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{1} +} +func (m *CoinSelection) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CoinSelection.Unmarshal(m, b) +} +func (m *CoinSelection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CoinSelection.Marshal(b, m, deterministic) +} +func (dst *CoinSelection) XXX_Merge(src proto.Message) { + xxx_messageInfo_CoinSelection.Merge(dst, src) +} +func (m *CoinSelection) XXX_Size() int { + return xxx_messageInfo_CoinSelection.Size(m) +} +func (m *CoinSelection) XXX_DiscardUnknown() { + xxx_messageInfo_CoinSelection.DiscardUnknown(m) +} + +var xxx_messageInfo_CoinSelection proto.InternalMessageInfo + +func (m *CoinSelection) GetCoin() CoinType { + if m != nil { + return m.Coin + } + return CoinType_BITCOIN +} + +type Row struct { + Data string 
`protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Row) Reset() { *m = Row{} } +func (m *Row) String() string { return proto.CompactTextString(m) } +func (*Row) ProtoMessage() {} +func (*Row) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{2} +} +func (m *Row) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Row.Unmarshal(m, b) +} +func (m *Row) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Row.Marshal(b, m, deterministic) +} +func (dst *Row) XXX_Merge(src proto.Message) { + xxx_messageInfo_Row.Merge(dst, src) +} +func (m *Row) XXX_Size() int { + return xxx_messageInfo_Row.Size(m) +} +func (m *Row) XXX_DiscardUnknown() { + xxx_messageInfo_Row.DiscardUnknown(m) +} + +var xxx_messageInfo_Row proto.InternalMessageInfo + +func (m *Row) GetData() string { + if m != nil { + return m.Data + } + return "" +} + +type KeySelection struct { + Coin CoinType `protobuf:"varint,1,opt,name=coin,proto3,enum=pb.CoinType" json:"coin,omitempty"` + Purpose KeyPurpose `protobuf:"varint,2,opt,name=purpose,proto3,enum=pb.KeyPurpose" json:"purpose,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeySelection) Reset() { *m = KeySelection{} } +func (m *KeySelection) String() string { return proto.CompactTextString(m) } +func (*KeySelection) ProtoMessage() {} +func (*KeySelection) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{3} +} +func (m *KeySelection) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeySelection.Unmarshal(m, b) +} +func (m *KeySelection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeySelection.Marshal(b, m, deterministic) +} +func (dst *KeySelection) XXX_Merge(src proto.Message) 
{ + xxx_messageInfo_KeySelection.Merge(dst, src) +} +func (m *KeySelection) XXX_Size() int { + return xxx_messageInfo_KeySelection.Size(m) +} +func (m *KeySelection) XXX_DiscardUnknown() { + xxx_messageInfo_KeySelection.DiscardUnknown(m) +} + +var xxx_messageInfo_KeySelection proto.InternalMessageInfo + +func (m *KeySelection) GetCoin() CoinType { + if m != nil { + return m.Coin + } + return CoinType_BITCOIN +} + +func (m *KeySelection) GetPurpose() KeyPurpose { + if m != nil { + return m.Purpose + } + return KeyPurpose_INTERNAL +} + +type Address struct { + Coin CoinType `protobuf:"varint,1,opt,name=coin,proto3,enum=pb.CoinType" json:"coin,omitempty"` + Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Address) Reset() { *m = Address{} } +func (m *Address) String() string { return proto.CompactTextString(m) } +func (*Address) ProtoMessage() {} +func (*Address) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{4} +} +func (m *Address) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Address.Unmarshal(m, b) +} +func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Address.Marshal(b, m, deterministic) +} +func (dst *Address) XXX_Merge(src proto.Message) { + xxx_messageInfo_Address.Merge(dst, src) +} +func (m *Address) XXX_Size() int { + return xxx_messageInfo_Address.Size(m) +} +func (m *Address) XXX_DiscardUnknown() { + xxx_messageInfo_Address.DiscardUnknown(m) +} + +var xxx_messageInfo_Address proto.InternalMessageInfo + +func (m *Address) GetCoin() CoinType { + if m != nil { + return m.Coin + } + return CoinType_BITCOIN +} + +func (m *Address) GetAddr() string { + if m != nil { + return m.Addr + } + return "" +} + +type Height struct { + Height uint32 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Height) Reset() { *m = Height{} } +func (m *Height) String() string { return proto.CompactTextString(m) } +func (*Height) ProtoMessage() {} +func (*Height) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{5} +} +func (m *Height) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Height.Unmarshal(m, b) +} +func (m *Height) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Height.Marshal(b, m, deterministic) +} +func (dst *Height) XXX_Merge(src proto.Message) { + xxx_messageInfo_Height.Merge(dst, src) +} +func (m *Height) XXX_Size() int { + return xxx_messageInfo_Height.Size(m) +} +func (m *Height) XXX_DiscardUnknown() { + xxx_messageInfo_Height.DiscardUnknown(m) +} + +var xxx_messageInfo_Height proto.InternalMessageInfo + +func (m *Height) GetHeight() uint32 { + if m != nil { + return m.Height + } + return 0 +} + +type Balances struct { + Confirmed uint64 `protobuf:"varint,1,opt,name=confirmed,proto3" json:"confirmed,omitempty"` + Unconfirmed uint64 `protobuf:"varint,2,opt,name=unconfirmed,proto3" json:"unconfirmed,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Balances) Reset() { *m = Balances{} } +func (m *Balances) String() string { return proto.CompactTextString(m) } +func (*Balances) ProtoMessage() {} +func (*Balances) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{6} +} +func (m *Balances) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Balances.Unmarshal(m, b) +} +func (m *Balances) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Balances.Marshal(b, m, deterministic) +} +func (dst *Balances) XXX_Merge(src proto.Message) { + xxx_messageInfo_Balances.Merge(dst, src) +} +func (m *Balances) 
XXX_Size() int { + return xxx_messageInfo_Balances.Size(m) +} +func (m *Balances) XXX_DiscardUnknown() { + xxx_messageInfo_Balances.DiscardUnknown(m) +} + +var xxx_messageInfo_Balances proto.InternalMessageInfo + +func (m *Balances) GetConfirmed() uint64 { + if m != nil { + return m.Confirmed + } + return 0 +} + +func (m *Balances) GetUnconfirmed() uint64 { + if m != nil { + return m.Unconfirmed + } + return 0 +} + +type Key struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Key) Reset() { *m = Key{} } +func (m *Key) String() string { return proto.CompactTextString(m) } +func (*Key) ProtoMessage() {} +func (*Key) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{7} +} +func (m *Key) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Key.Unmarshal(m, b) +} +func (m *Key) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Key.Marshal(b, m, deterministic) +} +func (dst *Key) XXX_Merge(src proto.Message) { + xxx_messageInfo_Key.Merge(dst, src) +} +func (m *Key) XXX_Size() int { + return xxx_messageInfo_Key.Size(m) +} +func (m *Key) XXX_DiscardUnknown() { + xxx_messageInfo_Key.DiscardUnknown(m) +} + +var xxx_messageInfo_Key proto.InternalMessageInfo + +func (m *Key) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +type Keys struct { + Keys []*Key `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Keys) Reset() { *m = Keys{} } +func (m *Keys) String() string { return proto.CompactTextString(m) } +func (*Keys) ProtoMessage() {} +func (*Keys) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{8} +} +func (m *Keys) XXX_Unmarshal(b []byte) error { + 
return xxx_messageInfo_Keys.Unmarshal(m, b) +} +func (m *Keys) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Keys.Marshal(b, m, deterministic) +} +func (dst *Keys) XXX_Merge(src proto.Message) { + xxx_messageInfo_Keys.Merge(dst, src) +} +func (m *Keys) XXX_Size() int { + return xxx_messageInfo_Keys.Size(m) +} +func (m *Keys) XXX_DiscardUnknown() { + xxx_messageInfo_Keys.DiscardUnknown(m) +} + +var xxx_messageInfo_Keys proto.InternalMessageInfo + +func (m *Keys) GetKeys() []*Key { + if m != nil { + return m.Keys + } + return nil +} + +type Addresses struct { + Addresses []*Address `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Addresses) Reset() { *m = Addresses{} } +func (m *Addresses) String() string { return proto.CompactTextString(m) } +func (*Addresses) ProtoMessage() {} +func (*Addresses) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{9} +} +func (m *Addresses) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Addresses.Unmarshal(m, b) +} +func (m *Addresses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Addresses.Marshal(b, m, deterministic) +} +func (dst *Addresses) XXX_Merge(src proto.Message) { + xxx_messageInfo_Addresses.Merge(dst, src) +} +func (m *Addresses) XXX_Size() int { + return xxx_messageInfo_Addresses.Size(m) +} +func (m *Addresses) XXX_DiscardUnknown() { + xxx_messageInfo_Addresses.DiscardUnknown(m) +} + +var xxx_messageInfo_Addresses proto.InternalMessageInfo + +func (m *Addresses) GetAddresses() []*Address { + if m != nil { + return m.Addresses + } + return nil +} + +type BoolResponse struct { + Bool bool `protobuf:"varint,1,opt,name=bool,proto3" json:"bool,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` +} + +func (m *BoolResponse) Reset() { *m = BoolResponse{} } +func (m *BoolResponse) String() string { return proto.CompactTextString(m) } +func (*BoolResponse) ProtoMessage() {} +func (*BoolResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{10} +} +func (m *BoolResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BoolResponse.Unmarshal(m, b) +} +func (m *BoolResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BoolResponse.Marshal(b, m, deterministic) +} +func (dst *BoolResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolResponse.Merge(dst, src) +} +func (m *BoolResponse) XXX_Size() int { + return xxx_messageInfo_BoolResponse.Size(m) +} +func (m *BoolResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BoolResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BoolResponse proto.InternalMessageInfo + +func (m *BoolResponse) GetBool() bool { + if m != nil { + return m.Bool + } + return false +} + +type NetParams struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NetParams) Reset() { *m = NetParams{} } +func (m *NetParams) String() string { return proto.CompactTextString(m) } +func (*NetParams) ProtoMessage() {} +func (*NetParams) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{11} +} +func (m *NetParams) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NetParams.Unmarshal(m, b) +} +func (m *NetParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NetParams.Marshal(b, m, deterministic) +} +func (dst *NetParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetParams.Merge(dst, src) +} +func (m *NetParams) XXX_Size() int { + return xxx_messageInfo_NetParams.Size(m) +} +func (m *NetParams) XXX_DiscardUnknown() 
{ + xxx_messageInfo_NetParams.DiscardUnknown(m) +} + +var xxx_messageInfo_NetParams proto.InternalMessageInfo + +func (m *NetParams) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type TransactionList struct { + Transactions []*Tx `protobuf:"bytes,1,rep,name=transactions,proto3" json:"transactions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TransactionList) Reset() { *m = TransactionList{} } +func (m *TransactionList) String() string { return proto.CompactTextString(m) } +func (*TransactionList) ProtoMessage() {} +func (*TransactionList) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{12} +} +func (m *TransactionList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TransactionList.Unmarshal(m, b) +} +func (m *TransactionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TransactionList.Marshal(b, m, deterministic) +} +func (dst *TransactionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_TransactionList.Merge(dst, src) +} +func (m *TransactionList) XXX_Size() int { + return xxx_messageInfo_TransactionList.Size(m) +} +func (m *TransactionList) XXX_DiscardUnknown() { + xxx_messageInfo_TransactionList.DiscardUnknown(m) +} + +var xxx_messageInfo_TransactionList proto.InternalMessageInfo + +func (m *TransactionList) GetTransactions() []*Tx { + if m != nil { + return m.Transactions + } + return nil +} + +type Tx struct { + Txid string `protobuf:"bytes,1,opt,name=txid,proto3" json:"txid,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + Height int32 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` + Timestamp *timestamp.Timestamp `protobuf:"bytes,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + WatchOnly bool `protobuf:"varint,5,opt,name=watchOnly,proto3" json:"watchOnly,omitempty"` + Raw 
[]byte `protobuf:"bytes,6,opt,name=raw,proto3" json:"raw,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Tx) Reset() { *m = Tx{} } +func (m *Tx) String() string { return proto.CompactTextString(m) } +func (*Tx) ProtoMessage() {} +func (*Tx) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{13} +} +func (m *Tx) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Tx.Unmarshal(m, b) +} +func (m *Tx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Tx.Marshal(b, m, deterministic) +} +func (dst *Tx) XXX_Merge(src proto.Message) { + xxx_messageInfo_Tx.Merge(dst, src) +} +func (m *Tx) XXX_Size() int { + return xxx_messageInfo_Tx.Size(m) +} +func (m *Tx) XXX_DiscardUnknown() { + xxx_messageInfo_Tx.DiscardUnknown(m) +} + +var xxx_messageInfo_Tx proto.InternalMessageInfo + +func (m *Tx) GetTxid() string { + if m != nil { + return m.Txid + } + return "" +} + +func (m *Tx) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Tx) GetHeight() int32 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Tx) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *Tx) GetWatchOnly() bool { + if m != nil { + return m.WatchOnly + } + return false +} + +func (m *Tx) GetRaw() []byte { + if m != nil { + return m.Raw + } + return nil +} + +type Txid struct { + Coin CoinType `protobuf:"varint,1,opt,name=coin,proto3,enum=pb.CoinType" json:"coin,omitempty"` + Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Txid) Reset() { *m = Txid{} } +func (m *Txid) String() string { return proto.CompactTextString(m) } +func (*Txid) ProtoMessage() {} +func (*Txid) Descriptor() ([]byte, []int) { + 
return fileDescriptor_api_2ff753dddd9b028a, []int{14} +} +func (m *Txid) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Txid.Unmarshal(m, b) +} +func (m *Txid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Txid.Marshal(b, m, deterministic) +} +func (dst *Txid) XXX_Merge(src proto.Message) { + xxx_messageInfo_Txid.Merge(dst, src) +} +func (m *Txid) XXX_Size() int { + return xxx_messageInfo_Txid.Size(m) +} +func (m *Txid) XXX_DiscardUnknown() { + xxx_messageInfo_Txid.DiscardUnknown(m) +} + +var xxx_messageInfo_Txid proto.InternalMessageInfo + +func (m *Txid) GetCoin() CoinType { + if m != nil { + return m.Coin + } + return CoinType_BITCOIN +} + +func (m *Txid) GetHash() string { + if m != nil { + return m.Hash + } + return "" +} + +type FeeLevelSelection struct { + Coin CoinType `protobuf:"varint,1,opt,name=coin,proto3,enum=pb.CoinType" json:"coin,omitempty"` + FeeLevel FeeLevel `protobuf:"varint,2,opt,name=feeLevel,proto3,enum=pb.FeeLevel" json:"feeLevel,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FeeLevelSelection) Reset() { *m = FeeLevelSelection{} } +func (m *FeeLevelSelection) String() string { return proto.CompactTextString(m) } +func (*FeeLevelSelection) ProtoMessage() {} +func (*FeeLevelSelection) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{15} +} +func (m *FeeLevelSelection) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FeeLevelSelection.Unmarshal(m, b) +} +func (m *FeeLevelSelection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FeeLevelSelection.Marshal(b, m, deterministic) +} +func (dst *FeeLevelSelection) XXX_Merge(src proto.Message) { + xxx_messageInfo_FeeLevelSelection.Merge(dst, src) +} +func (m *FeeLevelSelection) XXX_Size() int { + return xxx_messageInfo_FeeLevelSelection.Size(m) +} +func (m *FeeLevelSelection) 
XXX_DiscardUnknown() { + xxx_messageInfo_FeeLevelSelection.DiscardUnknown(m) +} + +var xxx_messageInfo_FeeLevelSelection proto.InternalMessageInfo + +func (m *FeeLevelSelection) GetCoin() CoinType { + if m != nil { + return m.Coin + } + return CoinType_BITCOIN +} + +func (m *FeeLevelSelection) GetFeeLevel() FeeLevel { + if m != nil { + return m.FeeLevel + } + return FeeLevel_ECONOMIC +} + +type FeePerByte struct { + Fee uint64 `protobuf:"varint,1,opt,name=fee,proto3" json:"fee,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FeePerByte) Reset() { *m = FeePerByte{} } +func (m *FeePerByte) String() string { return proto.CompactTextString(m) } +func (*FeePerByte) ProtoMessage() {} +func (*FeePerByte) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{16} +} +func (m *FeePerByte) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FeePerByte.Unmarshal(m, b) +} +func (m *FeePerByte) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FeePerByte.Marshal(b, m, deterministic) +} +func (dst *FeePerByte) XXX_Merge(src proto.Message) { + xxx_messageInfo_FeePerByte.Merge(dst, src) +} +func (m *FeePerByte) XXX_Size() int { + return xxx_messageInfo_FeePerByte.Size(m) +} +func (m *FeePerByte) XXX_DiscardUnknown() { + xxx_messageInfo_FeePerByte.DiscardUnknown(m) +} + +var xxx_messageInfo_FeePerByte proto.InternalMessageInfo + +func (m *FeePerByte) GetFee() uint64 { + if m != nil { + return m.Fee + } + return 0 +} + +type Fee struct { + Fee uint64 `protobuf:"varint,1,opt,name=fee,proto3" json:"fee,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Fee) Reset() { *m = Fee{} } +func (m *Fee) String() string { return proto.CompactTextString(m) } +func (*Fee) ProtoMessage() {} +func (*Fee) Descriptor() ([]byte, []int) { + return 
fileDescriptor_api_2ff753dddd9b028a, []int{17} +} +func (m *Fee) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Fee.Unmarshal(m, b) +} +func (m *Fee) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Fee.Marshal(b, m, deterministic) +} +func (dst *Fee) XXX_Merge(src proto.Message) { + xxx_messageInfo_Fee.Merge(dst, src) +} +func (m *Fee) XXX_Size() int { + return xxx_messageInfo_Fee.Size(m) +} +func (m *Fee) XXX_DiscardUnknown() { + xxx_messageInfo_Fee.DiscardUnknown(m) +} + +var xxx_messageInfo_Fee proto.InternalMessageInfo + +func (m *Fee) GetFee() uint64 { + if m != nil { + return m.Fee + } + return 0 +} + +type SpendInfo struct { + Coin CoinType `protobuf:"varint,1,opt,name=coin,proto3,enum=pb.CoinType" json:"coin,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + Amount uint64 `protobuf:"varint,3,opt,name=amount,proto3" json:"amount,omitempty"` + FeeLevel FeeLevel `protobuf:"varint,4,opt,name=feeLevel,proto3,enum=pb.FeeLevel" json:"feeLevel,omitempty"` + Memo string `protobuf:"bytes,5,opt,name=memo,proto3" json:"memo,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SpendInfo) Reset() { *m = SpendInfo{} } +func (m *SpendInfo) String() string { return proto.CompactTextString(m) } +func (*SpendInfo) ProtoMessage() {} +func (*SpendInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{18} +} +func (m *SpendInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SpendInfo.Unmarshal(m, b) +} +func (m *SpendInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SpendInfo.Marshal(b, m, deterministic) +} +func (dst *SpendInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SpendInfo.Merge(dst, src) +} +func (m *SpendInfo) XXX_Size() int { + return xxx_messageInfo_SpendInfo.Size(m) +} +func (m *SpendInfo) 
XXX_DiscardUnknown() { + xxx_messageInfo_SpendInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SpendInfo proto.InternalMessageInfo + +func (m *SpendInfo) GetCoin() CoinType { + if m != nil { + return m.Coin + } + return CoinType_BITCOIN +} + +func (m *SpendInfo) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *SpendInfo) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +func (m *SpendInfo) GetFeeLevel() FeeLevel { + if m != nil { + return m.FeeLevel + } + return FeeLevel_ECONOMIC +} + +func (m *SpendInfo) GetMemo() string { + if m != nil { + return m.Memo + } + return "" +} + +type Confirmations struct { + Confirmations uint32 `protobuf:"varint,1,opt,name=confirmations,proto3" json:"confirmations,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Confirmations) Reset() { *m = Confirmations{} } +func (m *Confirmations) String() string { return proto.CompactTextString(m) } +func (*Confirmations) ProtoMessage() {} +func (*Confirmations) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{19} +} +func (m *Confirmations) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Confirmations.Unmarshal(m, b) +} +func (m *Confirmations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Confirmations.Marshal(b, m, deterministic) +} +func (dst *Confirmations) XXX_Merge(src proto.Message) { + xxx_messageInfo_Confirmations.Merge(dst, src) +} +func (m *Confirmations) XXX_Size() int { + return xxx_messageInfo_Confirmations.Size(m) +} +func (m *Confirmations) XXX_DiscardUnknown() { + xxx_messageInfo_Confirmations.DiscardUnknown(m) +} + +var xxx_messageInfo_Confirmations proto.InternalMessageInfo + +func (m *Confirmations) GetConfirmations() uint32 { + if m != nil { + return m.Confirmations + } + return 0 +} + +type Utxo struct { + Txid string 
`protobuf:"bytes,1,opt,name=txid,proto3" json:"txid,omitempty"` + Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` + Value uint64 `protobuf:"varint,3,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Utxo) Reset() { *m = Utxo{} } +func (m *Utxo) String() string { return proto.CompactTextString(m) } +func (*Utxo) ProtoMessage() {} +func (*Utxo) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{20} +} +func (m *Utxo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Utxo.Unmarshal(m, b) +} +func (m *Utxo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Utxo.Marshal(b, m, deterministic) +} +func (dst *Utxo) XXX_Merge(src proto.Message) { + xxx_messageInfo_Utxo.Merge(dst, src) +} +func (m *Utxo) XXX_Size() int { + return xxx_messageInfo_Utxo.Size(m) +} +func (m *Utxo) XXX_DiscardUnknown() { + xxx_messageInfo_Utxo.DiscardUnknown(m) +} + +var xxx_messageInfo_Utxo proto.InternalMessageInfo + +func (m *Utxo) GetTxid() string { + if m != nil { + return m.Txid + } + return "" +} + +func (m *Utxo) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *Utxo) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +type SweepInfo struct { + Coin CoinType `protobuf:"varint,1,opt,name=coin,proto3,enum=pb.CoinType" json:"coin,omitempty"` + Utxos []*Utxo `protobuf:"bytes,2,rep,name=utxos,proto3" json:"utxos,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"` + RedeemScript []byte `protobuf:"bytes,5,opt,name=redeemScript,proto3" json:"redeemScript,omitempty"` + FeeLevel FeeLevel `protobuf:"varint,6,opt,name=feeLevel,proto3,enum=pb.FeeLevel" json:"feeLevel,omitempty"` + XXX_NoUnkeyedLiteral 
struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SweepInfo) Reset() { *m = SweepInfo{} } +func (m *SweepInfo) String() string { return proto.CompactTextString(m) } +func (*SweepInfo) ProtoMessage() {} +func (*SweepInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{21} +} +func (m *SweepInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SweepInfo.Unmarshal(m, b) +} +func (m *SweepInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SweepInfo.Marshal(b, m, deterministic) +} +func (dst *SweepInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SweepInfo.Merge(dst, src) +} +func (m *SweepInfo) XXX_Size() int { + return xxx_messageInfo_SweepInfo.Size(m) +} +func (m *SweepInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SweepInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SweepInfo proto.InternalMessageInfo + +func (m *SweepInfo) GetCoin() CoinType { + if m != nil { + return m.Coin + } + return CoinType_BITCOIN +} + +func (m *SweepInfo) GetUtxos() []*Utxo { + if m != nil { + return m.Utxos + } + return nil +} + +func (m *SweepInfo) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *SweepInfo) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *SweepInfo) GetRedeemScript() []byte { + if m != nil { + return m.RedeemScript + } + return nil +} + +func (m *SweepInfo) GetFeeLevel() FeeLevel { + if m != nil { + return m.FeeLevel + } + return FeeLevel_ECONOMIC +} + +type Input struct { + Txid string `protobuf:"bytes,1,opt,name=txid,proto3" json:"txid,omitempty"` + Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Input) Reset() { *m = Input{} } +func (m *Input) String() string { return proto.CompactTextString(m) } 
+func (*Input) ProtoMessage() {} +func (*Input) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{22} +} +func (m *Input) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Input.Unmarshal(m, b) +} +func (m *Input) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Input.Marshal(b, m, deterministic) +} +func (dst *Input) XXX_Merge(src proto.Message) { + xxx_messageInfo_Input.Merge(dst, src) +} +func (m *Input) XXX_Size() int { + return xxx_messageInfo_Input.Size(m) +} +func (m *Input) XXX_DiscardUnknown() { + xxx_messageInfo_Input.DiscardUnknown(m) +} + +var xxx_messageInfo_Input proto.InternalMessageInfo + +func (m *Input) GetTxid() string { + if m != nil { + return m.Txid + } + return "" +} + +func (m *Input) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +type Output struct { + ScriptPubKey []byte `protobuf:"bytes,1,opt,name=scriptPubKey,proto3" json:"scriptPubKey,omitempty"` + Value uint64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Output) Reset() { *m = Output{} } +func (m *Output) String() string { return proto.CompactTextString(m) } +func (*Output) ProtoMessage() {} +func (*Output) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{23} +} +func (m *Output) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Output.Unmarshal(m, b) +} +func (m *Output) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Output.Marshal(b, m, deterministic) +} +func (dst *Output) XXX_Merge(src proto.Message) { + xxx_messageInfo_Output.Merge(dst, src) +} +func (m *Output) XXX_Size() int { + return xxx_messageInfo_Output.Size(m) +} +func (m *Output) XXX_DiscardUnknown() { + xxx_messageInfo_Output.DiscardUnknown(m) +} + +var xxx_messageInfo_Output 
proto.InternalMessageInfo + +func (m *Output) GetScriptPubKey() []byte { + if m != nil { + return m.ScriptPubKey + } + return nil +} + +func (m *Output) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +type Signature struct { + Index uint32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Signature) Reset() { *m = Signature{} } +func (m *Signature) String() string { return proto.CompactTextString(m) } +func (*Signature) ProtoMessage() {} +func (*Signature) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{24} +} +func (m *Signature) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Signature.Unmarshal(m, b) +} +func (m *Signature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Signature.Marshal(b, m, deterministic) +} +func (dst *Signature) XXX_Merge(src proto.Message) { + xxx_messageInfo_Signature.Merge(dst, src) +} +func (m *Signature) XXX_Size() int { + return xxx_messageInfo_Signature.Size(m) +} +func (m *Signature) XXX_DiscardUnknown() { + xxx_messageInfo_Signature.DiscardUnknown(m) +} + +var xxx_messageInfo_Signature proto.InternalMessageInfo + +func (m *Signature) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *Signature) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + +type CreateMultisigInfo struct { + Coin CoinType `protobuf:"varint,1,opt,name=coin,proto3,enum=pb.CoinType" json:"coin,omitempty"` + Inputs []*Input `protobuf:"bytes,2,rep,name=inputs,proto3" json:"inputs,omitempty"` + Outputs []*Output `protobuf:"bytes,3,rep,name=outputs,proto3" json:"outputs,omitempty"` + Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"` + 
RedeemScript []byte `protobuf:"bytes,5,opt,name=redeemScript,proto3" json:"redeemScript,omitempty"` + FeePerByte uint64 `protobuf:"varint,6,opt,name=feePerByte,proto3" json:"feePerByte,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateMultisigInfo) Reset() { *m = CreateMultisigInfo{} } +func (m *CreateMultisigInfo) String() string { return proto.CompactTextString(m) } +func (*CreateMultisigInfo) ProtoMessage() {} +func (*CreateMultisigInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{25} +} +func (m *CreateMultisigInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateMultisigInfo.Unmarshal(m, b) +} +func (m *CreateMultisigInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateMultisigInfo.Marshal(b, m, deterministic) +} +func (dst *CreateMultisigInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateMultisigInfo.Merge(dst, src) +} +func (m *CreateMultisigInfo) XXX_Size() int { + return xxx_messageInfo_CreateMultisigInfo.Size(m) +} +func (m *CreateMultisigInfo) XXX_DiscardUnknown() { + xxx_messageInfo_CreateMultisigInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateMultisigInfo proto.InternalMessageInfo + +func (m *CreateMultisigInfo) GetCoin() CoinType { + if m != nil { + return m.Coin + } + return CoinType_BITCOIN +} + +func (m *CreateMultisigInfo) GetInputs() []*Input { + if m != nil { + return m.Inputs + } + return nil +} + +func (m *CreateMultisigInfo) GetOutputs() []*Output { + if m != nil { + return m.Outputs + } + return nil +} + +func (m *CreateMultisigInfo) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *CreateMultisigInfo) GetRedeemScript() []byte { + if m != nil { + return m.RedeemScript + } + return nil +} + +func (m *CreateMultisigInfo) GetFeePerByte() uint64 { + if m != nil { + return m.FeePerByte + } + return 0 +} + 
+type SignatureList struct { + Sigs []*Signature `protobuf:"bytes,1,rep,name=sigs,proto3" json:"sigs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SignatureList) Reset() { *m = SignatureList{} } +func (m *SignatureList) String() string { return proto.CompactTextString(m) } +func (*SignatureList) ProtoMessage() {} +func (*SignatureList) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{26} +} +func (m *SignatureList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SignatureList.Unmarshal(m, b) +} +func (m *SignatureList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SignatureList.Marshal(b, m, deterministic) +} +func (dst *SignatureList) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureList.Merge(dst, src) +} +func (m *SignatureList) XXX_Size() int { + return xxx_messageInfo_SignatureList.Size(m) +} +func (m *SignatureList) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureList.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureList proto.InternalMessageInfo + +func (m *SignatureList) GetSigs() []*Signature { + if m != nil { + return m.Sigs + } + return nil +} + +type MultisignInfo struct { + Coin CoinType `protobuf:"varint,1,opt,name=coin,proto3,enum=pb.CoinType" json:"coin,omitempty"` + Inputs []*Input `protobuf:"bytes,2,rep,name=inputs,proto3" json:"inputs,omitempty"` + Outputs []*Output `protobuf:"bytes,3,rep,name=outputs,proto3" json:"outputs,omitempty"` + Sig1 []*Signature `protobuf:"bytes,4,rep,name=sig1,proto3" json:"sig1,omitempty"` + Sig2 []*Signature `protobuf:"bytes,5,rep,name=sig2,proto3" json:"sig2,omitempty"` + RedeemScript []byte `protobuf:"bytes,6,opt,name=redeemScript,proto3" json:"redeemScript,omitempty"` + FeePerByte uint64 `protobuf:"varint,7,opt,name=feePerByte,proto3" json:"feePerByte,omitempty"` + Broadcast bool `protobuf:"varint,8,opt,name=broadcast,proto3" 
json:"broadcast,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MultisignInfo) Reset() { *m = MultisignInfo{} } +func (m *MultisignInfo) String() string { return proto.CompactTextString(m) } +func (*MultisignInfo) ProtoMessage() {} +func (*MultisignInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{27} +} +func (m *MultisignInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MultisignInfo.Unmarshal(m, b) +} +func (m *MultisignInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MultisignInfo.Marshal(b, m, deterministic) +} +func (dst *MultisignInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_MultisignInfo.Merge(dst, src) +} +func (m *MultisignInfo) XXX_Size() int { + return xxx_messageInfo_MultisignInfo.Size(m) +} +func (m *MultisignInfo) XXX_DiscardUnknown() { + xxx_messageInfo_MultisignInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_MultisignInfo proto.InternalMessageInfo + +func (m *MultisignInfo) GetCoin() CoinType { + if m != nil { + return m.Coin + } + return CoinType_BITCOIN +} + +func (m *MultisignInfo) GetInputs() []*Input { + if m != nil { + return m.Inputs + } + return nil +} + +func (m *MultisignInfo) GetOutputs() []*Output { + if m != nil { + return m.Outputs + } + return nil +} + +func (m *MultisignInfo) GetSig1() []*Signature { + if m != nil { + return m.Sig1 + } + return nil +} + +func (m *MultisignInfo) GetSig2() []*Signature { + if m != nil { + return m.Sig2 + } + return nil +} + +func (m *MultisignInfo) GetRedeemScript() []byte { + if m != nil { + return m.RedeemScript + } + return nil +} + +func (m *MultisignInfo) GetFeePerByte() uint64 { + if m != nil { + return m.FeePerByte + } + return 0 +} + +func (m *MultisignInfo) GetBroadcast() bool { + if m != nil { + return m.Broadcast + } + return false +} + +type RawTx struct { + Tx []byte 
`protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RawTx) Reset() { *m = RawTx{} } +func (m *RawTx) String() string { return proto.CompactTextString(m) } +func (*RawTx) ProtoMessage() {} +func (*RawTx) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{28} +} +func (m *RawTx) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RawTx.Unmarshal(m, b) +} +func (m *RawTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RawTx.Marshal(b, m, deterministic) +} +func (dst *RawTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawTx.Merge(dst, src) +} +func (m *RawTx) XXX_Size() int { + return xxx_messageInfo_RawTx.Size(m) +} +func (m *RawTx) XXX_DiscardUnknown() { + xxx_messageInfo_RawTx.DiscardUnknown(m) +} + +var xxx_messageInfo_RawTx proto.InternalMessageInfo + +func (m *RawTx) GetTx() []byte { + if m != nil { + return m.Tx + } + return nil +} + +type EstimateFeeData struct { + Coin CoinType `protobuf:"varint,1,opt,name=coin,proto3,enum=pb.CoinType" json:"coin,omitempty"` + Inputs []*Input `protobuf:"bytes,2,rep,name=inputs,proto3" json:"inputs,omitempty"` + Outputs []*Output `protobuf:"bytes,3,rep,name=outputs,proto3" json:"outputs,omitempty"` + FeePerByte uint64 `protobuf:"varint,4,opt,name=feePerByte,proto3" json:"feePerByte,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EstimateFeeData) Reset() { *m = EstimateFeeData{} } +func (m *EstimateFeeData) String() string { return proto.CompactTextString(m) } +func (*EstimateFeeData) ProtoMessage() {} +func (*EstimateFeeData) Descriptor() ([]byte, []int) { + return fileDescriptor_api_2ff753dddd9b028a, []int{29} +} +func (m *EstimateFeeData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EstimateFeeData.Unmarshal(m, 
b) +} +func (m *EstimateFeeData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EstimateFeeData.Marshal(b, m, deterministic) +} +func (dst *EstimateFeeData) XXX_Merge(src proto.Message) { + xxx_messageInfo_EstimateFeeData.Merge(dst, src) +} +func (m *EstimateFeeData) XXX_Size() int { + return xxx_messageInfo_EstimateFeeData.Size(m) +} +func (m *EstimateFeeData) XXX_DiscardUnknown() { + xxx_messageInfo_EstimateFeeData.DiscardUnknown(m) +} + +var xxx_messageInfo_EstimateFeeData proto.InternalMessageInfo + +func (m *EstimateFeeData) GetCoin() CoinType { + if m != nil { + return m.Coin + } + return CoinType_BITCOIN +} + +func (m *EstimateFeeData) GetInputs() []*Input { + if m != nil { + return m.Inputs + } + return nil +} + +func (m *EstimateFeeData) GetOutputs() []*Output { + if m != nil { + return m.Outputs + } + return nil +} + +func (m *EstimateFeeData) GetFeePerByte() uint64 { + if m != nil { + return m.FeePerByte + } + return 0 +} + +func init() { + proto.RegisterType((*Empty)(nil), "pb.Empty") + proto.RegisterType((*CoinSelection)(nil), "pb.CoinSelection") + proto.RegisterType((*Row)(nil), "pb.Row") + proto.RegisterType((*KeySelection)(nil), "pb.KeySelection") + proto.RegisterType((*Address)(nil), "pb.Address") + proto.RegisterType((*Height)(nil), "pb.Height") + proto.RegisterType((*Balances)(nil), "pb.Balances") + proto.RegisterType((*Key)(nil), "pb.Key") + proto.RegisterType((*Keys)(nil), "pb.Keys") + proto.RegisterType((*Addresses)(nil), "pb.Addresses") + proto.RegisterType((*BoolResponse)(nil), "pb.BoolResponse") + proto.RegisterType((*NetParams)(nil), "pb.NetParams") + proto.RegisterType((*TransactionList)(nil), "pb.TransactionList") + proto.RegisterType((*Tx)(nil), "pb.Tx") + proto.RegisterType((*Txid)(nil), "pb.Txid") + proto.RegisterType((*FeeLevelSelection)(nil), "pb.FeeLevelSelection") + proto.RegisterType((*FeePerByte)(nil), "pb.FeePerByte") + proto.RegisterType((*Fee)(nil), "pb.Fee") + 
proto.RegisterType((*SpendInfo)(nil), "pb.SpendInfo") + proto.RegisterType((*Confirmations)(nil), "pb.Confirmations") + proto.RegisterType((*Utxo)(nil), "pb.Utxo") + proto.RegisterType((*SweepInfo)(nil), "pb.SweepInfo") + proto.RegisterType((*Input)(nil), "pb.Input") + proto.RegisterType((*Output)(nil), "pb.Output") + proto.RegisterType((*Signature)(nil), "pb.Signature") + proto.RegisterType((*CreateMultisigInfo)(nil), "pb.CreateMultisigInfo") + proto.RegisterType((*SignatureList)(nil), "pb.SignatureList") + proto.RegisterType((*MultisignInfo)(nil), "pb.MultisignInfo") + proto.RegisterType((*RawTx)(nil), "pb.RawTx") + proto.RegisterType((*EstimateFeeData)(nil), "pb.EstimateFeeData") + proto.RegisterEnum("pb.CoinType", CoinType_name, CoinType_value) + proto.RegisterEnum("pb.KeyPurpose", KeyPurpose_name, KeyPurpose_value) + proto.RegisterEnum("pb.FeeLevel", FeeLevel_name, FeeLevel_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// APIClient is the client API for API service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type APIClient interface { + Stop(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) + CurrentAddress(ctx context.Context, in *KeySelection, opts ...grpc.CallOption) (*Address, error) + NewAddress(ctx context.Context, in *KeySelection, opts ...grpc.CallOption) (*Address, error) + ChainTip(ctx context.Context, in *CoinSelection, opts ...grpc.CallOption) (*Height, error) + Balance(ctx context.Context, in *CoinSelection, opts ...grpc.CallOption) (*Balances, error) + MasterPrivateKey(ctx context.Context, in *CoinSelection, opts ...grpc.CallOption) (*Key, error) + MasterPublicKey(ctx context.Context, in *CoinSelection, opts ...grpc.CallOption) (*Key, error) + HasKey(ctx context.Context, in *Address, opts ...grpc.CallOption) (*BoolResponse, error) + Params(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*NetParams, error) + Transactions(ctx context.Context, in *CoinSelection, opts ...grpc.CallOption) (*TransactionList, error) + GetTransaction(ctx context.Context, in *Txid, opts ...grpc.CallOption) (*Tx, error) + GetFeePerByte(ctx context.Context, in *FeeLevelSelection, opts ...grpc.CallOption) (*FeePerByte, error) + Spend(ctx context.Context, in *SpendInfo, opts ...grpc.CallOption) (*Txid, error) + BumpFee(ctx context.Context, in *Txid, opts ...grpc.CallOption) (*Txid, error) + AddWatchedScript(ctx context.Context, in *Address, opts ...grpc.CallOption) (*Empty, error) + GetConfirmations(ctx context.Context, in *Txid, opts ...grpc.CallOption) (*Confirmations, error) + SweepAddress(ctx context.Context, in *SweepInfo, opts ...grpc.CallOption) (*Txid, error) + CreateMultisigSignature(ctx context.Context, in *CreateMultisigInfo, opts ...grpc.CallOption) (*SignatureList, error) + Multisign(ctx context.Context, in *MultisignInfo, opts ...grpc.CallOption) (*RawTx, error) + EstimateFee(ctx context.Context, in *EstimateFeeData, opts ...grpc.CallOption) (*Fee, error) + GetKey(ctx context.Context, in *Address, opts ...grpc.CallOption) (*Key, error) + 
ListKeys(ctx context.Context, in *CoinSelection, opts ...grpc.CallOption) (*Keys, error) + ListAddresses(ctx context.Context, in *CoinSelection, opts ...grpc.CallOption) (*Addresses, error) + WalletNotify(ctx context.Context, in *CoinSelection, opts ...grpc.CallOption) (API_WalletNotifyClient, error) + DumpTables(ctx context.Context, in *CoinSelection, opts ...grpc.CallOption) (API_DumpTablesClient, error) +} + +type aPIClient struct { + cc *grpc.ClientConn +} + +func NewAPIClient(cc *grpc.ClientConn) APIClient { + return &aPIClient{cc} +} + +func (c *aPIClient) Stop(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/pb.API/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) CurrentAddress(ctx context.Context, in *KeySelection, opts ...grpc.CallOption) (*Address, error) { + out := new(Address) + err := c.cc.Invoke(ctx, "/pb.API/CurrentAddress", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) NewAddress(ctx context.Context, in *KeySelection, opts ...grpc.CallOption) (*Address, error) { + out := new(Address) + err := c.cc.Invoke(ctx, "/pb.API/NewAddress", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) ChainTip(ctx context.Context, in *CoinSelection, opts ...grpc.CallOption) (*Height, error) { + out := new(Height) + err := c.cc.Invoke(ctx, "/pb.API/ChainTip", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) Balance(ctx context.Context, in *CoinSelection, opts ...grpc.CallOption) (*Balances, error) { + out := new(Balances) + err := c.cc.Invoke(ctx, "/pb.API/Balance", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) MasterPrivateKey(ctx context.Context, in *CoinSelection, opts ...grpc.CallOption) (*Key, error) { + out := new(Key) + err := c.cc.Invoke(ctx, "/pb.API/MasterPrivateKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) MasterPublicKey(ctx context.Context, in *CoinSelection, opts ...grpc.CallOption) (*Key, error) { + out := new(Key) + err := c.cc.Invoke(ctx, "/pb.API/MasterPublicKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) HasKey(ctx context.Context, in *Address, opts ...grpc.CallOption) (*BoolResponse, error) { + out := new(BoolResponse) + err := c.cc.Invoke(ctx, "/pb.API/HasKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) Params(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*NetParams, error) { + out := new(NetParams) + err := c.cc.Invoke(ctx, "/pb.API/Params", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) Transactions(ctx context.Context, in *CoinSelection, opts ...grpc.CallOption) (*TransactionList, error) { + out := new(TransactionList) + err := c.cc.Invoke(ctx, "/pb.API/Transactions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) GetTransaction(ctx context.Context, in *Txid, opts ...grpc.CallOption) (*Tx, error) { + out := new(Tx) + err := c.cc.Invoke(ctx, "/pb.API/GetTransaction", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) GetFeePerByte(ctx context.Context, in *FeeLevelSelection, opts ...grpc.CallOption) (*FeePerByte, error) { + out := new(FeePerByte) + err := c.cc.Invoke(ctx, "/pb.API/GetFeePerByte", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) Spend(ctx context.Context, in *SpendInfo, opts ...grpc.CallOption) (*Txid, error) { + out := new(Txid) + err := c.cc.Invoke(ctx, "/pb.API/Spend", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) BumpFee(ctx context.Context, in *Txid, opts ...grpc.CallOption) (*Txid, error) { + out := new(Txid) + err := c.cc.Invoke(ctx, "/pb.API/BumpFee", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) AddWatchedScript(ctx context.Context, in *Address, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/pb.API/AddWatchedScript", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) GetConfirmations(ctx context.Context, in *Txid, opts ...grpc.CallOption) (*Confirmations, error) { + out := new(Confirmations) + err := c.cc.Invoke(ctx, "/pb.API/GetConfirmations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) SweepAddress(ctx context.Context, in *SweepInfo, opts ...grpc.CallOption) (*Txid, error) { + out := new(Txid) + err := c.cc.Invoke(ctx, "/pb.API/SweepAddress", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) CreateMultisigSignature(ctx context.Context, in *CreateMultisigInfo, opts ...grpc.CallOption) (*SignatureList, error) { + out := new(SignatureList) + err := c.cc.Invoke(ctx, "/pb.API/CreateMultisigSignature", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) Multisign(ctx context.Context, in *MultisignInfo, opts ...grpc.CallOption) (*RawTx, error) { + out := new(RawTx) + err := c.cc.Invoke(ctx, "/pb.API/Multisign", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) EstimateFee(ctx context.Context, in *EstimateFeeData, opts ...grpc.CallOption) (*Fee, error) { + out := new(Fee) + err := c.cc.Invoke(ctx, "/pb.API/EstimateFee", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) GetKey(ctx context.Context, in *Address, opts ...grpc.CallOption) (*Key, error) { + out := new(Key) + err := c.cc.Invoke(ctx, "/pb.API/GetKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) ListKeys(ctx context.Context, in *CoinSelection, opts ...grpc.CallOption) (*Keys, error) { + out := new(Keys) + err := c.cc.Invoke(ctx, "/pb.API/ListKeys", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) ListAddresses(ctx context.Context, in *CoinSelection, opts ...grpc.CallOption) (*Addresses, error) { + out := new(Addresses) + err := c.cc.Invoke(ctx, "/pb.API/ListAddresses", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aPIClient) WalletNotify(ctx context.Context, in *CoinSelection, opts ...grpc.CallOption) (API_WalletNotifyClient, error) { + stream, err := c.cc.NewStream(ctx, &_API_serviceDesc.Streams[0], "/pb.API/WalletNotify", opts...) 
+ if err != nil { + return nil, err + } + x := &aPIWalletNotifyClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type API_WalletNotifyClient interface { + Recv() (*Tx, error) + grpc.ClientStream +} + +type aPIWalletNotifyClient struct { + grpc.ClientStream +} + +func (x *aPIWalletNotifyClient) Recv() (*Tx, error) { + m := new(Tx) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *aPIClient) DumpTables(ctx context.Context, in *CoinSelection, opts ...grpc.CallOption) (API_DumpTablesClient, error) { + stream, err := c.cc.NewStream(ctx, &_API_serviceDesc.Streams[1], "/pb.API/DumpTables", opts...) + if err != nil { + return nil, err + } + x := &aPIDumpTablesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type API_DumpTablesClient interface { + Recv() (*Row, error) + grpc.ClientStream +} + +type aPIDumpTablesClient struct { + grpc.ClientStream +} + +func (x *aPIDumpTablesClient) Recv() (*Row, error) { + m := new(Row) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// APIServer is the server API for API service. 
+type APIServer interface { + Stop(context.Context, *Empty) (*Empty, error) + CurrentAddress(context.Context, *KeySelection) (*Address, error) + NewAddress(context.Context, *KeySelection) (*Address, error) + ChainTip(context.Context, *CoinSelection) (*Height, error) + Balance(context.Context, *CoinSelection) (*Balances, error) + MasterPrivateKey(context.Context, *CoinSelection) (*Key, error) + MasterPublicKey(context.Context, *CoinSelection) (*Key, error) + HasKey(context.Context, *Address) (*BoolResponse, error) + Params(context.Context, *Empty) (*NetParams, error) + Transactions(context.Context, *CoinSelection) (*TransactionList, error) + GetTransaction(context.Context, *Txid) (*Tx, error) + GetFeePerByte(context.Context, *FeeLevelSelection) (*FeePerByte, error) + Spend(context.Context, *SpendInfo) (*Txid, error) + BumpFee(context.Context, *Txid) (*Txid, error) + AddWatchedScript(context.Context, *Address) (*Empty, error) + GetConfirmations(context.Context, *Txid) (*Confirmations, error) + SweepAddress(context.Context, *SweepInfo) (*Txid, error) + CreateMultisigSignature(context.Context, *CreateMultisigInfo) (*SignatureList, error) + Multisign(context.Context, *MultisignInfo) (*RawTx, error) + EstimateFee(context.Context, *EstimateFeeData) (*Fee, error) + GetKey(context.Context, *Address) (*Key, error) + ListKeys(context.Context, *CoinSelection) (*Keys, error) + ListAddresses(context.Context, *CoinSelection) (*Addresses, error) + WalletNotify(*CoinSelection, API_WalletNotifyServer) error + DumpTables(*CoinSelection, API_DumpTablesServer) error +} + +func RegisterAPIServer(s *grpc.Server, srv APIServer) { + s.RegisterService(&_API_serviceDesc, srv) +} + +func _API_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).Stop(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).Stop(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_CurrentAddress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KeySelection) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).CurrentAddress(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/CurrentAddress", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).CurrentAddress(ctx, req.(*KeySelection)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_NewAddress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KeySelection) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).NewAddress(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/NewAddress", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).NewAddress(ctx, req.(*KeySelection)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_ChainTip_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CoinSelection) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).ChainTip(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/ChainTip", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).ChainTip(ctx, 
req.(*CoinSelection)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_Balance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CoinSelection) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).Balance(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/Balance", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).Balance(ctx, req.(*CoinSelection)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_MasterPrivateKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CoinSelection) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).MasterPrivateKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/MasterPrivateKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).MasterPrivateKey(ctx, req.(*CoinSelection)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_MasterPublicKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CoinSelection) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).MasterPublicKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/MasterPublicKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).MasterPublicKey(ctx, req.(*CoinSelection)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_HasKey_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Address) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).HasKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/HasKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).HasKey(ctx, req.(*Address)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).Params(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_Transactions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CoinSelection) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).Transactions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/Transactions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).Transactions(ctx, req.(*CoinSelection)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_GetTransaction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Txid) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).GetTransaction(ctx, 
in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/GetTransaction", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).GetTransaction(ctx, req.(*Txid)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_GetFeePerByte_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FeeLevelSelection) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).GetFeePerByte(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/GetFeePerByte", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).GetFeePerByte(ctx, req.(*FeeLevelSelection)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_Spend_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SpendInfo) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).Spend(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/Spend", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).Spend(ctx, req.(*SpendInfo)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_BumpFee_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Txid) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).BumpFee(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/BumpFee", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).BumpFee(ctx, 
req.(*Txid)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_AddWatchedScript_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Address) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).AddWatchedScript(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/AddWatchedScript", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).AddWatchedScript(ctx, req.(*Address)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_GetConfirmations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Txid) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).GetConfirmations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/GetConfirmations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).GetConfirmations(ctx, req.(*Txid)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_SweepAddress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SweepInfo) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).SweepAddress(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/SweepAddress", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).SweepAddress(ctx, req.(*SweepInfo)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_CreateMultisigSignature_Handler(srv interface{}, ctx context.Context, dec func(interface{}) 
error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateMultisigInfo) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).CreateMultisigSignature(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/CreateMultisigSignature", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).CreateMultisigSignature(ctx, req.(*CreateMultisigInfo)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_Multisign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MultisignInfo) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).Multisign(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/Multisign", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).Multisign(ctx, req.(*MultisignInfo)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_EstimateFee_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EstimateFeeData) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).EstimateFee(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/EstimateFee", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).EstimateFee(ctx, req.(*EstimateFeeData)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_GetKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Address) + if err := dec(in); err != nil { + return nil, 
err + } + if interceptor == nil { + return srv.(APIServer).GetKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/GetKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).GetKey(ctx, req.(*Address)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_ListKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CoinSelection) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).ListKeys(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/ListKeys", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).ListKeys(ctx, req.(*CoinSelection)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_ListAddresses_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CoinSelection) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(APIServer).ListAddresses(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.API/ListAddresses", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(APIServer).ListAddresses(ctx, req.(*CoinSelection)) + } + return interceptor(ctx, in, info, handler) +} + +func _API_WalletNotify_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(CoinSelection) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(APIServer).WalletNotify(m, &aPIWalletNotifyServer{stream}) +} + +type API_WalletNotifyServer interface { + Send(*Tx) error + grpc.ServerStream +} + +type aPIWalletNotifyServer struct { + grpc.ServerStream +} + +func (x *aPIWalletNotifyServer) Send(m *Tx) 
error { + return x.ServerStream.SendMsg(m) +} + +func _API_DumpTables_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(CoinSelection) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(APIServer).DumpTables(m, &aPIDumpTablesServer{stream}) +} + +type API_DumpTablesServer interface { + Send(*Row) error + grpc.ServerStream +} + +type aPIDumpTablesServer struct { + grpc.ServerStream +} + +func (x *aPIDumpTablesServer) Send(m *Row) error { + return x.ServerStream.SendMsg(m) +} + +var _API_serviceDesc = grpc.ServiceDesc{ + ServiceName: "pb.API", + HandlerType: (*APIServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Stop", + Handler: _API_Stop_Handler, + }, + { + MethodName: "CurrentAddress", + Handler: _API_CurrentAddress_Handler, + }, + { + MethodName: "NewAddress", + Handler: _API_NewAddress_Handler, + }, + { + MethodName: "ChainTip", + Handler: _API_ChainTip_Handler, + }, + { + MethodName: "Balance", + Handler: _API_Balance_Handler, + }, + { + MethodName: "MasterPrivateKey", + Handler: _API_MasterPrivateKey_Handler, + }, + { + MethodName: "MasterPublicKey", + Handler: _API_MasterPublicKey_Handler, + }, + { + MethodName: "HasKey", + Handler: _API_HasKey_Handler, + }, + { + MethodName: "Params", + Handler: _API_Params_Handler, + }, + { + MethodName: "Transactions", + Handler: _API_Transactions_Handler, + }, + { + MethodName: "GetTransaction", + Handler: _API_GetTransaction_Handler, + }, + { + MethodName: "GetFeePerByte", + Handler: _API_GetFeePerByte_Handler, + }, + { + MethodName: "Spend", + Handler: _API_Spend_Handler, + }, + { + MethodName: "BumpFee", + Handler: _API_BumpFee_Handler, + }, + { + MethodName: "AddWatchedScript", + Handler: _API_AddWatchedScript_Handler, + }, + { + MethodName: "GetConfirmations", + Handler: _API_GetConfirmations_Handler, + }, + { + MethodName: "SweepAddress", + Handler: _API_SweepAddress_Handler, + }, + { + MethodName: "CreateMultisigSignature", + Handler: 
_API_CreateMultisigSignature_Handler, + }, + { + MethodName: "Multisign", + Handler: _API_Multisign_Handler, + }, + { + MethodName: "EstimateFee", + Handler: _API_EstimateFee_Handler, + }, + { + MethodName: "GetKey", + Handler: _API_GetKey_Handler, + }, + { + MethodName: "ListKeys", + Handler: _API_ListKeys_Handler, + }, + { + MethodName: "ListAddresses", + Handler: _API_ListAddresses_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "WalletNotify", + Handler: _API_WalletNotify_Handler, + ServerStreams: true, + }, + { + StreamName: "DumpTables", + Handler: _API_DumpTables_Handler, + ServerStreams: true, + }, + }, + Metadata: "api.proto", +} + +func init() { proto.RegisterFile("api.proto", fileDescriptor_api_2ff753dddd9b028a) } + +var fileDescriptor_api_2ff753dddd9b028a = []byte{ + // 1449 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcd, 0x72, 0xdb, 0x46, + 0x12, 0xe6, 0x0f, 0xf8, 0x83, 0x16, 0x28, 0xd3, 0xb3, 0xbb, 0x36, 0x57, 0xeb, 0x92, 0xe9, 0x59, + 0x1f, 0x64, 0xad, 0x57, 0xb6, 0xe8, 0x4a, 0xca, 0x87, 0xa4, 0x5c, 0x12, 0xad, 0x1f, 0x46, 0x12, + 0xc5, 0x1a, 0xd1, 0xe5, 0xc4, 0x17, 0xd7, 0x90, 0x68, 0x49, 0x28, 0x93, 0x00, 0x0a, 0x18, 0x58, + 0xe4, 0x3d, 0x8f, 0x91, 0x5c, 0xf2, 0x04, 0x79, 0x8b, 0x3c, 0x41, 0xde, 0x27, 0x35, 0x83, 0x01, + 0x01, 0xc8, 0xb4, 0x2d, 0xe7, 0xe0, 0x5b, 0x4f, 0xf7, 0x87, 0x99, 0xee, 0xaf, 0x7b, 0x7a, 0x1a, + 0x60, 0x72, 0xdf, 0xd9, 0xf2, 0x03, 0x4f, 0x78, 0xa4, 0xe4, 0x8f, 0xd6, 0xee, 0x5f, 0x78, 0xde, + 0xc5, 0x04, 0x9f, 0x28, 0xcd, 0x28, 0x3a, 0x7f, 0x22, 0x9c, 0x29, 0x86, 0x82, 0x4f, 0xfd, 0x18, + 0x44, 0x6b, 0x50, 0xd9, 0x9b, 0xfa, 0x62, 0x4e, 0xb7, 0xa1, 0xd1, 0xf5, 0x1c, 0xf7, 0x0c, 0x27, + 0x38, 0x16, 0x8e, 0xe7, 0x92, 0x36, 0x18, 0x63, 0xcf, 0x71, 0x5b, 0xc5, 0x76, 0x71, 0x63, 0xb5, + 0x63, 0x6d, 0xf9, 0xa3, 0x2d, 0x09, 0x18, 0xce, 0x7d, 0x64, 0xca, 0x42, 0xff, 0x0d, 0x65, 0xe6, + 0x5d, 0x11, 0x02, 0x86, 0xcd, 0x05, 0x57, 0x40, 0x93, 0x29, 
0x99, 0xbe, 0x01, 0xeb, 0x08, 0xe7, + 0x5f, 0xb0, 0x19, 0xd9, 0x80, 0x9a, 0x1f, 0x05, 0xbe, 0x17, 0x62, 0xab, 0xa4, 0x40, 0xab, 0x12, + 0x74, 0x84, 0xf3, 0x41, 0xac, 0x65, 0x89, 0x99, 0xbe, 0x80, 0xda, 0x8e, 0x6d, 0x07, 0x18, 0x86, + 0x37, 0xd8, 0x96, 0x80, 0xc1, 0x6d, 0x3b, 0x50, 0x7b, 0x9a, 0x4c, 0xc9, 0xb4, 0x0d, 0xd5, 0x43, + 0x74, 0x2e, 0x2e, 0x05, 0xb9, 0x03, 0xd5, 0x4b, 0x25, 0xa9, 0x1d, 0x1a, 0x4c, 0xaf, 0xe8, 0x0f, + 0x50, 0xdf, 0xe5, 0x13, 0xee, 0x8e, 0x31, 0x24, 0xf7, 0xc0, 0x1c, 0x7b, 0xee, 0xb9, 0x13, 0x4c, + 0xd1, 0x56, 0x30, 0x83, 0xa5, 0x0a, 0xd2, 0x86, 0x95, 0xc8, 0x4d, 0xed, 0x25, 0x65, 0xcf, 0xaa, + 0xe8, 0x5d, 0x28, 0x1f, 0xe1, 0x9c, 0x34, 0xa1, 0xfc, 0x0e, 0xe7, 0x9a, 0x24, 0x29, 0xd2, 0xff, + 0x82, 0x71, 0x84, 0xf3, 0x90, 0xfc, 0x07, 0x8c, 0x77, 0x38, 0x0f, 0x5b, 0xc5, 0x76, 0x79, 0x63, + 0xa5, 0x53, 0xd3, 0x61, 0x33, 0xa5, 0xa4, 0xdf, 0x82, 0xa9, 0x83, 0xc5, 0x90, 0x3c, 0x02, 0x93, + 0x27, 0x0b, 0x0d, 0x5f, 0x91, 0x70, 0x8d, 0x60, 0xa9, 0x95, 0x52, 0xb0, 0x76, 0x3d, 0x6f, 0xc2, + 0x30, 0xf4, 0x3d, 0x37, 0x44, 0xc9, 0xc3, 0xc8, 0xf3, 0x26, 0xea, 0xfc, 0x3a, 0x53, 0x32, 0xbd, + 0x0f, 0x66, 0x1f, 0xc5, 0x80, 0x07, 0x7c, 0x1a, 0x4a, 0x80, 0xcb, 0xa7, 0x98, 0x64, 0x51, 0xca, + 0xf4, 0x7b, 0xb8, 0x35, 0x0c, 0xb8, 0x1b, 0x72, 0x95, 0xc4, 0x63, 0x27, 0x14, 0x64, 0x13, 0x2c, + 0x91, 0xaa, 0x12, 0x2f, 0xaa, 0xd2, 0x8b, 0xe1, 0x8c, 0xe5, 0x6c, 0xf4, 0xf7, 0x22, 0x94, 0x86, + 0x33, 0xb9, 0xb3, 0x98, 0x39, 0x76, 0xb2, 0xb3, 0x94, 0xc9, 0x3f, 0xa1, 0xf2, 0x9e, 0x4f, 0xa2, + 0x38, 0xd7, 0x65, 0x16, 0x2f, 0x32, 0xe9, 0x28, 0xb7, 0x8b, 0x1b, 0x95, 0x24, 0x1d, 0xe4, 0x39, + 0x98, 0x8b, 0xba, 0x6d, 0x19, 0xed, 0xe2, 0xc6, 0x4a, 0x67, 0x6d, 0x2b, 0xae, 0xec, 0xad, 0xa4, + 0xb2, 0xb7, 0x86, 0x09, 0x82, 0xa5, 0x60, 0x99, 0xbc, 0x2b, 0x2e, 0xc6, 0x97, 0xa7, 0xee, 0x64, + 0xde, 0xaa, 0xa8, 0xd8, 0x53, 0x85, 0xcc, 0x49, 0xc0, 0xaf, 0x5a, 0xd5, 0x76, 0x71, 0xc3, 0x62, + 0x52, 0xa4, 0xdf, 0x81, 0x31, 0x94, 0xfe, 0xdd, 0xa8, 0xb0, 0x2e, 0x79, 0x78, 0x99, 0x14, 0x96, + 
0x94, 0xe9, 0x5b, 0xb8, 0xbd, 0x8f, 0x78, 0x8c, 0xef, 0x71, 0xf2, 0x65, 0xa5, 0x5f, 0x3f, 0xd7, + 0x9f, 0xe9, 0xda, 0x57, 0xa8, 0x64, 0x2b, 0xb6, 0xb0, 0xd2, 0x75, 0x80, 0x7d, 0xc4, 0x01, 0x06, + 0xbb, 0x73, 0x81, 0xd2, 0xfd, 0x73, 0x44, 0x5d, 0x93, 0x52, 0x94, 0xb5, 0xb6, 0x8f, 0xcb, 0x0c, + 0xbf, 0x16, 0xc1, 0x3c, 0xf3, 0xd1, 0xb5, 0x7b, 0xee, 0xb9, 0x77, 0x03, 0x97, 0x5a, 0x50, 0xd3, + 0xb5, 0xa4, 0x03, 0x4c, 0x96, 0x32, 0x47, 0x7c, 0xea, 0x45, 0x6e, 0x9c, 0x23, 0x83, 0xe9, 0x55, + 0x2e, 0x08, 0xe3, 0x53, 0x41, 0x48, 0xe6, 0xa6, 0x38, 0xf5, 0x54, 0x3a, 0x4c, 0xa6, 0x64, 0xfa, + 0x8d, 0xec, 0x3e, 0xea, 0xc6, 0x70, 0x55, 0x3b, 0xe4, 0x21, 0x34, 0xc6, 0x59, 0x85, 0xbe, 0xa0, + 0x79, 0x25, 0xdd, 0x07, 0xe3, 0x95, 0x98, 0x79, 0x1f, 0x2b, 0x31, 0xc7, 0xb5, 0x71, 0xa6, 0x02, + 0x68, 0xb0, 0x78, 0x91, 0x16, 0x5e, 0xec, 0x7d, 0xbc, 0xa0, 0x7f, 0x48, 0x7a, 0xae, 0x10, 0xfd, + 0x1b, 0xd2, 0xb3, 0x0e, 0x95, 0x48, 0xcc, 0x3c, 0x49, 0x8e, 0x2c, 0xff, 0xba, 0x84, 0x48, 0x47, + 0x58, 0xac, 0xce, 0xd2, 0x57, 0xce, 0xd3, 0xa7, 0xdb, 0x80, 0xb1, 0x68, 0x03, 0x84, 0x82, 0x15, + 0xa0, 0x8d, 0x38, 0x3d, 0x1b, 0x07, 0x8e, 0x2f, 0x14, 0x2d, 0x16, 0xcb, 0xe9, 0x72, 0xe4, 0x56, + 0x3f, 0x59, 0x21, 0xdb, 0x50, 0xe9, 0xb9, 0x7e, 0x24, 0x6e, 0x4e, 0x09, 0xdd, 0x85, 0xea, 0x69, + 0x24, 0xe4, 0x37, 0x14, 0xac, 0x50, 0x1d, 0x38, 0x88, 0x46, 0x47, 0xba, 0x59, 0x59, 0x2c, 0xa7, + 0xcb, 0xdf, 0xdc, 0x05, 0x81, 0x2f, 0xc0, 0x3c, 0x73, 0x2e, 0x5c, 0x2e, 0xa2, 0x00, 0xd3, 0x63, + 0x8a, 0x59, 0xe6, 0xef, 0x81, 0x19, 0x26, 0x10, 0xf5, 0xb1, 0xc5, 0x52, 0x05, 0xfd, 0xb3, 0x08, + 0xa4, 0x1b, 0x20, 0x17, 0x78, 0x12, 0x4d, 0x84, 0x13, 0x3a, 0x17, 0x37, 0x4c, 0xc5, 0x03, 0xa8, + 0x3a, 0x32, 0xe0, 0x24, 0x17, 0xa6, 0xc4, 0x28, 0x0a, 0x98, 0x36, 0x90, 0x87, 0x50, 0xf3, 0x54, + 0x80, 0x32, 0x1b, 0x12, 0x03, 0x12, 0x13, 0xc7, 0xcc, 0x12, 0xd3, 0xdf, 0xcc, 0xcc, 0x3a, 0xc0, + 0xf9, 0xe2, 0x46, 0xaa, 0xdc, 0x18, 0x2c, 0xa3, 0xa1, 0x1d, 0x68, 0x2c, 0x88, 0x51, 0x0d, 0xf4, + 0x01, 0x18, 0xa1, 0x73, 0x91, 0x34, 
0xce, 0x86, 0xf4, 0x64, 0x01, 0x60, 0xca, 0x44, 0x7f, 0x2b, + 0x41, 0x23, 0x61, 0xc1, 0xfd, 0xda, 0x34, 0xc4, 0xfe, 0x6d, 0xb7, 0x8c, 0x8f, 0xf9, 0xb7, 0xad, + 0x21, 0x9d, 0x56, 0xe5, 0x63, 0x90, 0xce, 0x07, 0xd4, 0x55, 0x3f, 0x4b, 0x5d, 0xed, 0x3a, 0x75, + 0xb2, 0x60, 0x46, 0x81, 0xc7, 0xed, 0x31, 0x0f, 0x45, 0xab, 0x1e, 0xf7, 0xee, 0x85, 0x82, 0xde, + 0x85, 0x0a, 0xe3, 0x57, 0xc3, 0x19, 0x59, 0x85, 0x92, 0x98, 0xe9, 0x52, 0x2d, 0x89, 0x19, 0xfd, + 0xa5, 0x08, 0xb7, 0xf6, 0x42, 0xe1, 0x4c, 0xb9, 0xc0, 0x7d, 0xc4, 0x97, 0x5c, 0xf0, 0xaf, 0xc9, + 0x5f, 0x3e, 0x2a, 0xe3, 0x7a, 0x54, 0x9b, 0x03, 0xa8, 0x27, 0x47, 0x93, 0x15, 0xa8, 0xed, 0xf6, + 0x86, 0xdd, 0xd3, 0x5e, 0xbf, 0x59, 0x20, 0x4d, 0xb0, 0xf4, 0xe2, 0x6d, 0x77, 0xe7, 0xec, 0xb0, + 0x59, 0x24, 0x26, 0x54, 0xde, 0x28, 0xb1, 0x44, 0x2c, 0xa8, 0x1f, 0xf7, 0x86, 0x7b, 0x0a, 0x5a, + 0x96, 0xab, 0xbd, 0xe1, 0xe1, 0x1e, 0xdb, 0x7b, 0x75, 0xd2, 0x34, 0x36, 0x37, 0x00, 0xd2, 0x31, + 0x49, 0xda, 0x7a, 0xfd, 0xe1, 0x1e, 0xeb, 0xef, 0x1c, 0x37, 0x0b, 0x0a, 0xf9, 0xa3, 0x5e, 0x15, + 0x37, 0x3b, 0x50, 0x4f, 0x5a, 0x86, 0xb2, 0x74, 0x4f, 0xfb, 0xa7, 0x27, 0xbd, 0x6e, 0xb3, 0x40, + 0x00, 0xaa, 0xfd, 0x53, 0x76, 0x22, 0x51, 0xd2, 0x32, 0x60, 0xbd, 0x53, 0xd6, 0x1b, 0xfe, 0xd4, + 0x2c, 0x75, 0x7e, 0x36, 0xa1, 0xbc, 0x33, 0xe8, 0x91, 0x75, 0x30, 0xce, 0x84, 0xe7, 0x13, 0x45, + 0x8c, 0x1a, 0x19, 0xd7, 0x52, 0x91, 0x16, 0xc8, 0x36, 0xac, 0x76, 0xa3, 0x20, 0x40, 0x57, 0x24, + 0xc3, 0x59, 0x53, 0x4f, 0x32, 0x8b, 0xa7, 0x70, 0x2d, 0x3b, 0xac, 0xd0, 0x02, 0xf9, 0x3f, 0x40, + 0x1f, 0xaf, 0x6e, 0x0c, 0xff, 0x1f, 0xd4, 0xbb, 0x97, 0xdc, 0x71, 0x87, 0x8e, 0x4f, 0x6e, 0x27, + 0x29, 0x4c, 0xd1, 0x2a, 0x1b, 0xf1, 0x5c, 0x47, 0x0b, 0xe4, 0x31, 0xd4, 0xf4, 0x04, 0xb7, 0x0c, + 0xab, 0x2a, 0x20, 0x99, 0xf0, 0x68, 0x81, 0x3c, 0x85, 0xe6, 0x09, 0x0f, 0x05, 0x06, 0x83, 0xc0, + 0x79, 0xcf, 0x05, 0xca, 0x46, 0xb7, 0xe4, 0xb3, 0x64, 0x36, 0xa3, 0x05, 0xf2, 0x04, 0x6e, 0xe9, + 0x2f, 0xa2, 0xd1, 0xc4, 0x19, 0x7f, 0xfe, 0x83, 0x47, 0x50, 0x3d, 0xe4, 
0xa1, 0xc4, 0x65, 0xc3, + 0x5a, 0x53, 0x51, 0x67, 0x27, 0x35, 0x5a, 0x20, 0x0f, 0xa1, 0xaa, 0x87, 0xb2, 0x0c, 0xd9, 0xea, + 0x9a, 0x2d, 0xc6, 0x35, 0x5a, 0x20, 0xcf, 0xc1, 0xca, 0x0c, 0x67, 0xe1, 0xb2, 0xe3, 0xff, 0xa1, + 0xc6, 0xb2, 0xfc, 0x04, 0xa7, 0xf6, 0x5f, 0x3d, 0x40, 0x91, 0xd1, 0x93, 0x7a, 0x3c, 0xbf, 0x39, + 0xf6, 0x9a, 0x9e, 0xe4, 0xd4, 0xfe, 0x8d, 0x03, 0x14, 0x99, 0x71, 0xe3, 0x5f, 0xd9, 0x27, 0x27, + 0x3d, 0x64, 0x55, 0xab, 0x93, 0x8e, 0x57, 0x20, 0x14, 0x2a, 0x6a, 0xd6, 0x20, 0x71, 0x6b, 0x48, + 0xc6, 0x8e, 0xb5, 0xc5, 0x29, 0xb4, 0x40, 0xee, 0x43, 0x6d, 0x37, 0x9a, 0xfa, 0x72, 0x5a, 0x49, + 0x0f, 0xcf, 0x02, 0x1e, 0x43, 0x73, 0xc7, 0xb6, 0x5f, 0xcb, 0x59, 0x0d, 0x6d, 0xdd, 0x31, 0x72, + 0xcc, 0x5d, 0xab, 0xbe, 0xe6, 0x01, 0x8a, 0xfc, 0x08, 0x91, 0xee, 0xab, 0xa9, 0xc9, 0x4e, 0x0e, + 0x32, 0x21, 0x96, 0x7a, 0xf2, 0x93, 0xfa, 0x8b, 0x9d, 0x4d, 0x86, 0x80, 0x9c, 0x2f, 0xfb, 0x70, + 0x37, 0xff, 0x36, 0xa5, 0x6f, 0xdd, 0x1d, 0xb5, 0xf5, 0x07, 0x0f, 0x57, 0x7c, 0x64, 0xae, 0xf3, + 0xab, 0x0a, 0x36, 0x17, 0x7d, 0x3d, 0xce, 0x57, 0xae, 0xcd, 0xc7, 0x21, 0xa9, 0xae, 0xa6, 0x6e, + 0xc7, 0x4a, 0xa6, 0x8d, 0x11, 0x95, 0xcb, 0x6b, 0x7d, 0x2d, 0xae, 0xaf, 0x7d, 0x94, 0xa4, 0xb7, + 0xa1, 0x7a, 0x80, 0xe2, 0x83, 0xfa, 0xca, 0x55, 0x60, 0x5d, 0xfa, 0xa1, 0xfe, 0x39, 0x96, 0x14, + 0x4b, 0x5d, 0x23, 0x25, 0x37, 0xcf, 0xa0, 0x21, 0xa1, 0xe9, 0x9f, 0xc7, 0x12, 0x7c, 0x23, 0x73, + 0x0c, 0xc6, 0xd7, 0xd9, 0x7a, 0xcd, 0x27, 0x13, 0x14, 0x7d, 0x4f, 0x38, 0xe7, 0x4b, 0xef, 0xc3, + 0xa2, 0xba, 0x9e, 0x16, 0xc9, 0x63, 0x80, 0x97, 0xd1, 0xd4, 0x1f, 0xf2, 0xd1, 0x64, 0xf9, 0x01, + 0xca, 0x75, 0xe6, 0x5d, 0x49, 0xf4, 0xa8, 0xaa, 0xe6, 0xfc, 0x67, 0x7f, 0x05, 0x00, 0x00, 0xff, + 0xff, 0x87, 0x69, 0x86, 0xca, 0xe0, 0x0e, 0x00, 0x00, +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/api/pb/api.proto b/vendor/github.com/OpenBazaar/multiwallet/api/pb/api.proto new file mode 100644 index 0000000000..791b442d0d --- /dev/null +++ 
b/vendor/github.com/OpenBazaar/multiwallet/api/pb/api.proto @@ -0,0 +1,209 @@ +syntax = "proto3"; + +package pb; + +import "google/protobuf/timestamp.proto"; + +service API { + rpc Stop (Empty) returns (Empty) {} + rpc CurrentAddress (KeySelection) returns (Address) {} + rpc NewAddress (KeySelection) returns (Address) {} + rpc ChainTip (CoinSelection) returns (Height) {} + rpc Balance (CoinSelection) returns (Balances) {} + rpc MasterPrivateKey (CoinSelection) returns (Key) {} + rpc MasterPublicKey (CoinSelection) returns (Key) {} + rpc HasKey (Address) returns (BoolResponse) {} + rpc Params (Empty) returns (NetParams) {} + rpc Transactions (CoinSelection) returns (TransactionList) {} + rpc GetTransaction (Txid) returns (Tx) {} + rpc GetFeePerByte (FeeLevelSelection) returns (FeePerByte) {} + rpc Spend (SpendInfo) returns (Txid) {} + rpc BumpFee (Txid) returns (Txid) {} + rpc AddWatchedScript (Address) returns (Empty) {} + rpc GetConfirmations (Txid) returns (Confirmations) {} + rpc SweepAddress (SweepInfo) returns (Txid) {} + rpc CreateMultisigSignature (CreateMultisigInfo) returns (SignatureList) {} + rpc Multisign (MultisignInfo) returns (RawTx) {} + rpc EstimateFee (EstimateFeeData) returns (Fee) {} + rpc GetKey (Address) returns (Key) {} + rpc ListKeys (CoinSelection) returns (Keys) {} + rpc ListAddresses (CoinSelection) returns (Addresses) {} + rpc WalletNotify (CoinSelection) returns (stream Tx) {} + rpc DumpTables (CoinSelection) returns (stream Row) {} +} + +enum CoinType { + BITCOIN = 0; + BITCOIN_CASH = 1; + ZCASH = 2; + LITECOIN = 3; + ETHEREUM = 4; +} + +message Empty {} + +message CoinSelection { + CoinType coin = 1; +} + +enum KeyPurpose { + INTERNAL = 0; + EXTERNAL = 1; +} + +message Row { + string data = 1; +} + +message KeySelection { + CoinType coin = 1; + KeyPurpose purpose = 2; +} + +message Address { + CoinType coin = 1; + string addr = 2; +} + +message Height { + uint32 height = 1; +} + +message Balances { + uint64 confirmed = 1; + uint64 
unconfirmed = 2; +} + +message Key { + string key = 1; +} + +message Keys { + repeated Key keys = 1; +} + +message Addresses { + repeated Address addresses = 1; +} + +message BoolResponse { + bool bool = 1; +} + +message NetParams { + string name = 1; +} + +message TransactionList { + repeated Tx transactions = 1; +} + +message Tx { + string txid = 1; + int64 value = 2; + int32 height = 3; + google.protobuf.Timestamp timestamp = 4; + bool watchOnly = 5; + bytes raw = 6; +} + +message Txid { + CoinType coin = 1; + string hash = 2; +} + +enum FeeLevel { + ECONOMIC = 0; + NORMAL = 1; + PRIORITY = 2; +} + +message FeeLevelSelection { + CoinType coin = 1; + FeeLevel feeLevel = 2; +} + +message FeePerByte { + uint64 fee = 1; +} + +message Fee { + uint64 fee = 1; +} + +message SpendInfo { + CoinType coin = 1; + string address = 2; + uint64 amount = 3; + FeeLevel feeLevel = 4; + string memo = 5; +} + +message Confirmations { + uint32 confirmations = 1; +} + +message Utxo { + string txid = 1; + uint32 index = 2; + uint64 value = 3; +} + +message SweepInfo { + CoinType coin = 1; + repeated Utxo utxos = 2; + string address = 3; + string key = 4; + bytes redeemScript = 5; + FeeLevel feeLevel = 6; +} + +message Input { + string txid = 1; + uint32 index = 2; +} + +message Output { + bytes scriptPubKey = 1; + uint64 value = 2; +} + +message Signature { + uint32 index = 1; + bytes signature = 2; +} + +message CreateMultisigInfo { + CoinType coin = 1; + repeated Input inputs = 2; + repeated Output outputs = 3; + string key = 4; + bytes redeemScript = 5; + uint64 feePerByte = 6; +} + +message SignatureList { + repeated Signature sigs = 1; +} + +message MultisignInfo { + CoinType coin = 1; + repeated Input inputs = 2; + repeated Output outputs = 3; + repeated Signature sig1 = 4; + repeated Signature sig2 = 5; + bytes redeemScript = 6; + uint64 feePerByte = 7; + bool broadcast = 8; +} + +message RawTx { + bytes tx = 1; +} + +message EstimateFeeData { + CoinType coin = 1; + repeated 
Input inputs = 2; + repeated Output outputs = 3; + uint64 feePerByte = 4; +} \ No newline at end of file diff --git a/vendor/github.com/OpenBazaar/multiwallet/api/rpc.go b/vendor/github.com/OpenBazaar/multiwallet/api/rpc.go new file mode 100644 index 0000000000..02586cbba4 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/api/rpc.go @@ -0,0 +1,281 @@ +package api + +import ( + "errors" + "net" + + "github.com/OpenBazaar/multiwallet" + "github.com/OpenBazaar/multiwallet/api/pb" + "github.com/OpenBazaar/multiwallet/bitcoin" + "github.com/OpenBazaar/multiwallet/bitcoincash" + "github.com/OpenBazaar/multiwallet/litecoin" + "github.com/OpenBazaar/multiwallet/zcash" + "github.com/OpenBazaar/wallet-interface" + "github.com/btcsuite/btcutil" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" +) + +const Addr = "127.0.0.1:8234" + +type server struct { + w multiwallet.MultiWallet +} + +func ServeAPI(w multiwallet.MultiWallet) error { + lis, err := net.Listen("tcp", Addr) + if err != nil { + return err + } + s := grpc.NewServer() + pb.RegisterAPIServer(s, &server{w}) + reflection.Register(s) + if err := s.Serve(lis); err != nil { + return err + } + return nil +} + +func coinType(coinType pb.CoinType) wallet.CoinType { + switch coinType { + case pb.CoinType_BITCOIN: + return wallet.Bitcoin + case pb.CoinType_BITCOIN_CASH: + return wallet.BitcoinCash + case pb.CoinType_ZCASH: + return wallet.Zcash + case pb.CoinType_LITECOIN: + return wallet.Litecoin + default: + return wallet.Bitcoin + } +} + +func (s *server) Stop(ctx context.Context, in *pb.Empty) (*pb.Empty, error) { + // Stub + return &pb.Empty{}, nil +} + +func (s *server) CurrentAddress(ctx context.Context, in *pb.KeySelection) (*pb.Address, error) { + var purpose wallet.KeyPurpose + if in.Purpose == pb.KeyPurpose_INTERNAL { + purpose = wallet.INTERNAL + } else if in.Purpose == pb.KeyPurpose_EXTERNAL { + purpose = wallet.EXTERNAL + } else { + return nil, 
errors.New("Unknown key purpose") + } + ct := coinType(in.Coin) + wal, err := s.w.WalletForCurrencyCode(ct.CurrencyCode()) + if err != nil { + return nil, err + } + addr := wal.CurrentAddress(purpose) + return &pb.Address{Coin: in.Coin, Addr: addr.String()}, nil +} + +func (s *server) NewAddress(ctx context.Context, in *pb.KeySelection) (*pb.Address, error) { + var purpose wallet.KeyPurpose + if in.Purpose == pb.KeyPurpose_INTERNAL { + purpose = wallet.INTERNAL + } else if in.Purpose == pb.KeyPurpose_EXTERNAL { + purpose = wallet.EXTERNAL + } else { + return nil, errors.New("Unknown key purpose") + } + ct := coinType(in.Coin) + wal, err := s.w.WalletForCurrencyCode(ct.CurrencyCode()) + if err != nil { + return nil, err + } + addr := wal.NewAddress(purpose) + return &pb.Address{Coin: in.Coin, Addr: addr.String()}, nil +} + +func (s *server) ChainTip(ctx context.Context, in *pb.CoinSelection) (*pb.Height, error) { + ct := coinType(in.Coin) + wal, err := s.w.WalletForCurrencyCode(ct.CurrencyCode()) + if err != nil { + return nil, err + } + h, _ := wal.ChainTip() + return &pb.Height{Height: h}, nil +} + +func (s *server) Balance(ctx context.Context, in *pb.CoinSelection) (*pb.Balances, error) { + ct := coinType(in.Coin) + wal, err := s.w.WalletForCurrencyCode(ct.CurrencyCode()) + if err != nil { + return nil, err + } + c, u := wal.Balance() + return &pb.Balances{Confirmed: uint64(c), Unconfirmed: uint64(u)}, nil +} + +func (s *server) MasterPrivateKey(ctx context.Context, in *pb.CoinSelection) (*pb.Key, error) { + // Stub + return &pb.Key{Key: ""}, nil +} + +func (s *server) MasterPublicKey(ctx context.Context, in *pb.CoinSelection) (*pb.Key, error) { + // Stub + return &pb.Key{Key: ""}, nil +} + +func (s *server) Params(ctx context.Context, in *pb.Empty) (*pb.NetParams, error) { + // Stub + return &pb.NetParams{Name: ""}, nil +} + +func (s *server) HasKey(ctx context.Context, in *pb.Address) (*pb.BoolResponse, error) { + // Stub + return &pb.BoolResponse{Bool: false}, 
nil +} + +func (s *server) Transactions(ctx context.Context, in *pb.CoinSelection) (*pb.TransactionList, error) { + // Stub + var list []*pb.Tx + return &pb.TransactionList{Transactions: list}, nil +} + +func (s *server) GetTransaction(ctx context.Context, in *pb.Txid) (*pb.Tx, error) { + // Stub + respTx := &pb.Tx{} + return respTx, nil +} + +func (s *server) GetFeePerByte(ctx context.Context, in *pb.FeeLevelSelection) (*pb.FeePerByte, error) { + // Stub + return &pb.FeePerByte{Fee: 0}, nil +} + +func (s *server) Spend(ctx context.Context, in *pb.SpendInfo) (*pb.Txid, error) { + var addr btcutil.Address + var err error + + ct := coinType(in.Coin) + wal, err := s.w.WalletForCurrencyCode(ct.CurrencyCode()) + if err != nil { + return nil, err + } + addr, err = wal.DecodeAddress(in.Address) + if err != nil { + return nil, err + } + + var feeLevel wallet.FeeLevel + switch in.FeeLevel { + case pb.FeeLevel_PRIORITY: + feeLevel = wallet.PRIOIRTY + case pb.FeeLevel_NORMAL: + feeLevel = wallet.NORMAL + case pb.FeeLevel_ECONOMIC: + feeLevel = wallet.ECONOMIC + default: + feeLevel = wallet.NORMAL + } + txid, err := wal.Spend(int64(in.Amount), addr, feeLevel, "", false) + if err != nil { + return nil, err + } + return &pb.Txid{Coin: in.Coin, Hash: txid.String()}, nil +} + +func (s *server) BumpFee(ctx context.Context, in *pb.Txid) (*pb.Txid, error) { + // Stub + return &pb.Txid{Coin: in.Coin, Hash: ""}, nil +} + +func (s *server) AddWatchedScript(ctx context.Context, in *pb.Address) (*pb.Empty, error) { + return nil, nil +} + +func (s *server) GetConfirmations(ctx context.Context, in *pb.Txid) (*pb.Confirmations, error) { + // Stub + return &pb.Confirmations{Confirmations: 0}, nil +} + +func (s *server) SweepAddress(ctx context.Context, in *pb.SweepInfo) (*pb.Txid, error) { + // Stub + return &pb.Txid{Coin: in.Coin, Hash: ""}, nil +} + +func (s *server) CreateMultisigSignature(ctx context.Context, in *pb.CreateMultisigInfo) (*pb.SignatureList, error) { + var retSigs 
[]*pb.Signature + return &pb.SignatureList{Sigs: retSigs}, nil +} + +func (s *server) Multisign(ctx context.Context, in *pb.MultisignInfo) (*pb.RawTx, error) { + // Stub + return &pb.RawTx{Tx: []byte{}}, nil +} + +func (s *server) EstimateFee(ctx context.Context, in *pb.EstimateFeeData) (*pb.Fee, error) { + // Stub + return &pb.Fee{Fee: 0}, nil +} + +func (s *server) WalletNotify(in *pb.CoinSelection, stream pb.API_WalletNotifyServer) error { + // Stub + return nil +} + +func (s *server) GetKey(ctx context.Context, in *pb.Address) (*pb.Key, error) { + // Stub + return &pb.Key{Key: ""}, nil +} + +func (s *server) ListAddresses(ctx context.Context, in *pb.CoinSelection) (*pb.Addresses, error) { + // Stub + var list []*pb.Address + return &pb.Addresses{Addresses: list}, nil +} + +func (s *server) ListKeys(ctx context.Context, in *pb.CoinSelection) (*pb.Keys, error) { + // Stub + var list []*pb.Key + return &pb.Keys{Keys: list}, nil +} + +type HeaderWriter struct { + stream pb.API_DumpTablesServer +} + +func (h *HeaderWriter) Write(p []byte) (n int, err error) { + hdr := &pb.Row{Data: string(p)} + if err := h.stream.Send(hdr); err != nil { + return 0, err + } + return 0, nil +} + +func (s *server) DumpTables(in *pb.CoinSelection, stream pb.API_DumpTablesServer) error { + writer := HeaderWriter{stream} + ct := coinType(in.Coin) + wal, err := s.w.WalletForCurrencyCode(ct.CurrencyCode()) + if err != nil { + return err + } + bitcoinWallet, ok := wal.(*bitcoin.BitcoinWallet) + if ok { + bitcoinWallet.DumpTables(&writer) + return nil + } + bitcoincashWallet, ok := wal.(*bitcoincash.BitcoinCashWallet) + if ok { + bitcoincashWallet.DumpTables(&writer) + return nil + } + litecoinWallet, ok := wal.(*litecoin.LitecoinWallet) + if ok { + litecoinWallet.DumpTables(&writer) + return nil + } + zcashWallet, ok := wal.(*zcash.ZCashWallet) + if ok { + zcashWallet.DumpTables(&writer) + return nil + } + return nil +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/bitcoin/sign.go 
b/vendor/github.com/OpenBazaar/multiwallet/bitcoin/sign.go index 07baff1463..0be6935322 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/bitcoin/sign.go +++ b/vendor/github.com/OpenBazaar/multiwallet/bitcoin/sign.go @@ -12,7 +12,6 @@ import ( "github.com/btcsuite/btcd/chaincfg" - "github.com/OpenBazaar/spvwallet" wi "github.com/OpenBazaar/wallet-interface" "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcec" @@ -255,32 +254,36 @@ func newUnsignedTransaction(outputs []*wire.TxOut, feePerKb btc.Amount, fetchInp } } -func (w *BitcoinWallet) bumpFee(txid chainhash.Hash) (*chainhash.Hash, error) { +func (w *BitcoinWallet) bumpFee(txid string) (string, error) { txn, err := w.db.Txns().Get(txid) if err != nil { - return nil, err + return "", err } if txn.Height > 0 { - return nil, spvwallet.BumpFeeAlreadyConfirmedError + return "", util.BumpFeeAlreadyConfirmedError } if txn.Height < 0 { - return nil, spvwallet.BumpFeeTransactionDeadError + return "", util.BumpFeeTransactionDeadError + } + chTxid, err := chainhash.NewHashFromStr(txid) + if err != nil { + return "", err } // Check utxos for CPFP utxos, _ := w.db.Utxos().GetAll() for _, u := range utxos { - if u.Op.Hash.IsEqual(&txid) && u.AtHeight == 0 { + if u.Op.Hash.IsEqual(chTxid) && u.AtHeight == 0 { addr, err := w.ScriptToAddress(u.ScriptPubkey) if err != nil { - return nil, err + return "", err } key, err := w.km.GetKeyForScript(addr.ScriptAddress()) if err != nil { - return nil, err + return "", err } h, err := hex.DecodeString(u.Op.Hash.String()) if err != nil { - return nil, err + return "", err } n := new(big.Int) n, _ = n.SetString(u.Value, 10) @@ -292,15 +295,15 @@ func (w *BitcoinWallet) bumpFee(txid chainhash.Hash) (*chainhash.Hash, error) { } transactionID, err := w.sweepAddress([]wi.TransactionInput{in}, nil, key, nil, wi.FEE_BUMP) if err != nil { - return nil, err + return "", err } return transactionID, nil } } - return nil, spvwallet.BumpFeeNotFoundError + return "", 
util.BumpFeeNotFoundError } -func (w *BitcoinWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (*chainhash.Hash, error) { +func (w *BitcoinWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (string, error) { var internalAddr btc.Address if address != nil { internalAddr = *address @@ -309,7 +312,7 @@ func (w *BitcoinWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Add } script, err := txscript.PayToAddrScript(internalAddr) if err != nil { - return nil, err + return "", err } var val int64 @@ -319,11 +322,11 @@ func (w *BitcoinWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Add val += in.Value.Int64() ch, err := chainhash.NewHashFromStr(hex.EncodeToString(in.OutpointHash)) if err != nil { - return nil, err + return "", err } script, err := txscript.PayToAddrScript(in.LinkedAddress) if err != nil { - return nil, err + return "", err } outpoint := wire.NewOutPoint(ch, in.OutpointIndex) input := wire.NewTxIn(outpoint, []byte{}, [][]byte{}) @@ -335,7 +338,7 @@ func (w *BitcoinWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Add txType := P2PKH if redeemScript != nil { txType = P2SH_1of2_Multisig - _, err := spvwallet.LockTimeFromRedeemScript(*redeemScript) + _, err := util.LockTimeFromRedeemScript(*redeemScript) if err == nil { txType = P2SH_Multisig_Timelock_1Sig } @@ -366,12 +369,12 @@ func (w *BitcoinWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Add // Sign tx privKey, err := key.ECPrivKey() if err != nil { - return nil, fmt.Errorf("retrieving private key: %s", err.Error()) + return "", fmt.Errorf("retrieving private key: %s", err.Error()) } pk := privKey.PubKey().SerializeCompressed() addressPub, err := btc.NewAddressPubKey(pk, w.params) if err != nil { - return nil, fmt.Errorf("generating address pub key: %s", err.Error()) + return "", 
fmt.Errorf("generating address pub key: %s", err.Error()) } getKey := txscript.KeyClosure(func(addr btc.Address) (*btcec.PrivateKey, bool, error) { @@ -399,9 +402,9 @@ func (w *BitcoinWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Add timeLocked = true tx.Version = 2 for _, txIn := range tx.TxIn { - locktime, err := spvwallet.LockTimeFromRedeemScript(*redeemScript) + locktime, err := util.LockTimeFromRedeemScript(*redeemScript) if err != nil { - return nil, err + return "", err } txIn.Sequence = locktime } @@ -416,13 +419,13 @@ func (w *BitcoinWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Add tx, i, prevOutScript, txscript.SigHashAll, getKey, getScript, txIn.SignatureScript) if err != nil { - return nil, errors.New("Failed to sign transaction") + return "", errors.New("Failed to sign transaction") } txIn.SignatureScript = script } else { sig, err := txscript.RawTxInWitnessSignature(tx, hashes, i, ins[i].Value.Int64(), *redeemScript, txscript.SigHashAll, privKey) if err != nil { - return nil, err + return "", err } var witness wire.TxWitness if timeLocked { @@ -437,10 +440,10 @@ func (w *BitcoinWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Add // broadcast if err := w.Broadcast(tx); err != nil { - return nil, err + return "", err } txid := tx.TxHash() - return &txid, nil + return txid.String(), nil } func (w *BitcoinWallet) createMultisigSignature(ins []wi.TransactionInput, outs []wi.TransactionOutput, key *hd.ExtendedKey, redeemScript []byte, feePerByte uint64) ([]wi.Signature, error) { @@ -466,7 +469,7 @@ func (w *BitcoinWallet) createMultisigSignature(ins []wi.TransactionInput, outs // Subtract fee txType := P2SH_2of3_Multisig - _, err := spvwallet.LockTimeFromRedeemScript(redeemScript) + _, err := util.LockTimeFromRedeemScript(redeemScript) if err == nil { txType = P2SH_Multisig_Timelock_2Sigs } @@ -521,7 +524,7 @@ func (w *BitcoinWallet) multisign(ins []wi.TransactionInput, outs []wi.Transacti // Subtract fee txType 
:= P2SH_2of3_Multisig - _, err := spvwallet.LockTimeFromRedeemScript(redeemScript) + _, err := util.LockTimeFromRedeemScript(redeemScript) if err == nil { txType = P2SH_Multisig_Timelock_2Sigs } diff --git a/vendor/github.com/OpenBazaar/multiwallet/bitcoin/wallet.go b/vendor/github.com/OpenBazaar/multiwallet/bitcoin/wallet.go index 147856b6c5..5a21e8a1ba 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/bitcoin/wallet.go +++ b/vendor/github.com/OpenBazaar/multiwallet/bitcoin/wallet.go @@ -17,11 +17,8 @@ import ( "github.com/OpenBazaar/multiwallet/model" "github.com/OpenBazaar/multiwallet/service" "github.com/OpenBazaar/multiwallet/util" - "github.com/OpenBazaar/spvwallet" - "github.com/OpenBazaar/spvwallet/exchangerates" wi "github.com/OpenBazaar/wallet-interface" "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" btc "github.com/btcsuite/btcutil" @@ -38,7 +35,7 @@ type BitcoinWallet struct { params *chaincfg.Params client model.APIClient ws *service.WalletService - fp *spvwallet.FeeProvider + fp *util.FeeProvider mPrivKey *hd.ExtendedKey mPubKey *hd.ExtendedKey @@ -75,7 +72,7 @@ func NewBitcoinWallet(cfg config.CoinConfig, mnemonic string, params *chaincfg.P if err != nil { return nil, err } - er := exchangerates.NewBitcoinPriceFetcher(proxy) + er := util.NewBitcoinPriceFetcher(proxy) if !disableExchangeRates { go er.Run() } @@ -85,7 +82,7 @@ func NewBitcoinWallet(cfg config.CoinConfig, mnemonic string, params *chaincfg.P return nil, err } - fp := spvwallet.NewFeeProvider(cfg.MaxFee, cfg.HighFee, cfg.MediumFee, cfg.LowFee, cfg.SuperLowFee, cfg.FeeAPI, proxy) + fp := util.NewFeeProvider(cfg.MaxFee, cfg.HighFee, cfg.MediumFee, cfg.LowFee, cfg.SuperLowFee, er) return &BitcoinWallet{ db: cfg.DB, @@ -252,7 +249,7 @@ func (w *BitcoinWallet) Transactions() ([]wi.Txn, error) { return txns, nil } -func (w *BitcoinWallet) GetTransaction(txid chainhash.Hash) (wi.Txn, 
error) { +func (w *BitcoinWallet) GetTransaction(txid string) (wi.Txn, error) { txn, err := w.db.Txns().Get(txid) if err == nil { tx := wire.NewMsgTx(1) @@ -285,7 +282,7 @@ func (w *BitcoinWallet) GetTransaction(txid chainhash.Hash) (wi.Txn, error) { return txn, err } -func (w *BitcoinWallet) ChainTip() (uint32, chainhash.Hash) { +func (w *BitcoinWallet) ChainTip() (uint32, string) { return w.ws.ChainTip() } @@ -293,7 +290,7 @@ func (w *BitcoinWallet) GetFeePerByte(feeLevel wi.FeeLevel) big.Int { return *big.NewInt(int64(w.fp.GetFeePerByte(feeLevel))) } -func (w *BitcoinWallet) Spend(amount big.Int, addr btc.Address, feeLevel wi.FeeLevel, referenceID string, spendAll bool) (*chainhash.Hash, error) { +func (w *BitcoinWallet) Spend(amount big.Int, addr btc.Address, feeLevel wi.FeeLevel, referenceID string, spendAll bool) (string, error) { var ( tx *wire.MsgTx err error @@ -301,23 +298,23 @@ func (w *BitcoinWallet) Spend(amount big.Int, addr btc.Address, feeLevel wi.FeeL if spendAll { tx, err = w.buildSpendAllTx(addr, feeLevel) if err != nil { - return nil, err + return "", err } } else { tx, err = w.buildTx(amount.Int64(), addr, feeLevel, nil) if err != nil { - return nil, err + return "", err } } if err := w.Broadcast(tx); err != nil { - return nil, err + return "", err } ch := tx.TxHash() - return &ch, nil + return ch.String(), nil } -func (w *BitcoinWallet) BumpFee(txid chainhash.Hash) (*chainhash.Hash, error) { +func (w *BitcoinWallet) BumpFee(txid string) (string, error) { return w.bumpFee(txid) } @@ -338,7 +335,7 @@ func (w *BitcoinWallet) EstimateSpendFee(amount big.Int, feeLevel wi.FeeLevel) ( return *big.NewInt(int64(val)), err } -func (w *BitcoinWallet) SweepAddress(ins []wi.TransactionInput, address *btc.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (*chainhash.Hash, error) { +func (w *BitcoinWallet) SweepAddress(ins []wi.TransactionInput, address *btc.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) 
(string, error) { return w.sweepAddress(ins, address, key, redeemScript, feeLevel) } @@ -384,7 +381,7 @@ func (w *BitcoinWallet) ReSyncBlockchain(fromTime time.Time) { go w.ws.UpdateState() } -func (w *BitcoinWallet) GetConfirmations(txid chainhash.Hash) (uint32, uint32, error) { +func (w *BitcoinWallet) GetConfirmations(txid string) (uint32, uint32, error) { txn, err := w.db.Txns().Get(txid) if err != nil { return 0, 0, err diff --git a/vendor/github.com/OpenBazaar/multiwallet/bitcoincash/sign.go b/vendor/github.com/OpenBazaar/multiwallet/bitcoincash/sign.go index b7d88dba13..d459681c3e 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/bitcoincash/sign.go +++ b/vendor/github.com/OpenBazaar/multiwallet/bitcoincash/sign.go @@ -11,7 +11,6 @@ import ( "github.com/btcsuite/btcd/chaincfg" - "github.com/OpenBazaar/spvwallet" wi "github.com/OpenBazaar/wallet-interface" "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcec" @@ -261,32 +260,36 @@ func newUnsignedTransaction(outputs []*wire.TxOut, feePerKb btc.Amount, fetchInp } } -func (w *BitcoinCashWallet) bumpFee(txid chainhash.Hash) (*chainhash.Hash, error) { +func (w *BitcoinCashWallet) bumpFee(txid string) (string, error) { txn, err := w.db.Txns().Get(txid) if err != nil { - return nil, err + return "", err } if txn.Height > 0 { - return nil, spvwallet.BumpFeeAlreadyConfirmedError + return "", util.BumpFeeAlreadyConfirmedError } if txn.Height < 0 { - return nil, spvwallet.BumpFeeTransactionDeadError + return "", util.BumpFeeTransactionDeadError + } + chTxid, err := chainhash.NewHashFromStr(txid) + if err != nil { + return "", err } // Check utxos for CPFP utxos, _ := w.db.Utxos().GetAll() for _, u := range utxos { - if u.Op.Hash.IsEqual(&txid) && u.AtHeight == 0 { + if u.Op.Hash.IsEqual(chTxid) && u.AtHeight == 0 { addr, err := w.ScriptToAddress(u.ScriptPubkey) if err != nil { - return nil, err + return "", err } key, err := w.km.GetKeyForScript(addr.ScriptAddress()) if err != nil { - return nil, 
err + return "", err } h, err := hex.DecodeString(u.Op.Hash.String()) if err != nil { - return nil, err + return "", err } n := new(big.Int) n, _ = n.SetString(u.Value, 10) @@ -298,15 +301,15 @@ func (w *BitcoinCashWallet) bumpFee(txid chainhash.Hash) (*chainhash.Hash, error } transactionID, err := w.sweepAddress([]wi.TransactionInput{in}, nil, key, nil, wi.FEE_BUMP) if err != nil { - return nil, err + return "", err } return transactionID, nil } } - return nil, spvwallet.BumpFeeNotFoundError + return "", util.BumpFeeNotFoundError } -func (w *BitcoinCashWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (*chainhash.Hash, error) { +func (w *BitcoinCashWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (string, error) { var internalAddr btc.Address if address != nil { internalAddr = *address @@ -315,7 +318,7 @@ func (w *BitcoinCashWallet) sweepAddress(ins []wi.TransactionInput, address *btc } script, err := bchutil.PayToAddrScript(internalAddr) if err != nil { - return nil, err + return "", err } var val int64 @@ -325,11 +328,11 @@ func (w *BitcoinCashWallet) sweepAddress(ins []wi.TransactionInput, address *btc val += in.Value.Int64() ch, err := chainhash.NewHashFromStr(hex.EncodeToString(in.OutpointHash)) if err != nil { - return nil, err + return "", err } script, err := bchutil.PayToAddrScript(in.LinkedAddress) if err != nil { - return nil, err + return "", err } outpoint := wire.NewOutPoint(ch, in.OutpointIndex) input := wire.NewTxIn(outpoint, []byte{}, [][]byte{}) @@ -341,7 +344,7 @@ func (w *BitcoinCashWallet) sweepAddress(ins []wi.TransactionInput, address *btc txType := P2PKH if redeemScript != nil { txType = P2SH_1of2_Multisig - _, err := spvwallet.LockTimeFromRedeemScript(*redeemScript) + _, err := util.LockTimeFromRedeemScript(*redeemScript) if err == nil { txType = 
P2SH_Multisig_Timelock_1Sig } @@ -372,12 +375,12 @@ func (w *BitcoinCashWallet) sweepAddress(ins []wi.TransactionInput, address *btc // Sign tx privKey, err := key.ECPrivKey() if err != nil { - return nil, fmt.Errorf("retrieving private key: %s", err.Error()) + return "", fmt.Errorf("retrieving private key: %s", err.Error()) } pk := privKey.PubKey().SerializeCompressed() addressPub, err := btc.NewAddressPubKey(pk, w.params) if err != nil { - return nil, fmt.Errorf("generating address pub key: %s", err.Error()) + return "", fmt.Errorf("generating address pub key: %s", err.Error()) } getKey := txscript.KeyClosure(func(addr btc.Address) (*btcec.PrivateKey, bool, error) { @@ -405,9 +408,9 @@ func (w *BitcoinCashWallet) sweepAddress(ins []wi.TransactionInput, address *btc timeLocked = true tx.Version = 2 for _, txIn := range tx.TxIn { - locktime, err := spvwallet.LockTimeFromRedeemScript(*redeemScript) + locktime, err := util.LockTimeFromRedeemScript(*redeemScript) if err != nil { - return nil, err + return "", err } txIn.Sequence = locktime } @@ -421,17 +424,17 @@ func (w *BitcoinCashWallet) sweepAddress(ins []wi.TransactionInput, address *btc tx, i, prevOutScript, txscript.SigHashAll, getKey, getScript, txIn.SignatureScript, ins[i].Value.Int64()) if err != nil { - return nil, errors.New("Failed to sign transaction") + return "", errors.New("Failed to sign transaction") } txIn.SignatureScript = script } else { priv, err := key.ECPrivKey() if err != nil { - return nil, err + return "", err } script, err := bchutil.RawTxInSignature(tx, i, *redeemScript, txscript.SigHashAll, priv, ins[i].Value.Int64()) if err != nil { - return nil, err + return "", err } builder := txscript.NewScriptBuilder(). AddData(script). 
@@ -444,10 +447,10 @@ func (w *BitcoinCashWallet) sweepAddress(ins []wi.TransactionInput, address *btc // broadcast if err := w.Broadcast(tx); err != nil { - return nil, err + return "", err } txid := tx.TxHash() - return &txid, nil + return txid.String(), nil } func (w *BitcoinCashWallet) createMultisigSignature(ins []wi.TransactionInput, outs []wi.TransactionOutput, key *hd.ExtendedKey, redeemScript []byte, feePerByte uint64) ([]wi.Signature, error) { @@ -473,7 +476,7 @@ func (w *BitcoinCashWallet) createMultisigSignature(ins []wi.TransactionInput, o // Subtract fee txType := P2SH_2of3_Multisig - _, err := spvwallet.LockTimeFromRedeemScript(redeemScript) + _, err := util.LockTimeFromRedeemScript(redeemScript) if err == nil { txType = P2SH_Multisig_Timelock_2Sigs } @@ -527,7 +530,7 @@ func (w *BitcoinCashWallet) multisign(ins []wi.TransactionInput, outs []wi.Trans // Subtract fee txType := P2SH_2of3_Multisig - _, err := spvwallet.LockTimeFromRedeemScript(redeemScript) + _, err := util.LockTimeFromRedeemScript(redeemScript) if err == nil { txType = P2SH_Multisig_Timelock_2Sigs } diff --git a/vendor/github.com/OpenBazaar/multiwallet/bitcoincash/sign_test.go b/vendor/github.com/OpenBazaar/multiwallet/bitcoincash/sign_test.go new file mode 100644 index 0000000000..dea1e01419 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/bitcoincash/sign_test.go @@ -0,0 +1,732 @@ +package bitcoincash + +import ( + "bytes" + "encoding/hex" + "github.com/OpenBazaar/multiwallet/util" + "github.com/gcash/bchd/txscript" + "os" + "testing" + "time" + + "github.com/OpenBazaar/multiwallet/cache" + "github.com/OpenBazaar/multiwallet/datastore" + "github.com/OpenBazaar/multiwallet/keys" + "github.com/OpenBazaar/multiwallet/model/mock" + "github.com/OpenBazaar/multiwallet/service" + "github.com/OpenBazaar/wallet-interface" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + 
"github.com/btcsuite/btcutil/hdkeychain" + "github.com/cpacia/bchutil" + bchhash "github.com/gcash/bchd/chaincfg/chainhash" + bchwire "github.com/gcash/bchd/wire" +) + +type FeeResponse struct { + Priority int `json:"priority"` + Normal int `json:"normal"` + Economic int `json:"economic"` +} + +func newMockWallet() (*BitcoinCashWallet, error) { + mockDb := datastore.NewMockMultiwalletDatastore() + + db, err := mockDb.GetDatastoreForWallet(wallet.BitcoinCash) + if err != nil { + return nil, err + } + params := &chaincfg.MainNetParams + + seed, err := hex.DecodeString("16c034c59522326867593487c03a8f9615fb248406dd0d4ffb3a6b976a248403") + if err != nil { + return nil, err + } + master, err := hdkeychain.NewMaster(seed, params) + if err != nil { + return nil, err + } + km, err := keys.NewKeyManager(db.Keys(), params, master, wallet.BitcoinCash, bitcoinCashAddress) + if err != nil { + return nil, err + } + + fp := util.NewFeeProvider(2000, 300, 200, 100, nil) + + bw := &BitcoinCashWallet{ + params: params, + km: km, + db: db, + fp: fp, + } + cli := mock.NewMockApiClient(bw.AddressToScript) + ws, err := service.NewWalletService(db, km, cli, params, wallet.BitcoinCash, cache.NewMockCacher()) + if err != nil { + return nil, err + } + bw.client = cli + bw.ws = ws + return bw, nil +} + +func TestWalletService_VerifyWatchScriptFilter(t *testing.T) { + // Verify that AddWatchedAddress should never add a script which already represents a key from its own wallet + w, err := newMockWallet() + if err != nil { + t.Fatal(err) + } + keys := w.km.GetKeys() + + addr, err := w.km.KeyToAddress(keys[0]) + if err != nil { + t.Fatal(err) + } + err = w.AddWatchedAddresses(addr) + if err != nil { + t.Fatal(err) + } + + watchScripts, err := w.db.WatchedScripts().GetAll() + if err != nil { + t.Fatal(err) + } + + if len(watchScripts) != 0 { + t.Error("Put watched scripts fails on key manager owned key") + } +} + +func TestWalletService_VerifyWatchScriptPut(t *testing.T) { + // Verify that 
AddWatchedAddress should add a script which does not represent a key from its own wallet + w, err := newMockWallet() + if err != nil { + t.Fatal(err) + } + + addr, err := w.DecodeAddress("qqx0p0ja3xddkvwldaqwcvrkkgrzx6rjwuzla4ca90") + if err != nil { + t.Fatal(err) + } + + err = w.AddWatchedAddresses(addr) + if err != nil { + t.Fatal(err) + } + + watchScripts, err := w.db.WatchedScripts().GetAll() + if err != nil { + t.Fatal(err) + } + + if len(watchScripts) == 0 { + t.Error("Put watched scripts fails on non-key manager owned key") + } + +} + +func waitForTxnSync(t *testing.T, txnStore wallet.Txns) { + // Look for a known txn, this sucks a bit. It would be better to check if the + // number of stored txns matched the expected, but not all the mock + // transactions are relevant, so the numbers don't add up. + // Even better would be for the wallet to signal that the initial sync was + // done. + lastTxn := mock.MockTransactions[len(mock.MockTransactions)-2] + txHash, err := chainhash.NewHashFromStr(lastTxn.Txid) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 100; i++ { + if _, err := txnStore.Get(*txHash); err == nil { + return + } + time.Sleep(100 * time.Millisecond) + } + t.Fatal("timeout waiting for wallet to sync transactions") +} + +func TestBitcoinCashWallet_buildTx(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + w.ws.Start() + time.Sleep(time.Second / 2) + + addr, err := w.DecodeAddress("qpf464w2g36kyklq9shvyjk9lvuf6ph7jv3k8qpq0m") + if err != nil { + t.Error(err) + } + // Test build normal tx + tx, err := w.buildTx(1500000, addr, wallet.NORMAL, nil) + if err != nil { + w.DumpTables(os.Stdout) + t.Error(err) + return + } + if !containsOutput(tx, addr) { + t.Error("Built tx does not contain the requested output") + } + if !validInputs(tx, w.db) { + t.Error("Built tx does not contain valid inputs") + } + if !validChangeAddress(tx, w.db, w.params) { + t.Error("Built tx does not contain a valid change output") + } + + 
// Insuffient funds + _, err = w.buildTx(1000000000, addr, wallet.NORMAL, nil) + if err != wallet.ErrorInsuffientFunds { + t.Error("Failed to throw insuffient funds error") + } + + // Dust + _, err = w.buildTx(1, addr, wallet.NORMAL, nil) + if err != wallet.ErrorDustAmount { + t.Error("Failed to throw dust error") + } +} + +func TestBitcoinCashWallet_buildSpendAllTx(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + w.ws.Start() + time.Sleep(time.Second / 2) + + waitForTxnSync(t, w.db.Txns()) + addr, err := w.DecodeAddress("qpyafty5hf6uwjtd8y5tvgzeawfeyfhj55ke8l2dy7") + if err != nil { + t.Error(err) + } + + // Test build spendAll tx + tx, err := w.buildSpendAllTx(addr, wallet.NORMAL) + if err != nil { + t.Error(err) + } + utxos, err := w.db.Utxos().GetAll() + if err != nil { + t.Fatal(err) + } + spendableUtxos := 0 + for _, u := range utxos { + if !u.WatchOnly { + spendableUtxos++ + } + } + if len(tx.TxIn) != spendableUtxos { + t.Error("Built tx does not spend all available utxos") + } + if !containsOutput(tx, addr) { + t.Error("Built tx does not contain the requested output") + } + if !validInputs(tx, w.db) { + t.Error("Built tx does not contain valid inputs") + } + if len(tx.TxOut) != 1 { + t.Error("Built tx should only have one output") + } + + bchTx := bchwire.MsgTx{ + Version: tx.Version, + LockTime: tx.LockTime, + } + for _, in := range tx.TxIn { + hash := bchhash.Hash(in.PreviousOutPoint.Hash) + op := bchwire.NewOutPoint(&hash, in.PreviousOutPoint.Index) + newIn := bchwire.TxIn{ + PreviousOutPoint: *op, + Sequence: in.Sequence, + SignatureScript: in.SignatureScript, + } + bchTx.TxIn = append(bchTx.TxIn, &newIn) + } + for _, out := range tx.TxOut { + newOut := bchwire.TxOut{ + Value: out.Value, + PkScript: out.PkScript, + } + bchTx.TxOut = append(bchTx.TxOut, &newOut) + } + + // Verify the signatures on each input using the scripting engine + for i, in := range tx.TxIn { + var prevScript []byte + var amt int64 + for _, u := 
range utxos { + if util.OutPointsEqual(u.Op, in.PreviousOutPoint) { + prevScript = u.ScriptPubkey + amt = u.Value + break + } + } + vm, err := txscript.NewEngine(prevScript, &bchTx, i, txscript.StandardVerifyFlags, nil, nil, amt) + if err != nil { + t.Fatal(err) + } + if err := vm.Execute(); err != nil { + t.Error(err) + } + } +} + +func containsOutput(tx *wire.MsgTx, addr btcutil.Address) bool { + for _, o := range tx.TxOut { + script, _ := bchutil.PayToAddrScript(addr) + if bytes.Equal(script, o.PkScript) { + return true + } + } + return false +} + +func validInputs(tx *wire.MsgTx, db wallet.Datastore) bool { + utxos, _ := db.Utxos().GetAll() + uMap := make(map[wire.OutPoint]bool) + for _, u := range utxos { + uMap[u.Op] = true + } + for _, in := range tx.TxIn { + if !uMap[in.PreviousOutPoint] { + return false + } + } + return true +} + +func validChangeAddress(tx *wire.MsgTx, db wallet.Datastore, params *chaincfg.Params) bool { + for _, out := range tx.TxOut { + addr, err := bchutil.ExtractPkScriptAddrs(out.PkScript, params) + if err != nil { + continue + } + _, err = db.Keys().GetPathForKey(addr.ScriptAddress()) + if err == nil { + return true + } + } + return false +} + +func TestBitcoinCashWallet_GenerateMultisigScript(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + key1, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + pubkey1, err := key1.ECPubKey() + if err != nil { + t.Error(err) + } + key2, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + pubkey2, err := key2.ECPubKey() + if err != nil { + t.Error(err) + } + key3, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + pubkey3, err := key3.ECPubKey() + if err != nil { + t.Error(err) + } + keys := []hdkeychain.ExtendedKey{*key1, *key2, *key3} + + // test without timeout + addr, redeemScript, err := w.generateMultisigScript(keys, 2, 0, nil) + if err != nil { + t.Error(err) + } + if 
addr.String() != "pzjfg2pg2q6uz445vx7hvmuw6rp0ay5f9q9vnhwqfl" { + t.Error("Returned invalid address") + } + + rs := "52" + // OP_2 + "21" + // OP_PUSHDATA(33) + hex.EncodeToString(pubkey1.SerializeCompressed()) + // pubkey1 + "21" + // OP_PUSHDATA(33) + hex.EncodeToString(pubkey2.SerializeCompressed()) + // pubkey2 + "21" + // OP_PUSHDATA(33) + hex.EncodeToString(pubkey3.SerializeCompressed()) + // pubkey3 + "53" + // OP_3 + "ae" // OP_CHECKMULTISIG + rsBytes, err := hex.DecodeString(rs) + if err != nil { + t.Error(err) + } + if !bytes.Equal(rsBytes, redeemScript) { + t.Error("Returned invalid redeem script") + } + + // test with timeout + key4, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + pubkey4, err := key4.ECPubKey() + if err != nil { + t.Error(err) + } + addr, redeemScript, err = w.generateMultisigScript(keys, 2, time.Hour*10, key4) + if err != nil { + t.Error(err) + } + if addr.String() != "ppx5mmammxfs42m0p6ypvf6znnkq3llskvlz0texus" { + t.Error("Returned invalid address") + } + + rs = "63" + // OP_IF + "52" + // OP_2 + "21" + // OP_PUSHDATA(33) + hex.EncodeToString(pubkey1.SerializeCompressed()) + // pubkey1 + "21" + // OP_PUSHDATA(33) + hex.EncodeToString(pubkey2.SerializeCompressed()) + // pubkey2 + "21" + // OP_PUSHDATA(33) + hex.EncodeToString(pubkey3.SerializeCompressed()) + // pubkey3 + "53" + // OP_3 + "ae" + // OP_CHECKMULTISIG + "67" + // OP_ELSE + "01" + // OP_PUSHDATA(1) + "3c" + // 60 blocks + "b2" + // OP_CHECKSEQUENCEVERIFY + "75" + // OP_DROP + "21" + // OP_PUSHDATA(33) + hex.EncodeToString(pubkey4.SerializeCompressed()) + // timeout pubkey + "ac" + // OP_CHECKSIG + "68" // OP_ENDIF + rsBytes, err = hex.DecodeString(rs) + if !bytes.Equal(rsBytes, redeemScript) { + t.Error("Returned invalid redeem script") + } +} + +func TestBitcoinCashWallet_newUnsignedTransaction(t *testing.T) { + w, err := newMockWallet() + w.ws.Start() + time.Sleep(time.Second / 2) + if err != nil { + t.Error(err) + } + utxos, err := 
w.db.Utxos().GetAll() + if err != nil { + t.Error(err) + } + addr, err := w.DecodeAddress("ppx5mmammxfs42m0p6ypvf6znnkq3llskvlz0texus") + if err != nil { + t.Error(err) + } + + script, err := bchutil.PayToAddrScript(addr) + if err != nil { + t.Error(err) + } + out := wire.NewTxOut(10000, script) + outputs := []*wire.TxOut{out} + + changeSource := func() ([]byte, error) { + addr := w.CurrentAddress(wallet.INTERNAL) + script, err := bchutil.PayToAddrScript(addr) + if err != nil { + return []byte{}, err + } + return script, nil + } + + inputSource := func(target btcutil.Amount) (total btcutil.Amount, inputs []*wire.TxIn, inputValues []btcutil.Amount, scripts [][]byte, err error) { + total += btcutil.Amount(utxos[0].Value) + in := wire.NewTxIn(&utxos[0].Op, []byte{}, [][]byte{}) + in.Sequence = 0 // Opt-in RBF so we can bump fees + inputs = append(inputs, in) + return total, inputs, inputValues, scripts, nil + } + + // Regular transaction + authoredTx, err := newUnsignedTransaction(outputs, btcutil.Amount(1000), inputSource, changeSource) + if err != nil { + t.Error(err) + } + if len(authoredTx.Tx.TxOut) != 2 { + t.Error("Returned incorrect number of outputs") + } + if len(authoredTx.Tx.TxIn) != 1 { + t.Error("Returned incorrect number of inputs") + } + + // Insufficient funds + outputs[0].Value = 1000000000 + _, err = newUnsignedTransaction(outputs, btcutil.Amount(1000), inputSource, changeSource) + if err == nil { + t.Error("Failed to return insuffient funds error") + } +} + +func TestBitcoinCashWallet_CreateMultisigSignature(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + ins, outs, redeemScript, err := buildTxData(w) + if err != nil { + t.Error(err) + } + + key1, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + + sigs, err := w.CreateMultisigSignature(ins, outs, key1, redeemScript, 50) + if err != nil { + t.Error(err) + } + if len(sigs) != 2 { + t.Error(err) + } + for _, sig := range sigs { + if 
len(sig.Signature) == 0 { + t.Error("Returned empty signature") + } + } +} + +func buildTxData(w *BitcoinCashWallet) ([]wallet.TransactionInput, []wallet.TransactionOutput, []byte, error) { + redeemScript := "522103c157f2a7c178430972263232c9306110090c50b44d4e906ecd6d377eec89a53c210205b02b9dbe570f36d1c12e3100e55586b2b9dc61d6778c1d24a8eaca03625e7e21030c83b025cd6bdd8c06e93a2b953b821b4a8c29da211335048d7dc3389706d7e853ae" + redeemScriptBytes, err := hex.DecodeString(redeemScript) + if err != nil { + return nil, nil, nil, err + } + h1, err := hex.DecodeString("1a20f4299b4fa1f209428dace31ebf4f23f13abd8ed669cebede118343a6ae05") + if err != nil { + return nil, nil, nil, err + } + in1 := wallet.TransactionInput{ + OutpointHash: h1, + OutpointIndex: 1, + } + h2, err := hex.DecodeString("458d88b4ae9eb4a347f2e7f5592f1da3b9ddf7d40f307f6e5d7bc107a9b3e90e") + if err != nil { + return nil, nil, nil, err + } + in2 := wallet.TransactionInput{ + OutpointHash: h2, + OutpointIndex: 0, + } + addr, err := w.DecodeAddress("ppx5mmammxfs42m0p6ypvf6znnkq3llskvlz0texus") + if err != nil { + return nil, nil, nil, err + } + + out := wallet.TransactionOutput{ + Value: 20000, + Address: addr, + } + return []wallet.TransactionInput{in1, in2}, []wallet.TransactionOutput{out}, redeemScriptBytes, nil +} + +func TestBitcoinCashWallet_Multisign(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + ins, outs, redeemScript, err := buildTxData(w) + if err != nil { + t.Error(err) + } + + key1, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + + key2, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + + sigs1, err := w.CreateMultisigSignature(ins, outs, key1, redeemScript, 50) + if err != nil { + t.Error(err) + } + if len(sigs1) != 2 { + t.Error(err) + } + sigs2, err := w.CreateMultisigSignature(ins, outs, key2, redeemScript, 50) + if err != nil { + t.Error(err) + } + if len(sigs2) != 2 { + t.Error(err) + } + txBytes, err 
:= w.Multisign(ins, outs, sigs1, sigs2, redeemScript, 50, false) + if err != nil { + t.Error(err) + } + + tx := wire.NewMsgTx(0) + tx.BtcDecode(bytes.NewReader(txBytes), wire.ProtocolVersion, wire.WitnessEncoding) + if len(tx.TxIn) != 2 { + t.Error("Transactions has incorrect number of inputs") + } + if len(tx.TxOut) != 1 { + t.Error("Transactions has incorrect number of outputs") + } + for _, in := range tx.TxIn { + if len(in.SignatureScript) == 0 { + t.Error("Input script has zero length") + } + } +} + +func TestBitcoinCashWallet_bumpFee(t *testing.T) { + w, err := newMockWallet() + w.ws.Start() + time.Sleep(time.Second / 2) + if err != nil { + t.Error(err) + } + txns, err := w.db.Txns().GetAll(false) + if err != nil { + t.Error(err) + } + ch, err := chainhash.NewHashFromStr(txns[2].Txid) + if err != nil { + t.Error(err) + } + utxos, err := w.db.Utxos().GetAll() + if err != nil { + t.Error(err) + } + for _, u := range utxos { + if u.Op.Hash.IsEqual(ch) { + u.AtHeight = 0 + w.db.Utxos().Put(u) + } + } + + w.db.Txns().UpdateHeight(*ch, 0, time.Now()) + + // Test unconfirmed + _, err = w.bumpFee(*ch) + if err != nil { + t.Error(err) + } + + err = w.db.Txns().UpdateHeight(*ch, 1289597, time.Now()) + if err != nil { + t.Error(err) + } + + // Test confirmed + _, err = w.bumpFee(*ch) + if err == nil { + t.Error("Should not be able to bump fee of confirmed txs") + } +} + +func TestBitcoinCashWallet_sweepAddress(t *testing.T) { + w, err := newMockWallet() + w.ws.Start() + time.Sleep(time.Second / 2) + if err != nil { + t.Error(err) + } + utxos, err := w.db.Utxos().GetAll() + if err != nil { + t.Error(err) + } + var in wallet.TransactionInput + var key *hdkeychain.ExtendedKey + for _, ut := range utxos { + if ut.Value > 0 && !ut.WatchOnly { + addr, err := w.ScriptToAddress(ut.ScriptPubkey) + if err != nil { + t.Error(err) + } + key, err = w.km.GetKeyForScript(addr.ScriptAddress()) + if err != nil { + t.Error(err) + } + h, err := hex.DecodeString(ut.Op.Hash.String()) + if 
err != nil { + t.Error(err) + } + in = wallet.TransactionInput{ + LinkedAddress: addr, + Value: ut.Value, + OutpointIndex: ut.Op.Index, + OutpointHash: h, + } + break + } + } + // P2PKH addr + _, err = w.sweepAddress([]wallet.TransactionInput{in}, nil, key, nil, wallet.NORMAL) + if err != nil { + t.Error(err) + return + } + + // 1 of 2 P2WSH + for _, ut := range utxos { + if ut.Value > 0 && ut.WatchOnly { + h, err := hex.DecodeString(ut.Op.Hash.String()) + if err != nil { + t.Error(err) + } + addr, err := w.ScriptToAddress(ut.ScriptPubkey) + if err != nil { + t.Error(err) + } + in = wallet.TransactionInput{ + LinkedAddress: addr, + Value: ut.Value, + OutpointIndex: ut.Op.Index, + OutpointHash: h, + } + } + } + key1, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + + key2, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + _, redeemScript, err := w.GenerateMultisigScript([]hdkeychain.ExtendedKey{*key1, *key2}, 1, 0, nil) + if err != nil { + t.Error(err) + } + _, err = w.sweepAddress([]wallet.TransactionInput{in}, nil, key1, &redeemScript, wallet.NORMAL) + if err != nil { + t.Error(err) + } +} + +func TestBitcoinCashWallet_estimateSpendFee(t *testing.T) { + w, err := newMockWallet() + w.ws.Start() + time.Sleep(time.Second / 2) + if err != nil { + t.Error(err) + } + fee, err := w.estimateSpendFee(1000, wallet.NORMAL) + if err != nil { + t.Error(err) + } + if fee <= 0 { + t.Error("Returned incorrect fee") + } +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/bitcoincash/wallet.go b/vendor/github.com/OpenBazaar/multiwallet/bitcoincash/wallet.go index 99328ce5a4..c2b0ba20e0 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/bitcoincash/wallet.go +++ b/vendor/github.com/OpenBazaar/multiwallet/bitcoincash/wallet.go @@ -13,7 +13,6 @@ import ( wi "github.com/OpenBazaar/wallet-interface" "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" 
"github.com/btcsuite/btcutil" hd "github.com/btcsuite/btcutil/hdkeychain" @@ -248,7 +247,7 @@ func (w *BitcoinCashWallet) Transactions() ([]wi.Txn, error) { return txns, nil } -func (w *BitcoinCashWallet) GetTransaction(txid chainhash.Hash) (wi.Txn, error) { +func (w *BitcoinCashWallet) GetTransaction(txid string) (wi.Txn, error) { txn, err := w.db.Txns().Get(txid) if err == nil { tx := wire.NewMsgTx(1) @@ -275,7 +274,7 @@ func (w *BitcoinCashWallet) GetTransaction(txid chainhash.Hash) (wi.Txn, error) return txn, err } -func (w *BitcoinCashWallet) ChainTip() (uint32, chainhash.Hash) { +func (w *BitcoinCashWallet) ChainTip() (uint32, string) { return w.ws.ChainTip() } @@ -283,7 +282,7 @@ func (w *BitcoinCashWallet) GetFeePerByte(feeLevel wi.FeeLevel) big.Int { return *big.NewInt(int64(w.fp.GetFeePerByte(feeLevel))) } -func (w *BitcoinCashWallet) Spend(amount big.Int, addr btcutil.Address, feeLevel wi.FeeLevel, referenceID string, spendAll bool) (*chainhash.Hash, error) { +func (w *BitcoinCashWallet) Spend(amount big.Int, addr btcutil.Address, feeLevel wi.FeeLevel, referenceID string, spendAll bool) (string, error) { var ( tx *wire.MsgTx err error @@ -291,25 +290,25 @@ func (w *BitcoinCashWallet) Spend(amount big.Int, addr btcutil.Address, feeLevel if spendAll { tx, err = w.buildSpendAllTx(addr, feeLevel) if err != nil { - return nil, err + return "", err } } else { tx, err = w.buildTx(amount.Int64(), addr, feeLevel, nil) if err != nil { - return nil, err + return "", err } } // Broadcast if err := w.Broadcast(tx); err != nil { - return nil, err + return "", err } ch := tx.TxHash() - return &ch, nil + return ch.String(), nil } -func (w *BitcoinCashWallet) BumpFee(txid chainhash.Hash) (*chainhash.Hash, error) { +func (w *BitcoinCashWallet) BumpFee(txid string) (string, error) { return w.bumpFee(txid) } @@ -330,7 +329,7 @@ func (w *BitcoinCashWallet) EstimateSpendFee(amount big.Int, feeLevel wi.FeeLeve return *big.NewInt(int64(val)), err } -func (w *BitcoinCashWallet) 
SweepAddress(ins []wi.TransactionInput, address *btcutil.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (*chainhash.Hash, error) { +func (w *BitcoinCashWallet) SweepAddress(ins []wi.TransactionInput, address *btcutil.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (string, error) { return w.sweepAddress(ins, address, key, redeemScript, feeLevel) } @@ -389,7 +388,7 @@ func (w *BitcoinCashWallet) ReSyncBlockchain(fromTime time.Time) { go w.ws.UpdateState() } -func (w *BitcoinCashWallet) GetConfirmations(txid chainhash.Hash) (uint32, uint32, error) { +func (w *BitcoinCashWallet) GetConfirmations(txid string) (uint32, uint32, error) { txn, err := w.db.Txns().Get(txid) if err != nil { return 0, 0, err diff --git a/vendor/github.com/OpenBazaar/multiwallet/cli/cli.go b/vendor/github.com/OpenBazaar/multiwallet/cli/cli.go new file mode 100644 index 0000000000..fcd61dc799 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/cli/cli.go @@ -0,0 +1,323 @@ +package cli + +import ( + "errors" + "fmt" + "strconv" + "strings" + + "github.com/OpenBazaar/multiwallet/api" + "github.com/OpenBazaar/multiwallet/api/pb" + "github.com/jessevdk/go-flags" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +func SetupCli(parser *flags.Parser) { + // Add commands to parser + parser.AddCommand("stop", + "stop the wallet", + "The stop command disconnects from peers and shuts down the wallet", + &stop) + parser.AddCommand("currentaddress", + "get the current bitcoin address", + "Returns the first unused address in the keychain\n\n"+ + "Args:\n"+ + "1. coinType (string)\n"+ + "2. purpose (string default=external) The purpose for the address. 
Can be external for receiving from outside parties or internal for example, for change.\n\n"+ + "Examples:\n"+ + "> multiwallet currentaddress bitcoin\n"+ + "1DxGWC22a46VPEjq8YKoeVXSLzB7BA8sJS\n"+ + "> multiwallet currentaddress bitcoin internal\n"+ + "18zAxgfKx4NuTUGUEuB8p7FKgCYPM15DfS\n", + ¤tAddress) + parser.AddCommand("newaddress", + "get a new bitcoin address", + "Returns a new unused address in the keychain. Use caution when using this function as generating too many new addresses may cause the keychain to extend further than the wallet's lookahead window, meaning it might fail to recover all transactions when restoring from seed. CurrentAddress is safer as it never extends past the lookahead window.\n\n"+ + "Args:\n"+ + "1. coinType (string)\n"+ + "2. purpose (string default=external) The purpose for the address. Can be external for receiving from outside parties or internal for example, for change.\n\n"+ + "Examples:\n"+ + "> multiwallet newaddress bitcoin\n"+ + "1DxGWC22a46VPEjq8YKoeVXSLzB7BA8sJS\n"+ + "> multiwallet newaddress bitcoin internal\n"+ + "18zAxgfKx4NuTUGUEuB8p7FKgCYPM15DfS\n", + &newAddress) + parser.AddCommand("chaintip", + "return the height of the chain", + "Returns the height of the best chain of blocks", + &chainTip) + parser.AddCommand("dumptables", + "print out the database tables", + "Prints each row in the database tables", + &dumpTables) + parser.AddCommand("spend", + "send bitcoins", + "Send bitcoins to the given address\n\n"+ + "Args:\n"+ + "1. coinType (string)\n"+ + "2. address (string) The recipient's bitcoin address\n"+ + "3. amount (integer) The amount to send in satoshi"+ + "4. feelevel (string default=normal) The fee level: economic, normal, priority\n\n"+ + "5. 
memo (string) The orderID\n"+ + "Examples:\n"+ + "> multiwallet spend bitcoin 1DxGWC22a46VPEjq8YKoeVXSLzB7BA8sJS 1000000\n"+ + "82bfd45f3564e0b5166ab9ca072200a237f78499576e9658b20b0ccd10ff325c 1a3w"+ + "> multiwallet spend bitcoin 1DxGWC22a46VPEjq8YKoeVXSLzB7BA8sJS 3000000000 priority\n"+ + "82bfd45f3564e0b5166ab9ca072200a237f78499576e9658b20b0ccd10ff325c 4wq2", + &spend) + parser.AddCommand("balance", + "get the wallet's balances", + "Returns the confirmed and unconfirmed balances for the specified coin", + &balance) +} + +func coinType(args []string) pb.CoinType { + if len(args) == 0 { + return pb.CoinType_BITCOIN + } + switch strings.ToLower(args[0]) { + case "bitcoin": + return pb.CoinType_BITCOIN + case "bitcoincash": + return pb.CoinType_BITCOIN_CASH + case "zcash": + return pb.CoinType_ZCASH + case "litecoin": + return pb.CoinType_LITECOIN + case "ethereum": + return pb.CoinType_ETHEREUM + default: + return pb.CoinType_BITCOIN + } +} + +func newGRPCClient() (pb.APIClient, *grpc.ClientConn, error) { + // Set up a connection to the server. 
+ conn, err := grpc.Dial(api.Addr, grpc.WithInsecure()) + if err != nil { + return nil, nil, err + } + client := pb.NewAPIClient(conn) + return client, conn, nil +} + +type Stop struct{} + +var stop Stop + +func (x *Stop) Execute(args []string) error { + client, conn, err := newGRPCClient() + if err != nil { + return err + } + defer conn.Close() + client.Stop(context.Background(), &pb.Empty{}) + return nil +} + +type CurrentAddress struct{} + +var currentAddress CurrentAddress + +func (x *CurrentAddress) Execute(args []string) error { + client, conn, err := newGRPCClient() + if err != nil { + return err + } + defer conn.Close() + var purpose pb.KeyPurpose + userSelection := "" + + t := coinType(args) + if len(args) == 1 { + userSelection = args[0] + } else if len(args) == 2 { + userSelection = args[1] + } + switch strings.ToLower(userSelection) { + case "internal": + purpose = pb.KeyPurpose_INTERNAL + case "external": + purpose = pb.KeyPurpose_EXTERNAL + default: + purpose = pb.KeyPurpose_EXTERNAL + } + + resp, err := client.CurrentAddress(context.Background(), &pb.KeySelection{Coin: t, Purpose: purpose}) + if err != nil { + return err + } + fmt.Println(resp.Addr) + return nil +} + +type NewAddress struct{} + +var newAddress NewAddress + +func (x *NewAddress) Execute(args []string) error { + client, conn, err := newGRPCClient() + if err != nil { + return err + } + defer conn.Close() + if len(args) == 0 { + return errors.New("Must select coin type") + } + t := coinType(args) + var purpose pb.KeyPurpose + userSelection := "" + if len(args) == 1 { + userSelection = args[0] + } else if len(args) == 2 { + userSelection = args[1] + } + switch strings.ToLower(userSelection) { + case "internal": + purpose = pb.KeyPurpose_INTERNAL + case "external": + purpose = pb.KeyPurpose_EXTERNAL + default: + purpose = pb.KeyPurpose_EXTERNAL + } + resp, err := client.NewAddress(context.Background(), &pb.KeySelection{Coin: t, Purpose: purpose}) + if err != nil { + return err + } + 
fmt.Println(resp.Addr) + return nil +} + +type ChainTip struct{} + +var chainTip ChainTip + +func (x *ChainTip) Execute(args []string) error { + client, conn, err := newGRPCClient() + if err != nil { + return err + } + defer conn.Close() + if len(args) == 0 { + return errors.New("Must select coin type") + } + t := coinType(args) + resp, err := client.ChainTip(context.Background(), &pb.CoinSelection{Coin: t}) + if err != nil { + return err + } + fmt.Println(resp.Height) + return nil +} + +type DumpTables struct{} + +var dumpTables DumpTables + +func (x *DumpTables) Execute(args []string) error { + client, conn, err := newGRPCClient() + if err != nil { + return err + } + defer conn.Close() + if len(args) == 0 { + return errors.New("Must select coin type") + } + t := coinType(args) + resp, err := client.DumpTables(context.Background(), &pb.CoinSelection{Coin: t}) + if err != nil { + return err + } + for { + row, err := resp.Recv() + if err != nil { + // errors when no more rows and exits + return err + } + fmt.Println(row.Data) + } +} + +type Spend struct{} + +var spend Spend + +func (x *Spend) Execute(args []string) error { + var ( + address string + feeLevel pb.FeeLevel + referenceID string + userSelection string + + client, conn, err = newGRPCClient() + ) + if err != nil { + return err + } + defer conn.Close() + + if len(args) == 0 { + return errors.New("Must select coin type") + } + if len(args) > 4 { + address = args[1] + userSelection = args[3] + referenceID = args[4] + } + if len(args) < 4 { + return errors.New("Address and amount are required") + } + + switch strings.ToLower(userSelection) { + case "economic": + feeLevel = pb.FeeLevel_ECONOMIC + case "normal": + feeLevel = pb.FeeLevel_NORMAL + case "priority": + feeLevel = pb.FeeLevel_PRIORITY + default: + feeLevel = pb.FeeLevel_NORMAL + } + + amt, err := strconv.Atoi(args[2]) + if err != nil { + return err + } + + resp, err := client.Spend(context.Background(), &pb.SpendInfo{ + Coin: coinType(args), + 
Address: address, + Amount: uint64(amt), + FeeLevel: feeLevel, + Memo: referenceID, + }) + if err != nil { + return err + } + + fmt.Println(resp.Hash) + return nil +} + +type Balance struct{} + +var balance Balance + +func (x *Balance) Execute(args []string) error { + client, conn, err := newGRPCClient() + if err != nil { + return err + } + defer conn.Close() + if len(args) == 0 { + return errors.New("Must select coin type") + } + t := coinType(args) + resp, err := client.Balance(context.Background(), &pb.CoinSelection{Coin: t}) + if err != nil { + return err + } + fmt.Printf("Confirmed: %d, Unconfirmed: %d\n", resp.Confirmed, resp.Unconfirmed) + return nil +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/client/blockbook/client.go b/vendor/github.com/OpenBazaar/multiwallet/client/blockbook/client.go index 46a15a0e1d..f996146549 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/client/blockbook/client.go +++ b/vendor/github.com/OpenBazaar/multiwallet/client/blockbook/client.go @@ -21,7 +21,6 @@ import ( "github.com/OpenBazaar/multiwallet/client/transport" "github.com/OpenBazaar/multiwallet/model" "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcutil" "github.com/cenkalti/backoff" "github.com/cpacia/bchutil" @@ -262,7 +261,8 @@ func (i *BlockBookClient) GetTransaction(txid string) (*model.Transaction, error } type resOut struct { model.Output - Spent bool `json:"spent"` + Spent bool `json:"spent"` + Addresses []string `json:"addresses"` } type resTx struct { model.Transaction @@ -281,6 +281,9 @@ func (i *BlockBookClient) GetTransaction(txid string) (*model.Transaction, error return nil, fmt.Errorf("error decoding transactions: %s", err) } for n, in := range tx.Vin { + if in.ValueIface == "" || in.ValueIface == nil { + in.ValueIface = "0" + } f, err := model.ToFloat(in.ValueIface) if err != nil { return nil, err @@ -288,6 +291,9 @@ func (i *BlockBookClient) GetTransaction(txid string) 
(*model.Transaction, error tx.Vin[n].Value = f } for n, out := range tx.Vout { + if out.ValueIface == "" || out.ValueIface == nil { + out.ValueIface = "0" + } f, err := model.ToFloat(out.ValueIface) if err != nil { return nil, err @@ -334,6 +340,9 @@ func (i *BlockBookClient) GetTransaction(txid string) (*model.Transaction, error for i, addr := range newOut.ScriptPubKey.Addresses { newOut.ScriptPubKey.Addresses[i] = maybeTrimCashAddrPrefix(addr) } + if len(o.Addresses) > 0 { + newOut.ScriptPubKey.Addresses = o.Addresses + } ctx.Outputs = append(ctx.Outputs, newOut) } return &ctx, nil @@ -387,6 +396,7 @@ func (i *BlockBookClient) getTransactions(addr string) ([]model.Transaction, err type resAddr struct { TotalPages int `json:"totalPages"` Transactions []string `json:"transactions"` + Txids []string `json:"txids"` } type txOrError struct { Tx *model.Transaction @@ -409,6 +419,10 @@ func (i *BlockBookClient) getTransactions(addr string) ([]model.Transaction, err return nil, fmt.Errorf("error decoding addrs response: %s", err) } txChan := make(chan txOrError) + if len(res.Transactions) == 0 && len(res.Txids) > 0 { + res.Transactions = res.Txids + } + go func() { var wg sync.WaitGroup wg.Add(len(res.Transactions)) @@ -643,16 +657,13 @@ func (i *BlockBookClient) setupListeners() error { Log.Errorf("error checking type after socket notification: %T", arg) return } - _, err := chainhash.NewHashFromStr(txid) // Check is 256 bit hash. 
Might also be address - if err == nil { - tx, err := i.GetTransaction(txid) - if err != nil { - Log.Errorf("error downloading tx after socket notification: %s", err.Error()) - return - } - tx.Time = time.Now().Unix() - i.txNotifyChan <- *tx + tx, err := i.GetTransaction(txid) + if err != nil { + Log.Errorf("error downloading tx after socket notification: %s", err.Error()) + return } + tx.Time = time.Now().Unix() + i.txNotifyChan <- *tx } }) diff --git a/vendor/github.com/OpenBazaar/multiwallet/client/pool_test.go b/vendor/github.com/OpenBazaar/multiwallet/client/pool_test.go new file mode 100644 index 0000000000..dd547749a7 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/client/pool_test.go @@ -0,0 +1,254 @@ +package client_test + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/OpenBazaar/multiwallet/client" + "github.com/OpenBazaar/multiwallet/model" + "github.com/OpenBazaar/multiwallet/model/mock" + "github.com/OpenBazaar/multiwallet/test/factory" + "github.com/jarcoal/httpmock" +) + +func replaceHTTPClientOnClientPool(p *client.ClientPool, c http.Client) { + for _, cp := range p.Clients() { + cp.HTTPClient = c + } + p.HTTPClient = c +} + +func mustPrepareClientPool(endpoints []string) (*client.ClientPool, func()) { + var p, err = client.NewClientPool(endpoints, nil) + if err != nil { + panic(err.Error()) + } + + mockedHTTPClient := http.Client{} + httpmock.ActivateNonDefault(&mockedHTTPClient) + replaceHTTPClientOnClientPool(p, mockedHTTPClient) + + mock.MockWebsocketClientOnClientPool(p) + err = p.Start() + if err != nil { + panic(err.Error()) + } + + return p, func() { + httpmock.DeactivateAndReset() + p.Close() + } +} + +func TestRequestRotatesServersOn500(t *testing.T) { + var ( + endpointOne = "http://localhost:8332" + endpointTwo = "http://localhost:8336" + p, cleanup = mustPrepareClientPool([]string{endpointOne, endpointTwo}) + expectedTx = factory.NewTransaction() + txid = 
"1be612e4f2b79af279e0b307337924072b819b3aca09fcb20370dd9492b83428" + ) + defer cleanup() + + httpmock.RegisterResponder(http.MethodGet, fmt.Sprintf("%s/tx/%s", endpointOne, txid), + func(req *http.Request) (*http.Response, error) { + return httpmock.NewJsonResponse(http.StatusInternalServerError, expectedTx) + }, + ) + httpmock.RegisterResponder(http.MethodGet, fmt.Sprintf("%s/tx/%s", endpointTwo, txid), + func(req *http.Request) (*http.Response, error) { + return httpmock.NewJsonResponse(http.StatusOK, expectedTx) + }, + ) + + _, err := p.GetTransaction(txid) + if err != nil { + t.Errorf("expected successful transaction, but got error: %s", err.Error()) + } +} + +func TestRequestRetriesTimeoutsToExhaustionThenRotates(t *testing.T) { + var ( + endpointOne = "http://localhost:8332" + endpointTwo = "http://localhost:8336" + fastTimeoutClient = http.Client{Timeout: 500000 * time.Nanosecond} + p, err = client.NewClientPool([]string{endpointOne, endpointTwo}, nil) + ) + if err != nil { + t.Fatal(err) + } + + httpmock.DeactivateAndReset() + httpmock.ActivateNonDefault(&fastTimeoutClient) + replaceHTTPClientOnClientPool(p, fastTimeoutClient) + mock.MockWebsocketClientOnClientPool(p) + if err = p.Start(); err != nil { + t.Fatal(err) + } + defer func() { + httpmock.DeactivateAndReset() + p.Close() + }() + + var ( + txid = "1be612e4f2b79af279e0b307337924072b819b3aca09fcb20370dd9492b83428" + expectedAttempts = uint(3) + requestAttempts uint + laggyResponse = func(req *http.Request) (*http.Response, error) { + if requestAttempts < expectedAttempts { + requestAttempts++ + time.Sleep(1 * time.Second) + return nil, fmt.Errorf("timeout") + } + return httpmock.NewJsonResponse(http.StatusOK, factory.NewTransaction()) + } + ) + httpmock.RegisterResponder(http.MethodGet, fmt.Sprintf("%s/tx/%s", endpointOne, txid), laggyResponse) + httpmock.RegisterResponder(http.MethodGet, fmt.Sprintf("%s/tx/%s", endpointTwo, txid), laggyResponse) + + _, err = p.GetTransaction(txid) + if err == nil { 
+ t.Errorf("expected getTransaction to respond with timeout error, but did not") + return + } + if requestAttempts != expectedAttempts { + t.Errorf("expected initial server to be attempted %d times, but was attempted only %d", expectedAttempts, requestAttempts) + } + _, err = p.GetTransaction(txid) + if err != nil { + t.Errorf("expected getTransaction to rotate to the next server and succeed, but returned error: %s", err.Error()) + } +} + +func TestPoolBlockNotifyWorksAfterRotation(t *testing.T) { + var ( + endpointOne = "http://localhost:8332" + endpointTwo = "http://localhost:8336" + testHash = "0000000000000000003f1fb88ac3dab0e607e87def0e9031f7bea02cb464a04f" + txid = "1be612e4f2b79af279e0b307337924072b819b3aca09fcb20370dd9492b83428" + testPath = func(host string) string { return fmt.Sprintf("%s/tx/%s", host, txid) } + p, cleanup = mustPrepareClientPool([]string{endpointOne, endpointTwo}) + ) + defer cleanup() + + // GetTransaction should fail for endpoint one and succeed for endpoint two + var ( + beenBad bool + badThenGood = func(req *http.Request) (*http.Response, error) { + if beenBad { + return httpmock.NewJsonResponse(http.StatusOK, factory.NewTransaction()) + } + beenBad = true + return httpmock.NewJsonResponse(http.StatusInternalServerError, nil) + } + ) + httpmock.RegisterResponder(http.MethodGet, testPath(endpointOne), badThenGood) + httpmock.RegisterResponder(http.MethodGet, testPath(endpointTwo), badThenGood) + + go func() { + c := p.PoolManager().AcquireCurrentWhenReady() + c.BlockChannel() <- model.Block{Hash: testHash} + p.PoolManager().ReleaseCurrent() + }() + + ticker := time.NewTicker(time.Second * 2) + select { + case <-ticker.C: + t.Error("Timed out waiting for block") + case b := <-p.BlockNotify(): + if b.Hash != testHash { + t.Error("Returned incorrect block hash") + } + } + ticker.Stop() + + // request transaction triggers rotation + if _, err := p.GetTransaction(txid); err != nil { + t.Fatal(err) + } + + go func() { + c := 
p.PoolManager().AcquireCurrentWhenReady() + c.BlockChannel() <- model.Block{Hash: testHash} + p.PoolManager().ReleaseCurrent() + }() + + ticker = time.NewTicker(time.Second * 2) + select { + case <-ticker.C: + t.Error("Timed out waiting for block") + case b := <-p.BlockNotify(): + if b.Hash != testHash { + t.Error("Returned incorrect block hash") + } + } + ticker.Stop() +} + +func TestTransactionNotifyWorksAfterRotation(t *testing.T) { + var ( + endpointOne = "http://localhost:8332" + endpointTwo = "http://localhost:8336" + expectedTx = factory.NewTransaction() + expectedTxid = "500000e4f2b79af279e0b307337924072b819b3aca09fcb20370dd9492b83428" + testPath = func(host string) string { return fmt.Sprintf("%s/tx/%s", host, expectedTxid) } + p, cleanup = mustPrepareClientPool([]string{endpointOne, endpointTwo}) + ) + defer cleanup() + expectedTx.Txid = expectedTxid + + // GetTransaction should fail for endpoint one and succeed for endpoint two + var ( + beenBad bool + badThenGood = func(req *http.Request) (*http.Response, error) { + if beenBad { + return httpmock.NewJsonResponse(http.StatusOK, expectedTx) + } + beenBad = true + return httpmock.NewJsonResponse(http.StatusInternalServerError, nil) + } + ) + httpmock.RegisterResponder(http.MethodGet, testPath(endpointOne), badThenGood) + httpmock.RegisterResponder(http.MethodGet, testPath(endpointTwo), badThenGood) + + go func() { + c := p.PoolManager().AcquireCurrentWhenReady() + c.TxChannel() <- expectedTx + p.PoolManager().ReleaseCurrent() + }() + + ticker := time.NewTicker(time.Second * 2) + select { + case <-ticker.C: + t.Error("Timed out waiting for tx") + case b := <-p.TransactionNotify(): + if b.Txid != expectedTx.Txid { + t.Error("Returned incorrect tx hash") + } + } + ticker.Stop() + + // request transaction triggers rotation + if _, err := p.GetTransaction(expectedTxid); err != nil { + t.Fatal(err) + } + + go func() { + c := p.PoolManager().AcquireCurrentWhenReady() + c.TxChannel() <- expectedTx + 
p.PoolManager().ReleaseCurrent() + }() + + ticker = time.NewTicker(time.Second * 2) + select { + case <-ticker.C: + t.Error("Timed out waiting for tx") + case b := <-p.TransactionNotify(): + if b.Txid != expectedTx.Txid { + t.Error("Returned incorrect tx hash") + } + } + ticker.Stop() +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/cmd/multiwallet/main.go b/vendor/github.com/OpenBazaar/multiwallet/cmd/multiwallet/main.go new file mode 100644 index 0000000000..b0210137ff --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/cmd/multiwallet/main.go @@ -0,0 +1,83 @@ +package main + +import ( + "fmt" + "os" + "os/signal" + "sync" + + "github.com/OpenBazaar/multiwallet" + "github.com/OpenBazaar/multiwallet/api" + "github.com/OpenBazaar/multiwallet/cli" + "github.com/OpenBazaar/multiwallet/config" + wi "github.com/OpenBazaar/wallet-interface" + "github.com/btcsuite/btcd/chaincfg" + "github.com/jessevdk/go-flags" +) + +const WALLET_VERSION = "0.1.0" + +var parser = flags.NewParser(nil, flags.Default) + +type Start struct { + Testnet bool `short:"t" long:"testnet" description:"use the test network"` +} +type Version struct{} + +var start Start +var version Version +var mw multiwallet.MultiWallet + +func main() { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + go func() { + for range c { + fmt.Println("Multiwallet shutting down...") + os.Exit(1) + } + }() + parser.AddCommand("start", + "start the wallet", + "The start command starts the wallet daemon", + &start) + parser.AddCommand("version", + "print the version number", + "Print the version number and exit", + &version) + cli.SetupCli(parser) + if _, err := parser.Parse(); err != nil { + os.Exit(1) + } +} + +func (x *Version) Execute(args []string) error { + fmt.Println(WALLET_VERSION) + return nil +} + +func (x *Start) Execute(args []string) error { + m := make(map[wi.CoinType]bool) + m[wi.Bitcoin] = true + m[wi.BitcoinCash] = true + m[wi.Zcash] = true + m[wi.Litecoin] = true + 
m[wi.Ethereum] = true + params := &chaincfg.MainNetParams + if x.Testnet { + params = &chaincfg.TestNet3Params + } + cfg := config.NewDefaultConfig(m, params) + cfg.Mnemonic = "bottle author ability expose illegal saddle antique setup pledge wife innocent treat" + var err error + mw, err = multiwallet.NewMultiWallet(cfg) + if err != nil { + return err + } + go api.ServeAPI(mw) + var wg sync.WaitGroup + wg.Add(1) + mw.Start() + wg.Wait() + return nil +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/coverage.out b/vendor/github.com/OpenBazaar/multiwallet/coverage.out new file mode 100644 index 0000000000..cdaaa0735b --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/coverage.out @@ -0,0 +1,2065 @@ +mode: set +github.com/OpenBazaar/multiwallet/zcash/wallet.go:99.2,99.65 1 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:96.16,98.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:94.90,96.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:78.2,91.8 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:74.27,76.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:73.2,74.27 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:69.16,71.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:68.2,69.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:64.16,66.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:63.2,64.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:59.16,61.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:58.2,59.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:55.16,57.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:54.2,55.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:51.16,53.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:494.54,496.2 1 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:490.80,492.2 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:485.2,486.23 2 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:482.16,484.3 1 0 
+github.com/OpenBazaar/multiwallet/zcash/wallet.go:481.2,482.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:47.175,51.16 3 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:469.3,479.46 2 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:466.17,468.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:464.31,466.17 2 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:464.2,464.31 1 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:450.3,462.43 2 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:447.17,449.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:446.3,447.17 2 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:441.55,443.10 2 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:440.28,441.55 1 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:438.29,440.28 2 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:438.2,438.29 1 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:435.16,437.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:426.2,435.16 3 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:423.16,425.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:421.65,423.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:415.26,417.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:413.2,415.26 3 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:410.26,412.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:407.48,410.26 3 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:403.56,405.2 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:398.31,401.2 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:394.2,395.67 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:391.21,393.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:391.2,391.21 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:388.16,390.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:386.85,388.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:382.60,384.2 1 0 
+github.com/OpenBazaar/multiwallet/zcash/wallet.go:378.85,380.2 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:374.2,375.12 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:371.16,373.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:370.2,371.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:367.16,369.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:365.61,367.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:361.2,362.12 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:357.16,359.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:356.2,357.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:353.3,353.50 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:350.17,352.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:348.29,350.17 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:345.75,348.29 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:341.190,343.2 1 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:337.205,339.2 1 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:333.188,335.2 1 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:329.179,331.2 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:325.92,327.2 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:320.2,322.20 3 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:315.27,319.3 3 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:313.117,315.27 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:309.77,311.2 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:306.2,306.39 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:302.16,304.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:301.2,302.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:296.17,298.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:294.8,296.17 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:291.17,293.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:289.14,291.17 2 0 
+github.com/OpenBazaar/multiwallet/zcash/wallet.go:284.147,289.14 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:280.66,282.2 1 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:276.59,278.2 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:273.2,273.17 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:271.3,271.21 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:264.4,269.29 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:261.18,263.5 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:259.32,261.18 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:258.3,259.32 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:255.17,257.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:251.16,255.17 4 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:249.75,251.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:246.2,246.18 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:242.3,244.15 3 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:238.19,240.25 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:235.32,237.25 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:233.61,234.26 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:231.62,232.33 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:229.18,230.26 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:228.3,228.10 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:225.21,227.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:221.26,225.21 4 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:221.2,221.26 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:218.16,220.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:215.56,218.16 3 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:212.2,212.38 1 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:209.26,211.3 1 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:201.64,209.26 3 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:198.2,198.13 1 0 
+github.com/OpenBazaar/multiwallet/zcash/wallet.go:195.16,197.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:193.57,195.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:189.77,191.2 1 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:186.2,186.18 1 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:183.16,185.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:181.79,183.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:177.75,179.2 1 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:174.2,174.13 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:171.72,173.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:171.2,171.72 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:168.16,170.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:167.2,168.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:164.16,166.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:162.73,164.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:159.2,159.13 1 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:156.16,158.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:155.2,156.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:152.16,154.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:150.77,152.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:139.2,147.23 2 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:136.8,138.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:134.18,136.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:131.111,134.18 3 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:127.57,129.2 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:123.58,125.2 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:119.49,121.2 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:114.8,116.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:112.50,114.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:111.45,112.50 1 0 
+github.com/OpenBazaar/multiwallet/zcash/wallet.go:107.49,109.2 1 0 +github.com/OpenBazaar/multiwallet/zcash/wallet.go:102.31,105.2 2 0 +github.com/OpenBazaar/multiwallet/zcash/txsizes.go:248.2,248.22 1 1 +github.com/OpenBazaar/multiwallet/zcash/txsizes.go:245.32,247.3 1 1 +github.com/OpenBazaar/multiwallet/zcash/txsizes.go:244.73,245.32 1 1 +github.com/OpenBazaar/multiwallet/zcash/txsizes.go:236.2,240.13 1 1 +github.com/OpenBazaar/multiwallet/zcash/txsizes.go:231.36,232.58 1 0 +github.com/OpenBazaar/multiwallet/zcash/txsizes.go:229.35,230.58 1 0 +github.com/OpenBazaar/multiwallet/zcash/txsizes.go:227.26,228.53 1 1 +github.com/OpenBazaar/multiwallet/zcash/txsizes.go:225.26,226.53 1 1 +github.com/OpenBazaar/multiwallet/zcash/txsizes.go:223.13,224.42 1 1 +github.com/OpenBazaar/multiwallet/zcash/txsizes.go:221.2,222.19 2 1 +github.com/OpenBazaar/multiwallet/zcash/txsizes.go:216.21,219.3 2 1 +github.com/OpenBazaar/multiwallet/zcash/txsizes.go:213.113,216.21 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:98.3,98.50 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:94.4,96.48 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:923.2,929.13 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:920.31,922.3 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:918.45,920.31 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:91.18,92.13 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:909.2,915.13 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:904.29,908.3 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:902.46,904.29 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:90.4,91.18 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:893.2,899.13 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:882.29,892.3 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:880.46,882.29 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:877.2,877.25 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:873.16,875.3 1 0 
+github.com/OpenBazaar/multiwallet/zcash/sign.go:872.2,873.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:87.18,88.13 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:867.16,869.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:866.2,867.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:861.16,863.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:860.2,861.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:855.16,857.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:854.2,855.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:849.16,851.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:846.2,849.16 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:841.16,843.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:838.2,841.16 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:833.17,835.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:832.3,833.17 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:828.17,830.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:823.30,828.17 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:823.2,823.30 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:819.16,821.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:817.2,819.16 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:812.17,814.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:809.3,812.17 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:805.17,807.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:804.3,805.17 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:800.17,802.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:797.3,800.17 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:793.17,795.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:790.29,793.17 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:790.2,790.29 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:785.16,787.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:783.2,785.16 3 1 
+github.com/OpenBazaar/multiwallet/zcash/sign.go:78.35,87.18 9 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:778.16,780.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:777.2,778.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:772.16,774.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:767.88,772.16 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:76.3,78.35 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:73.17,75.4 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:709.2,762.18 30 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:704.8,706.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:700.82,704.3 3 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:700.8,700.82 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:70.2,70.136 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:70.136,73.17 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:698.48,700.3 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:697.2,698.48 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:689.8,691.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:687.48,689.3 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:685.2,687.48 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:678.8,680.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:676.48,678.3 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:672.2,676.48 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:67.25,69.3 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:665.16,667.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:664.2,665.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:659.16,661.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:655.2,659.16 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:649.26,651.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:645.146,649.26 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:642.2,642.59 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:64.2,67.25 3 1 
+github.com/OpenBazaar/multiwallet/zcash/sign.go:638.16,640.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:637.2,638.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:634.16,636.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:631.83,634.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:625.2,625.36 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:622.20,624.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:622.2,622.20 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:616.107,618.10 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:615.30,616.107 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:614.32,615.30 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:614.2,614.32 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:611.16,613.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:61.16,63.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:609.2,611.16 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:606.34,608.3 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:605.2,606.34 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:602.16,604.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:601.2,602.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:598.16,600.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:595.92,598.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:592.2,592.32 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:589.16,591.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:588.2,589.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:584.16,586.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:580.2,584.16 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:577.29,579.3 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:575.2,577.29 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:572.3,572.33 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:569.17,571.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:567.27,569.17 2 1 
+github.com/OpenBazaar/multiwallet/zcash/sign.go:566.2,567.27 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:560.27,564.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:560.2,560.27 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:556.54,558.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:555.186,556.54 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:552.2,552.44 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:55.2,61.16 5 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:548.44,550.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:547.2,547.15 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:547.15,548.44 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:544.3,544.36 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:541.17,543.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:535.3,541.17 7 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:529.32,531.10 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:528.3,528.29 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:528.29,529.32 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:523.32,525.10 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:522.29,523.32 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:519.32,522.29 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:517.2,519.32 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:511.35,513.4 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:51.89,53.3 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:51.2,51.89 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:509.23,511.35 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:507.2,509.23 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:502.3,503.38 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:499.17,501.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:497.27,499.17 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:497.2,497.27 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:493.3,495.35 3 1 
+github.com/OpenBazaar/multiwallet/zcash/sign.go:490.17,492.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:488.25,490.17 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:486.205,488.25 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:483.2,483.18 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:480.3,481.26 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:48.16,50.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:477.17,478.12 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:475.25,477.17 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:475.2,475.25 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:471.16,473.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:468.2,471.16 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:462.35,464.4 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:460.23,462.35 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:458.2,460.23 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:453.3,454.38 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:450.17,452.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:45.134,48.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:448.27,450.17 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:448.2,448.27 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:443.3,446.35 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:440.17,442.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:438.25,440.17 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:434.188,438.25 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:431.2,431.39 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:428.16,430.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:427.2,428.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:423.3,423.32 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:420.17,422.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:419.3,420.17 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:416.26,418.4 1 1 
+github.com/OpenBazaar/multiwallet/zcash/sign.go:413.3,416.26 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:410.17,412.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:408.31,410.17 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:408.2,408.31 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:404.16,406.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:390.2,404.16 5 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:387.16,389.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:380.2,387.16 5 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:377.8,379.3 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:374.17,376.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:371.25,374.17 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:368.2,371.25 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:363.3,366.44 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:360.17,362.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:359.3,360.17 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:356.17,358.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:352.25,356.17 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:348.2,352.25 5 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:344.16,346.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:343.2,344.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:340.8,342.3 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:338.20,340.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:336.175,338.20 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:333.2,333.44 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:330.4,330.29 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:327.18,329.5 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:320.4,327.18 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:317.18,319.5 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:316.4,317.18 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:313.18,315.5 1 0 
+github.com/OpenBazaar/multiwallet/zcash/sign.go:312.4,313.18 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:309.18,311.5 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:307.50,309.18 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:306.26,307.50 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:305.2,306.26 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:301.20,303.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:301.2,301.20 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:298.20,300.3 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:298.2,298.20 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:295.16,297.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:293.77,295.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:284.3,289.22 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:278.4,281.19 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:274.45,277.5 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:274.4,274.45 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:271.18,273.5 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:269.51,271.18 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:260.3,269.51 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:255.39,257.12 2 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:252.3,255.39 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:248.43,250.4 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:248.3,248.43 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:245.17,247.4 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:242.6,245.17 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:239.2,242.6 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:235.32,237.3 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:232.186,235.32 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:229.2,229.16 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:227.3,227.32 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:224.17,226.4 1 0 
+github.com/OpenBazaar/multiwallet/zcash/sign.go:220.3,224.17 5 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:217.17,219.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:216.3,217.17 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:213.17,215.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:212.3,213.17 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:209.17,211.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:206.31,209.17 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:206.2,206.31 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:204.3,204.46 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:201.10,203.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:198.88,201.10 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:191.2,198.88 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:186.94,188.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:181.2,186.94 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:176.16,178.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:170.2,176.16 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:167.16,169.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:162.100,167.16 4 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:159.2,159.27 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:157.3,157.32 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:154.17,156.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:150.3,154.17 5 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:147.17,149.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:145.3,147.17 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:142.17,144.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:141.3,142.17 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:138.17,140.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:135.42,138.17 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:135.2,135.42 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:130.88,134.3 3 1 
+github.com/OpenBazaar/multiwallet/zcash/sign.go:127.2,130.88 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:122.16,124.3 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:121.2,122.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:118.27,120.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:117.2,118.27 2 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:114.3,114.21 1 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:111.17,113.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/sign.go:108.41,111.17 3 1 +github.com/OpenBazaar/multiwallet/zcash/sign.go:102.2,108.41 3 1 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:97.2,99.21 3 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:93.17,95.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:91.14,93.17 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:90.83,91.14 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:87.2,87.19 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:84.9,86.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:77.81,84.9 6 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:74.2,74.19 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:71.9,73.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:65.83,71.9 5 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:51.2,62.11 5 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:48.19,50.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:42.67,48.19 4 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:330.2,330.12 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:327.26,329.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:327.2,327.26 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:324.15,326.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:323.2,324.15 2 0 
+github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:320.16,322.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:319.2,320.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:316.9,318.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:315.2,316.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:312.9,314.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:311.2,312.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:308.9,310.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:306.2,308.9 3 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:303.9,305.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:302.2,303.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:299.16,301.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:297.122,299.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:294.2,294.12 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:291.26,293.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:291.2,291.26 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:288.2,288.15 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:288.15,290.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:284.9,286.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:283.2,284.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:280.9,282.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:279.2,280.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:276.9,278.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:275.2,276.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:272.9,274.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:271.2,272.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:268.9,270.3 1 0 
+github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:267.2,268.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:264.16,266.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:262.121,264.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:259.2,259.12 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:256.26,258.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:256.2,256.26 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:253.15,255.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:251.2,253.15 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:248.16,250.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:247.2,248.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:244.9,246.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:243.2,244.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:240.9,242.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:239.2,240.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:236.9,238.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:235.2,236.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:232.16,234.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:230.122,232.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:227.2,227.12 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:224.26,226.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:224.2,224.26 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:221.15,223.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:219.2,221.15 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:216.16,218.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:215.2,216.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:212.9,214.3 1 0 
+github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:211.2,212.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:208.9,210.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:207.2,208.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:204.9,206.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:203.2,204.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:200.9,202.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:199.2,200.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:196.9,198.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:195.2,196.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:192.9,194.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:191.2,192.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:188.9,190.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:187.2,188.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:184.9,186.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:183.2,184.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:180.16,182.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:178.120,180.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:175.2,175.12 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:172.4,172.36 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:169.11,171.5 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:168.4,169.11 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:165.11,167.5 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:163.23,165.11 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:162.25,163.23 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:162.2,162.25 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:159.9,161.3 1 0 
+github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:158.2,159.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:155.9,157.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:154.2,155.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:151.9,153.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:150.2,151.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:146.9,148.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:144.124,146.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:141.2,141.83 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:138.16,140.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:135.2,138.16 4 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:132.16,134.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:131.2,132.16 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:127.33,130.3 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:126.59,127.33 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:121.21,123.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:118.35,121.21 3 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:115.2,115.59 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:111.17,113.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:109.39,111.17 2 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:106.55,109.39 3 0 +github.com/OpenBazaar/multiwallet/zcash/exchange_rates.go:102.48,104.2 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:93.57,97.2 1 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:84.2,86.8 3 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:81.49,83.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:78.2,81.49 4 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:75.22,77.3 1 0 
+github.com/OpenBazaar/multiwallet/zcash/address/address.go:73.75,75.22 2 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:63.55,70.2 6 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:55.45,60.2 4 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:47.13,52.2 4 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:334.2,334.47 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:331.8,331.159 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:331.159,333.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:329.101,331.3 1 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:326.99,329.101 1 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:317.63,320.2 1 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:309.63,313.2 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:302.2,303.27 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:300.3,300.53 1 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:297.18,299.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:296.26,297.18 1 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:294.3,294.53 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:291.18,293.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:290.26,291.18 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:286.60,289.29 2 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:280.61,282.2 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:273.45,275.2 1 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:267.2,267.56 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:264.9,266.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:262.65,264.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:256.52,258.2 1 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:250.52,252.2 1 
1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:243.2,245.18 3 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:239.9,241.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:238.2,239.9 2 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:234.39,236.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:232.104,234.39 1 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:223.104,225.2 1 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:216.102,219.2 2 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:204.61,206.2 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:197.45,199.2 1 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:191.2,191.56 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:188.9,190.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:186.65,188.9 2 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:180.52,182.2 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:174.52,176.2 1 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:167.2,169.18 3 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:163.9,165.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:162.2,163.9 2 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:158.35,160.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:156.92,158.35 1 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:147.92,149.2 1 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:133.10,134.63 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:129.11,130.37 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:127.15,128.60 1 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:125.16,126.52 1 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:123.26,124.35 1 0 
+github.com/OpenBazaar/multiwallet/zcash/address/address.go:119.22,122.30 3 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:118.2,118.22 1 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:116.3,116.65 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:113.32,115.4 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:112.16,113.32 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:111.2,112.16 2 1 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:106.9,108.3 1 0 +github.com/OpenBazaar/multiwallet/zcash/address/address.go:103.87,106.9 2 1 +github.com/OpenBazaar/multiwallet/util/satoshis.go:6.56,8.2 1 0 +github.com/OpenBazaar/multiwallet/util/outpoints.go:9.2,9.27 1 1 +github.com/OpenBazaar/multiwallet/util/outpoints.go:6.30,8.3 1 1 +github.com/OpenBazaar/multiwallet/util/outpoints.go:5.46,6.30 1 1 +github.com/OpenBazaar/multiwallet/util/fees.go:96.29,98.3 1 0 +github.com/OpenBazaar/multiwallet/util/fees.go:96.2,96.29 1 1 +github.com/OpenBazaar/multiwallet/util/fees.go:92.36,94.3 1 1 +github.com/OpenBazaar/multiwallet/util/fees.go:90.2,92.36 2 1 +github.com/OpenBazaar/multiwallet/util/fees.go:86.10,87.24 1 0 +github.com/OpenBazaar/multiwallet/util/fees.go:84.23,85.30 1 1 +github.com/OpenBazaar/multiwallet/util/fees.go:82.23,83.26 1 1 +github.com/OpenBazaar/multiwallet/util/fees.go:80.21,81.24 1 1 +github.com/OpenBazaar/multiwallet/util/fees.go:78.23,79.26 1 1 +github.com/OpenBazaar/multiwallet/util/fees.go:76.2,77.18 2 1 +github.com/OpenBazaar/multiwallet/util/fees.go:72.29,74.3 1 0 +github.com/OpenBazaar/multiwallet/util/fees.go:71.2,72.29 2 1 +github.com/OpenBazaar/multiwallet/util/fees.go:67.29,69.3 1 1 +github.com/OpenBazaar/multiwallet/util/fees.go:67.2,67.29 1 1 +github.com/OpenBazaar/multiwallet/util/fees.go:63.11,64.23 1 0 +github.com/OpenBazaar/multiwallet/util/fees.go:61.24,62.29 1 1 +github.com/OpenBazaar/multiwallet/util/fees.go:59.24,60.25 1 1 
+github.com/OpenBazaar/multiwallet/util/fees.go:57.22,58.23 1 1 +github.com/OpenBazaar/multiwallet/util/fees.go:55.24,56.25 1 1 +github.com/OpenBazaar/multiwallet/util/fees.go:53.30,54.19 1 1 +github.com/OpenBazaar/multiwallet/util/fees.go:52.71,53.30 1 1 +github.com/OpenBazaar/multiwallet/util/fees.go:42.122,50.2 1 1 +github.com/OpenBazaar/multiwallet/util/fees.go:100.2,100.27 1 1 +github.com/OpenBazaar/multiwallet/util/currency.go:6.56,8.2 1 0 +github.com/OpenBazaar/multiwallet/util/coin.go:93.2,93.72 1 1 +github.com/OpenBazaar/multiwallet/util/coin.go:90.3,91.54 2 1 +github.com/OpenBazaar/multiwallet/util/coin.go:87.17,88.12 1 0 +github.com/OpenBazaar/multiwallet/util/coin.go:86.3,87.17 2 1 +github.com/OpenBazaar/multiwallet/util/coin.go:83.17,84.12 1 0 +github.com/OpenBazaar/multiwallet/util/coin.go:73.33,83.17 9 1 +github.com/OpenBazaar/multiwallet/util/coin.go:67.187,73.33 5 1 +github.com/OpenBazaar/multiwallet/util/coin.go:64.2,64.10 1 1 +github.com/OpenBazaar/multiwallet/util/coin.go:62.3,62.13 1 1 +github.com/OpenBazaar/multiwallet/util/coin.go:59.17,60.12 1 0 +github.com/OpenBazaar/multiwallet/util/coin.go:58.3,59.17 2 1 +github.com/OpenBazaar/multiwallet/util/coin.go:55.17,56.12 1 0 +github.com/OpenBazaar/multiwallet/util/coin.go:54.3,55.17 2 1 +github.com/OpenBazaar/multiwallet/util/coin.go:50.17,51.12 1 0 +github.com/OpenBazaar/multiwallet/util/coin.go:49.3,50.17 2 1 +github.com/OpenBazaar/multiwallet/util/coin.go:46.21,48.4 1 1 +github.com/OpenBazaar/multiwallet/util/coin.go:45.3,46.21 2 1 +github.com/OpenBazaar/multiwallet/util/coin.go:42.18,43.12 1 1 +github.com/OpenBazaar/multiwallet/util/coin.go:41.26,42.18 1 1 +github.com/OpenBazaar/multiwallet/util/coin.go:39.218,41.26 2 1 +github.com/OpenBazaar/multiwallet/util/coin.go:28.130,37.2 2 1 +github.com/OpenBazaar/multiwallet/util/coin.go:26.39,26.81 1 1 +github.com/OpenBazaar/multiwallet/util/coin.go:25.39,25.62 1 1 +github.com/OpenBazaar/multiwallet/util/coin.go:24.39,24.64 1 1 
+github.com/OpenBazaar/multiwallet/util/coin.go:23.39,23.59 1 1 +github.com/OpenBazaar/multiwallet/util/coin.go:22.39,22.59 1 1 +github.com/OpenBazaar/multiwallet/util/coin.go:21.39,21.58 1 1 +github.com/OpenBazaar/multiwallet/util/balance.go:9.81,11.26 2 1 +github.com/OpenBazaar/multiwallet/util/balance.go:58.2,58.24 1 1 +github.com/OpenBazaar/multiwallet/util/balance.go:54.9,56.4 1 1 +github.com/OpenBazaar/multiwallet/util/balance.go:50.74,52.6 1 1 +github.com/OpenBazaar/multiwallet/util/balance.go:49.27,50.74 1 1 +github.com/OpenBazaar/multiwallet/util/balance.go:48.9,49.27 1 1 +github.com/OpenBazaar/multiwallet/util/balance.go:46.29,48.9 2 1 +github.com/OpenBazaar/multiwallet/util/balance.go:45.2,46.29 2 1 +github.com/OpenBazaar/multiwallet/util/balance.go:40.16,42.3 1 1 +github.com/OpenBazaar/multiwallet/util/balance.go:37.2,40.16 4 1 +github.com/OpenBazaar/multiwallet/util/balance.go:34.9,36.3 1 1 +github.com/OpenBazaar/multiwallet/util/balance.go:31.72,34.9 2 1 +github.com/OpenBazaar/multiwallet/util/balance.go:28.2,28.31 1 1 +github.com/OpenBazaar/multiwallet/util/balance.go:22.11,24.6 1 1 +github.com/OpenBazaar/multiwallet/util/balance.go:20.61,22.6 1 1 +github.com/OpenBazaar/multiwallet/util/balance.go:19.10,20.61 1 1 +github.com/OpenBazaar/multiwallet/util/balance.go:17.25,19.5 1 1 +github.com/OpenBazaar/multiwallet/util/balance.go:16.22,17.25 1 1 +github.com/OpenBazaar/multiwallet/util/balance.go:15.29,16.22 1 1 +github.com/OpenBazaar/multiwallet/util/balance.go:15.2,15.29 1 1 +github.com/OpenBazaar/multiwallet/util/balance.go:11.26,13.3 1 1 +github.com/OpenBazaar/multiwallet/util/address.go:29.2,29.43 1 0 +github.com/OpenBazaar/multiwallet/util/address.go:26.67,28.3 1 0 +github.com/OpenBazaar/multiwallet/util/address.go:26.2,26.67 1 0 +github.com/OpenBazaar/multiwallet/util/address.go:23.70,25.3 1 0 +github.com/OpenBazaar/multiwallet/util/address.go:23.2,23.70 1 0 +github.com/OpenBazaar/multiwallet/util/address.go:20.69,22.3 1 0 
+github.com/OpenBazaar/multiwallet/util/address.go:20.2,20.69 1 0 +github.com/OpenBazaar/multiwallet/util/address.go:17.69,19.3 1 0 +github.com/OpenBazaar/multiwallet/util/address.go:17.2,17.69 1 0 +github.com/OpenBazaar/multiwallet/util/address.go:14.23,16.3 1 0 +github.com/OpenBazaar/multiwallet/util/address.go:13.86,14.23 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:96.33,98.2 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:90.34,94.2 3 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:87.2,87.16 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:84.3,85.29 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:80.62,83.4 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:78.8,80.62 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:76.16,78.3 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:646.49,648.2 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:641.2,643.44 3 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:638.16,640.3 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:631.78,638.16 3 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:628.2,628.14 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:625.3,625.51 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:623.4,623.18 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:619.18,621.13 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:617.24,619.18 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:616.4,616.16 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:612.18,614.13 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:610.21,612.18 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:609.4,609.19 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:605.18,607.13 2 0 
+github.com/OpenBazaar/multiwallet/service/wallet_service.go:603.27,605.18 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:602.4,602.23 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:598.4,598.23 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:598.23,600.13 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:594.18,596.13 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:592.23,594.18 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:589.38,591.22 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:589.2,589.38 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:584.16,587.3 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:583.2,584.16 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:581.3,581.52 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:577.17,579.12 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:575.27,577.17 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:572.72,575.27 3 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:57.184,76.16 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:562.40,564.3 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:561.75,562.40 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:554.3,554.29 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:554.29,557.4 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:550.17,553.4 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:548.8,548.23 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:548.23,550.17 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:546.3,547.27 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:542.17,545.4 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:541.3,542.17 2 1 
+github.com/OpenBazaar/multiwallet/service/wallet_service.go:536.9,540.4 3 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:534.26,536.4 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:533.3,534.26 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:530.26,532.4 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:528.16,530.26 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:525.2,528.16 4 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:520.2,520.15 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:520.15,523.3 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:517.3,517.18 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:513.18,515.5 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:508.3,508.20 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:508.20,513.18 4 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:505.10,506.12 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:497.3,505.10 7 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:493.43,494.12 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:493.3,493.43 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:487.18,490.5 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:485.81,487.18 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:484.3,485.81 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:480.17,482.12 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:478.32,480.17 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:478.2,478.32 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:476.3,476.18 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:472.3,472.20 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:472.20,475.4 2 1 
+github.com/OpenBazaar/multiwallet/service/wallet_service.go:469.10,470.12 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:459.3,469.10 5 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:455.17,458.4 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:451.3,455.17 5 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:446.17,449.4 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:444.3,446.17 3 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:440.17,442.12 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:439.3,440.17 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:435.17,437.12 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:433.30,435.17 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:431.2,433.30 3 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:427.16,430.3 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:426.2,427.16 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:422.25,424.3 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:415.115,422.25 6 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:410.25,412.3 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:404.96,410.25 4 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:394.8,397.3 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:392.16,394.3 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:391.2,392.16 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:388.27,390.3 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:385.66,388.27 3 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:378.42,382.2 3 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:371.48,374.3 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:363.2,371.48 2 1 
+github.com/OpenBazaar/multiwallet/service/wallet_service.go:359.25,361.3 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:358.2,359.25 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:354.25,356.3 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:352.2,354.25 3 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:347.16,350.3 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:346.2,347.16 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:342.16,345.3 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:340.110,342.16 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:333.52,335.5 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:332.10,333.52 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:330.35,332.10 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:330.2,330.35 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:323.3,327.47 3 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:319.17,321.12 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:317.26,319.17 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:311.2,317.26 5 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:306.16,309.3 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:303.92,306.16 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:294.8,297.3 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:292.16,294.3 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:291.2,292.16 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:288.27,290.3 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:285.68,288.27 3 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:277.2,280.22 3 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:272.8,274.3 1 0 
+github.com/OpenBazaar/multiwallet/service/wallet_service.go:271.3,271.19 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:268.17,270.4 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:264.16,268.17 4 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:260.40,264.16 3 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:250.19,252.6 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:249.5,250.19 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:246.6,246.12 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:243.8,243.16 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:240.51,242.9 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:238.41,240.51 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:237.30,238.41 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:234.5,234.30 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:234.30,237.30 3 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:230.19,233.6 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:228.28,230.19 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:226.21,228.28 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:225.25,226.21 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:224.2,225.25 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:220.16,223.3 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:219.2,220.16 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:215.16,218.3 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:214.2,215.16 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:207.73,211.3 3 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:204.2,207.73 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:201.16,203.3 1 0 
+github.com/OpenBazaar/multiwallet/service/wallet_service.go:193.66,201.16 7 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:185.6,185.11 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:182.20,184.7 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:180.68,182.20 2 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:179.28,180.68 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:178.32,179.28 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:178.3,178.32 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:162.33,173.11 3 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:161.52,162.33 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:160.34,161.52 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:159.27,160.34 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:159.2,159.27 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:155.16,157.3 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:147.75,155.16 8 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:140.29,141.37 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:138.23,139.40 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:136.22,137.10 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:134.6,135.10 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:132.2,134.6 2 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:129.27,131.3 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:121.35,129.27 3 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:116.33,118.3 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:115.90,116.33 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:110.101,112.2 1 1 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:107.2,107.28 1 1 
+github.com/OpenBazaar/multiwallet/service/wallet_service.go:104.16,106.3 1 0 +github.com/OpenBazaar/multiwallet/service/wallet_service.go:100.62,104.16 4 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:97.16,99.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:95.93,97.16 2 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:79.2,92.8 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:75.27,77.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:74.2,75.27 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:71.16,73.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:70.2,71.16 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:66.16,68.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:65.2,66.16 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:61.16,63.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:60.2,61.16 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:57.16,59.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:56.2,57.16 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:53.16,55.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:512.83,514.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:507.2,508.12 2 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:504.16,506.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:503.2,504.16 2 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:491.3,501.46 2 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:49.181,53.16 3 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:488.17,490.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:486.31,488.17 2 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:486.2,486.31 1 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:472.3,484.43 2 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:469.17,471.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:468.3,469.17 2 1 
+github.com/OpenBazaar/multiwallet/litecoin/wallet.go:463.55,465.10 2 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:462.28,463.55 1 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:460.29,462.28 2 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:460.2,460.29 1 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:457.16,459.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:445.58,457.16 5 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:440.3,440.85 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:437.9,439.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:435.31,437.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:433.25,435.31 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:433.2,433.25 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:429.35,431.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:429.2,429.35 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:426.35,428.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:420.2,426.35 7 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:417.26,419.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:415.2,417.26 3 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:412.26,414.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:409.51,412.26 3 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:405.59,407.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:400.34,403.2 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:396.2,397.67 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:393.21,395.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:393.2,393.21 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:390.16,392.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:388.88,390.16 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:384.63,386.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:380.88,382.2 1 0 
+github.com/OpenBazaar/multiwallet/litecoin/wallet.go:376.2,377.12 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:373.16,375.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:372.2,373.16 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:369.16,371.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:367.64,369.16 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:363.2,364.12 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:359.16,361.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:358.2,359.16 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:355.3,355.50 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:352.17,354.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:350.29,352.17 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:347.78,350.29 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:343.193,345.2 1 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:339.208,341.2 1 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:335.191,337.2 1 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:331.182,333.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:327.95,329.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:322.2,324.20 3 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:317.27,321.3 3 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:315.120,317.27 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:311.80,313.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:307.2,308.17 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:303.40,305.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:303.2,303.40 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:297.17,299.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:295.8,297.17 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:292.17,294.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:290.14,292.17 2 0 
+github.com/OpenBazaar/multiwallet/litecoin/wallet.go:285.150,290.14 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:281.69,283.2 1 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:277.62,279.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:274.2,274.17 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:272.3,272.21 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:265.4,270.29 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:262.18,264.5 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:260.32,262.18 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:259.3,260.32 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:256.17,258.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:252.16,256.17 4 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:250.78,252.16 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:247.2,247.18 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:243.3,245.15 3 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:239.19,241.25 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:236.32,238.25 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:234.61,235.26 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:232.62,233.33 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:230.18,231.26 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:229.3,229.10 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:226.21,228.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:222.26,226.21 4 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:222.2,222.26 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:219.16,221.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:216.59,219.16 3 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:210.67,214.2 3 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:205.60,208.2 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:201.80,203.2 1 1 
+github.com/OpenBazaar/multiwallet/litecoin/wallet.go:197.82,199.2 1 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:193.78,195.2 1 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:190.2,190.13 1 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:186.65,187.9 1 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:186.3,186.65 1 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:183.73,185.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:183.3,183.73 1 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:180.17,182.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:179.3,180.17 2 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:176.17,178.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:174.6,176.17 2 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:172.76,174.6 2 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:169.2,169.13 1 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:165.73,167.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:165.3,165.73 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:162.65,163.9 1 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:162.3,162.65 1 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:158.17,160.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:157.3,158.17 2 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:154.17,156.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:152.6,154.17 2 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:150.80,152.6 2 1 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:139.2,147.23 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:136.8,138.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:134.18,136.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:131.114,134.18 3 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:127.60,129.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:123.61,125.2 1 0 
+github.com/OpenBazaar/multiwallet/litecoin/wallet.go:119.52,121.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:114.8,116.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:112.50,114.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:111.48,112.50 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:107.52,109.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:102.34,105.2 2 0 +github.com/OpenBazaar/multiwallet/litecoin/wallet.go:100.2,100.65 1 1 +github.com/OpenBazaar/multiwallet/litecoin/txsizes.go:248.2,248.22 1 1 +github.com/OpenBazaar/multiwallet/litecoin/txsizes.go:245.32,247.3 1 1 +github.com/OpenBazaar/multiwallet/litecoin/txsizes.go:244.73,245.32 1 1 +github.com/OpenBazaar/multiwallet/litecoin/txsizes.go:236.2,240.13 1 1 +github.com/OpenBazaar/multiwallet/litecoin/txsizes.go:231.36,232.58 1 0 +github.com/OpenBazaar/multiwallet/litecoin/txsizes.go:229.35,230.58 1 0 +github.com/OpenBazaar/multiwallet/litecoin/txsizes.go:227.26,228.53 1 1 +github.com/OpenBazaar/multiwallet/litecoin/txsizes.go:225.26,226.53 1 1 +github.com/OpenBazaar/multiwallet/litecoin/txsizes.go:223.13,224.42 1 1 +github.com/OpenBazaar/multiwallet/litecoin/txsizes.go:221.2,222.19 2 1 +github.com/OpenBazaar/multiwallet/litecoin/txsizes.go:216.21,219.3 2 1 +github.com/OpenBazaar/multiwallet/litecoin/txsizes.go:213.113,216.21 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:97.3,97.21 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:94.17,96.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:91.41,94.17 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:85.2,91.41 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:81.3,81.50 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:78.4,79.55 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:75.18,76.13 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:74.4,75.18 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:71.18,72.13 1 0 
+github.com/OpenBazaar/multiwallet/litecoin/sign.go:674.2,674.36 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:671.20,673.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:671.2,671.20 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:665.107,667.10 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:664.30,665.107 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:663.32,664.30 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:663.2,663.32 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:660.16,662.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:658.2,660.16 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:655.34,657.3 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:654.2,655.34 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:651.16,653.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:650.2,651.16 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:647.16,649.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:644.95,647.16 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:641.2,641.32 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:638.16,640.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:635.2,638.16 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:631.16,633.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:630.2,631.16 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:620.3,628.28 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:62.35,71.18 9 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:617.30,619.4 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:614.3,617.30 4 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:611.17,613.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:609.8,611.17 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:606.3,607.43 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:603.30,605.4 1 1 
+github.com/OpenBazaar/multiwallet/litecoin/sign.go:600.34,603.30 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:60.3,62.35 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:599.2,600.34 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:596.3,596.33 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:593.17,595.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:591.27,593.17 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:590.2,591.27 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:584.27,588.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:584.2,584.27 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:580.54,582.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:579.189,580.54 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:574.2,576.25 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:570.41,572.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:57.17,59.4 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:569.2,569.15 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:569.15,570.41 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:565.3,566.26 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:562.17,564.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:560.3,562.17 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:554.32,556.10 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:553.3,553.29 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:553.29,554.32 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:548.32,550.10 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:547.29,548.32 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:544.32,547.29 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:544.2,544.32 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:540.39,542.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:54.2,54.136 1 1 
+github.com/OpenBazaar/multiwallet/litecoin/sign.go:54.136,57.17 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:536.2,540.39 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:530.35,532.4 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:528.23,530.35 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:526.2,528.23 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:523.16,525.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:521.2,523.16 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:516.3,517.38 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:513.17,515.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:511.27,513.17 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:511.2,511.27 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:51.25,53.3 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:507.3,509.35 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:504.17,506.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:502.25,504.17 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:500.208,502.25 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:497.2,497.18 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:494.3,495.26 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:491.17,492.12 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:489.25,491.17 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:488.2,489.25 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:484.16,486.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:481.2,484.16 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:48.2,51.25 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:475.35,477.4 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:473.23,475.35 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:471.2,473.23 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:468.16,470.3 1 0 
+github.com/OpenBazaar/multiwallet/litecoin/sign.go:466.2,468.16 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:461.3,462.38 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:458.17,460.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:456.27,458.17 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:456.2,456.27 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:452.3,454.35 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:45.16,47.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:449.17,451.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:447.25,449.17 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:444.191,447.25 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:440.2,441.19 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:437.40,439.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:437.2,437.40 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:431.4,432.26 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:428.10,430.5 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:426.18,428.5 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:425.4,426.18 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:422.18,424.5 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:420.9,422.18 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:419.4,419.33 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:416.18,418.5 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:411.26,416.18 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:410.31,411.26 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:409.2,410.31 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:404.5,404.29 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:401.19,403.6 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:399.33,401.19 2 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:396.30,399.33 3 0 
+github.com/OpenBazaar/multiwallet/litecoin/sign.go:394.25,396.30 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:393.2,394.25 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:39.2,45.16 5 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:389.3,389.28 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:386.26,388.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:385.77,386.26 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:385.2,385.77 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:383.3,383.45 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:381.4,381.47 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:378.18,380.5 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:376.57,378.18 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:375.88,376.57 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:375.2,375.88 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:371.16,373.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:369.2,371.16 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:366.16,368.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:352.2,366.16 5 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:35.93,37.3 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:349.16,351.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:342.2,349.16 5 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:338.17,340.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:335.25,338.17 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:332.2,335.25 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:327.3,330.44 4 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:324.17,326.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:323.3,324.17 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:320.17,322.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:32.137,35.93 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:317.25,320.17 
3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:314.2,317.25 4 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:310.16,312.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:309.2,310.16 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:306.8,308.3 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:304.20,306.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:302.178,304.20 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:299.2,299.44 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:296.4,296.29 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:293.18,295.5 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:286.4,293.18 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:283.18,285.5 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:282.4,283.18 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:279.18,281.5 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:278.4,279.18 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:275.18,277.5 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:273.50,275.18 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:272.26,273.50 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:271.2,272.26 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:267.20,269.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:267.2,267.20 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:264.20,266.3 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:264.2,264.20 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:261.16,263.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:259.80,261.16 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:250.3,255.9 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:244.4,247.19 4 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:240.45,243.5 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:240.4,240.45 1 1 
+github.com/OpenBazaar/multiwallet/litecoin/sign.go:237.18,239.5 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:235.51,237.18 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:226.3,235.51 4 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:221.51,223.12 2 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:218.3,221.51 4 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:214.55,216.4 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:214.3,214.55 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:211.17,213.4 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:209.6,211.17 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:206.2,209.6 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:202.32,204.3 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:199.172,202.32 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:196.2,196.16 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:194.3,194.32 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:191.17,193.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:186.31,191.17 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:186.2,186.31 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:183.37,185.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:182.2,183.37 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:180.3,180.46 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:177.10,179.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:174.88,177.10 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:167.2,174.88 4 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:162.98,164.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:157.2,162.98 4 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:152.16,154.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:146.2,152.16 4 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:143.16,145.3 1 0 
+github.com/OpenBazaar/multiwallet/litecoin/sign.go:138.103,143.16 4 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:135.2,135.27 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:133.3,133.32 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:130.17,132.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:125.42,130.17 3 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:125.2,125.42 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:122.37,124.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:121.2,122.37 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:118.3,119.46 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:115.17,117.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:113.88,115.17 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:110.2,113.88 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:105.16,107.3 1 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:104.2,105.16 2 1 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:101.27,103.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/sign.go:100.2,101.27 2 1 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:97.2,100.28 4 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:93.17,95.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:91.14,93.17 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:90.86,91.14 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:87.2,87.19 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:84.9,86.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:77.84,84.9 6 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:74.2,74.19 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:71.9,73.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:65.86,71.9 5 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:51.2,62.11 5 0 
+github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:48.19,50.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:42.73,48.19 4 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:335.56,337.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:331.2,331.12 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:328.26,330.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:328.2,328.26 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:325.15,327.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:324.2,325.15 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:321.16,323.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:320.2,321.16 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:317.9,319.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:316.2,317.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:313.9,315.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:310.2,313.9 4 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:307.9,309.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:306.2,307.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:303.16,305.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:301.122,303.16 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:298.2,298.12 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:295.26,297.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:295.2,295.26 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:292.2,292.15 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:292.15,294.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:288.9,290.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:287.2,288.9 2 0 
+github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:284.9,286.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:283.2,284.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:280.9,282.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:279.2,280.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:276.9,278.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:275.2,276.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:272.9,274.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:271.2,272.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:268.16,270.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:266.121,268.16 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:263.2,263.12 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:260.26,262.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:260.2,260.26 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:257.15,259.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:255.2,257.15 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:252.16,254.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:251.2,252.16 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:248.9,250.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:247.2,248.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:244.9,246.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:243.2,244.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:240.9,242.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:239.2,240.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:236.16,238.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:234.122,236.16 2 0 
+github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:231.2,231.12 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:228.26,230.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:228.2,228.26 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:225.15,227.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:223.2,225.15 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:220.16,222.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:219.2,220.16 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:216.9,218.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:215.2,216.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:212.9,214.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:211.2,212.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:208.9,210.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:207.2,208.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:204.9,206.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:203.2,204.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:200.9,202.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:199.2,200.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:196.9,198.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:195.2,196.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:192.9,194.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:191.2,192.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:188.9,190.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:187.2,188.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:184.16,186.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:182.120,184.16 2 0 
+github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:179.2,179.12 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:176.4,176.36 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:173.11,175.5 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:172.4,173.11 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:169.11,171.5 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:167.23,169.11 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:166.25,167.23 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:166.2,166.25 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:163.9,165.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:162.2,163.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:159.9,161.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:158.2,159.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:155.9,157.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:154.2,155.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:150.9,152.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:148.124,150.9 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:145.2,145.83 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:142.16,144.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:139.2,142.16 4 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:136.16,138.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:135.2,136.16 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:131.33,134.3 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:130.59,131.33 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:125.21,127.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:122.38,125.21 3 0 
+github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:119.2,119.59 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:115.17,117.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:113.39,115.17 2 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:110.58,113.39 3 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:106.51,108.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:103.2,103.18 1 0 +github.com/OpenBazaar/multiwallet/litecoin/exchange_rates.go:100.28,102.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:99.16,101.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:95.2,99.16 5 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:89.16,91.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:85.98,89.16 2 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:791.39,794.2 2 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:782.2,782.47 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:779.82,781.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:779.8,779.82 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:777.82,779.3 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:777.8,777.82 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:775.8,775.159 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:775.159,777.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:773.101,775.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:771.99,773.101 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:77.55,81.2 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:767.70,769.2 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:760.63,763.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:752.63,756.2 1 0 
+github.com/OpenBazaar/multiwallet/litecoin/address/address.go:745.2,746.27 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:743.3,743.53 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:740.18,742.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:739.26,740.18 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:738.3,738.60 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:735.18,737.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:734.33,735.18 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:733.3,733.53 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:730.18,732.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:729.26,730.18 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:725.60,728.29 2 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:719.60,721.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:714.58,716.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:709.49,711.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:703.52,705.2 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:694.72,697.2 2 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:687.59,689.2 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:682.2,682.12 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:679.16,681.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:676.59,679.16 2 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:663.2,670.18 3 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:658.28,661.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:655.101,658.28 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:647.111,650.2 2 1 
+github.com/OpenBazaar/multiwallet/litecoin/address/address.go:64.13,71.2 6 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:632.56,634.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:626.60,628.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:621.58,623.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:616.49,618.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:610.52,612.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:601.72,604.2 2 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:594.59,596.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:589.2,589.12 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:586.16,588.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:583.59,586.16 2 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:570.2,577.18 3 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:565.28,568.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:562.101,565.28 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:554.111,557.2 2 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:539.51,541.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:532.64,536.2 3 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:522.58,524.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:516.47,518.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:510.41,512.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:503.61,506.2 2 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:497.48,499.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:490.48,492.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:477.17,478.36 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:474.21,475.40 
1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:471.23,472.42 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:469.10,470.14 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:467.44,468.24 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:456.2,462.8 2 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:453.18,454.23 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:451.18,452.27 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:449.2,450.29 2 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:441.16,443.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:439.94,441.16 2 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:408.61,410.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:401.45,403.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:393.65,396.2 2 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:387.52,389.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:381.52,383.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:374.2,376.18 3 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:370.39,372.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:368.94,370.39 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:358.104,361.2 2 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:350.102,354.2 3 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:338.61,340.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:331.45,333.2 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:33.56,35.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:323.65,326.2 2 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:317.52,319.2 1 0 
+github.com/OpenBazaar/multiwallet/litecoin/address/address.go:311.52,313.2 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:304.2,306.18 3 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:300.35,302.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:298.82,300.35 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:288.92,291.2 2 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:276.2,276.32 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:271.66,274.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:271.2,271.66 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:266.47,268.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:266.2,266.47 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:261.16,263.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:260.2,261.16 2 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:253.18,255.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:252.2,253.18 2 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:25.52,27.2 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:247.2,247.19 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:247.19,249.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:241.16,243.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:238.64,241.16 2 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:233.2,233.14 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:230.43,232.3 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:228.47,230.43 2 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:220.10,221.63 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:216.11,217.37 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:214.15,215.55 
1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:212.16,213.47 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:210.26,211.35 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:206.22,209.30 3 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:205.2,205.22 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:203.3,203.65 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:200.32,202.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:199.16,200.32 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:198.2,199.16 2 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:194.3,194.56 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:191.17,193.4 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:189.41,191.17 2 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:189.2,189.41 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:181.12,182.65 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:179.12,180.57 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:177.12,178.57 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:174.4,176.28 2 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:169.4,169.23 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:169.23,171.5 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:163.18,165.5 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:161.35,163.18 2 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:159.18,161.35 2 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:152.79,159.18 2 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:113.2,113.18 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:109.72,111.3 1 0 
+github.com/OpenBazaar/multiwallet/litecoin/address/address.go:109.2,109.72 1 1 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:105.16,107.3 1 0 +github.com/OpenBazaar/multiwallet/litecoin/address/address.go:104.2,105.16 2 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:99.17,100.9 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:94.6,99.17 2 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:94.2,94.6 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:91.8,93.3 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:89.16,91.3 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:86.87,89.16 3 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:83.2,83.51 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:80.2,80.17 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:80.17,82.3 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:77.16,79.3 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:75.89,77.16 2 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:72.2,72.32 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:69.16,71.3 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:68.2,69.16 2 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:64.16,66.3 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:63.2,64.16 2 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:59.16,61.3 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:58.2,59.16 2 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:54.16,56.3 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:53.2,54.16 2 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:49.16,51.3 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:46.127,49.16 2 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:42.2,42.16 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:39.39,41.3 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:31.2,39.39 2 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:28.16,30.3 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:26.157,28.16 2 1 
+github.com/OpenBazaar/multiwallet/keys/keys.go:199.82,201.2 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:196.2,196.12 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:190.19,192.6 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:188.50,190.19 2 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:187.29,188.50 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:186.46,187.29 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:184.41,186.46 2 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:181.2,181.47 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:178.8,178.39 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:178.39,180.3 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:176.32,178.3 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:175.106,176.32 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:172.2,172.23 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:169.66,171.3 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:168.65,169.66 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:164.2,164.68 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:154.3,162.20 2 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:151.17,153.4 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:149.16,151.17 2 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:147.86,149.16 2 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:144.2,144.13 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:142.3,142.25 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:139.17,140.12 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:137.32,139.17 2 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:137.2,137.32 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:134.16,136.3 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:131.51,134.16 3 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:128.2,128.17 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:125.16,127.3 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:124.2,125.16 2 1 
+github.com/OpenBazaar/multiwallet/keys/keys.go:121.16,123.3 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:120.2,121.16 2 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:117.39,119.3 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:116.89,117.39 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:113.2,113.22 1 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:110.16,112.3 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:108.2,110.16 3 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:105.16,107.3 1 0 +github.com/OpenBazaar/multiwallet/keys/keys.go:104.2,105.16 2 1 +github.com/OpenBazaar/multiwallet/keys/keys.go:102.3,102.13 1 0 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:97.15,99.3 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:96.80,97.15 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:88.9,90.4 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:84.56,87.12 3 0 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:82.6,84.56 2 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:81.71,82.6 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:70.2,75.15 2 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:67.3,68.56 2 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:64.17,66.4 1 0 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:62.33,64.17 2 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:57.95,62.33 2 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:40.49,42.2 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:36.40,38.2 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:33.2,33.22 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:30.8,32.3 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:27.41,29.4 1 0 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:25.53,27.41 2 0 
+github.com/OpenBazaar/multiwallet/client/rotation_manager.go:25.2,25.53 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:21.35,24.3 1 0 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:203.37,205.2 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:199.35,201.2 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:195.36,197.2 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:191.34,193.2 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:19.39,21.35 2 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:183.54,185.6 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:183.5,183.54 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:179.27,182.6 2 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:178.47,179.27 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:178.4,178.47 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:175.42,176.13 1 0 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:174.7,175.42 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:172.34,174.7 2 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:168.40,172.34 3 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:161.8,163.3 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:156.41,161.8 4 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:151.2,152.12 2 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:147.43,149.3 1 0 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:147.2,147.43 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:142.10,144.4 1 0 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:136.9,142.10 5 0 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:131.65,136.9 4 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:125.3,126.30 2 1 
+github.com/OpenBazaar/multiwallet/client/rotation_manager.go:122.16,124.4 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:121.34,122.16 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:117.42,121.34 3 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:112.44,114.2 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:107.2,107.27 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:103.16,104.9 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:102.16,103.16 1 1 +github.com/OpenBazaar/multiwallet/client/rotation_manager.go:100.2,102.16 3 1 +github.com/OpenBazaar/multiwallet/client/pool.go:95.11,96.15 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:93.25,94.10 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:91.6,92.10 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:90.28,91.6 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:87.2,87.16 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:84.46,86.3 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:82.61,84.46 2 1 +github.com/OpenBazaar/multiwallet/client/pool.go:77.36,80.2 2 1 +github.com/OpenBazaar/multiwallet/client/pool.go:71.2,72.18 2 1 +github.com/OpenBazaar/multiwallet/client/pool.go:68.16,70.3 1 0 +github.com/OpenBazaar/multiwallet/client/pool.go:58.2,68.16 2 1 +github.com/OpenBazaar/multiwallet/client/pool.go:54.25,56.3 1 0 +github.com/OpenBazaar/multiwallet/client/pool.go:53.87,54.25 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:48.37,50.2 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:42.34,46.2 3 1 +github.com/OpenBazaar/multiwallet/client/pool.go:393.67,393.86 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:384.46,390.2 5 1 +github.com/OpenBazaar/multiwallet/client/pool.go:374.64,382.2 6 0 +github.com/OpenBazaar/multiwallet/client/pool.go:369.2,370.19 2 0 +github.com/OpenBazaar/multiwallet/client/pool.go:364.4,365.14 2 0 
+github.com/OpenBazaar/multiwallet/client/pool.go:361.18,363.5 1 0 +github.com/OpenBazaar/multiwallet/client/pool.go:36.60,38.2 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:358.4,361.18 4 0 +github.com/OpenBazaar/multiwallet/client/pool.go:355.28,357.5 1 0 +github.com/OpenBazaar/multiwallet/client/pool.go:353.56,355.28 2 0 +github.com/OpenBazaar/multiwallet/client/pool.go:350.78,353.56 1 0 +github.com/OpenBazaar/multiwallet/client/pool.go:345.2,346.16 2 1 +github.com/OpenBazaar/multiwallet/client/pool.go:340.4,341.14 2 1 +github.com/OpenBazaar/multiwallet/client/pool.go:337.18,339.5 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:334.56,337.18 3 1 +github.com/OpenBazaar/multiwallet/client/pool.go:331.78,334.56 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:326.2,327.17 2 0 +github.com/OpenBazaar/multiwallet/client/pool.go:321.4,322.14 2 0 +github.com/OpenBazaar/multiwallet/client/pool.go:318.18,320.5 1 0 +github.com/OpenBazaar/multiwallet/client/pool.go:315.4,318.18 4 0 +github.com/OpenBazaar/multiwallet/client/pool.go:312.28,314.5 1 0 +github.com/OpenBazaar/multiwallet/client/pool.go:310.56,312.28 2 0 +github.com/OpenBazaar/multiwallet/client/pool.go:307.92,310.56 1 0 +github.com/OpenBazaar/multiwallet/client/pool.go:302.2,303.16 2 0 +github.com/OpenBazaar/multiwallet/client/pool.go:298.4,299.14 2 0 +github.com/OpenBazaar/multiwallet/client/pool.go:295.18,297.5 1 0 +github.com/OpenBazaar/multiwallet/client/pool.go:292.56,295.18 3 0 +github.com/OpenBazaar/multiwallet/client/pool.go:289.69,292.56 1 0 +github.com/OpenBazaar/multiwallet/client/pool.go:284.2,285.18 2 0 +github.com/OpenBazaar/multiwallet/client/pool.go:279.4,280.14 2 0 +github.com/OpenBazaar/multiwallet/client/pool.go:276.18,278.5 1 0 +github.com/OpenBazaar/multiwallet/client/pool.go:273.56,276.18 3 0 +github.com/OpenBazaar/multiwallet/client/pool.go:270.53,273.56 1 0 +github.com/OpenBazaar/multiwallet/client/pool.go:265.2,266.19 2 0 
+github.com/OpenBazaar/multiwallet/client/pool.go:260.4,261.14 2 0 +github.com/OpenBazaar/multiwallet/client/pool.go:257.18,259.5 1 0 +github.com/OpenBazaar/multiwallet/client/pool.go:254.56,257.18 3 0 +github.com/OpenBazaar/multiwallet/client/pool.go:251.59,254.56 1 0 +github.com/OpenBazaar/multiwallet/client/pool.go:246.2,247.17 2 0 +github.com/OpenBazaar/multiwallet/client/pool.go:241.4,242.14 2 0 +github.com/OpenBazaar/multiwallet/client/pool.go:238.18,240.5 1 0 +github.com/OpenBazaar/multiwallet/client/pool.go:235.56,238.18 3 0 +github.com/OpenBazaar/multiwallet/client/pool.go:232.60,235.56 1 0 +github.com/OpenBazaar/multiwallet/client/pool.go:227.2,228.18 2 0 +github.com/OpenBazaar/multiwallet/client/pool.go:222.4,223.14 2 0 +github.com/OpenBazaar/multiwallet/client/pool.go:219.18,221.5 1 0 +github.com/OpenBazaar/multiwallet/client/pool.go:216.56,219.18 3 0 +github.com/OpenBazaar/multiwallet/client/pool.go:213.59,216.56 1 0 +github.com/OpenBazaar/multiwallet/client/pool.go:208.55,210.2 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:203.2,204.54 2 1 +github.com/OpenBazaar/multiwallet/client/pool.go:198.9,201.4 2 1 +github.com/OpenBazaar/multiwallet/client/pool.go:197.4,197.12 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:194.4,194.20 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:194.20,196.5 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:190.4,190.35 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:190.35,193.5 2 0 +github.com/OpenBazaar/multiwallet/client/pool.go:186.45,189.5 2 1 +github.com/OpenBazaar/multiwallet/client/pool.go:184.42,186.45 2 1 +github.com/OpenBazaar/multiwallet/client/pool.go:182.50,184.42 2 1 +github.com/OpenBazaar/multiwallet/client/pool.go:180.95,182.50 2 1 +github.com/OpenBazaar/multiwallet/client/pool.go:166.22,167.11 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:164.24,165.19 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:162.30,163.25 1 1 
+github.com/OpenBazaar/multiwallet/client/pool.go:160.7,161.11 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:159.12,160.7 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:152.55,159.12 3 1 +github.com/OpenBazaar/multiwallet/client/pool.go:145.31,148.3 2 1 +github.com/OpenBazaar/multiwallet/client/pool.go:144.47,145.31 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:138.50,142.2 3 1 +github.com/OpenBazaar/multiwallet/client/pool.go:131.53,133.2 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:124.30,128.2 3 1 +github.com/OpenBazaar/multiwallet/client/pool.go:120.2,120.12 1 1 +github.com/OpenBazaar/multiwallet/client/pool.go:116.36,119.3 2 0 +github.com/OpenBazaar/multiwallet/client/pool.go:111.2,116.36 6 1 +github.com/OpenBazaar/multiwallet/client/pool.go:105.62,110.3 4 0 +github.com/OpenBazaar/multiwallet/client/pool.go:101.38,105.62 4 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:96.86,98.2 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:80.2,93.8 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:76.16,78.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:75.2,76.16 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:71.27,73.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:70.2,71.27 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:67.16,69.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:66.2,67.16 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:62.16,64.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:61.2,62.16 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:58.16,60.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:57.2,58.16 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:54.16,56.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:50.179,54.16 3 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:473.82,475.2 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:468.2,469.12 2 1 
+github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:465.16,467.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:464.2,465.16 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:452.3,462.46 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:449.17,451.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:447.31,449.17 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:447.2,447.31 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:433.3,445.43 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:430.17,432.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:429.3,430.17 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:424.55,426.10 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:423.28,424.55 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:421.29,423.28 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:421.2,421.29 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:418.16,420.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:406.57,418.16 5 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:400.26,402.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:398.2,400.26 3 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:395.26,397.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:392.50,395.26 3 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:388.58,390.2 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:383.33,386.2 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:379.2,380.67 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:376.21,378.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:376.2,376.21 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:373.16,375.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:371.87,373.16 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:367.62,369.2 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:363.87,365.2 1 0 
+github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:359.2,360.12 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:355.16,357.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:354.2,355.16 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:351.3,351.50 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:348.17,350.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:346.29,348.17 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:343.73,346.29 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:339.188,341.2 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:335.207,337.2 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:331.190,333.2 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:327.177,329.2 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:323.94,325.2 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:318.2,320.20 3 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:313.27,317.3 3 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:311.119,313.27 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:307.79,309.2 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:303.2,304.17 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:300.40,302.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:300.2,300.40 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:295.17,297.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:293.8,295.17 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:290.17,292.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:288.14,290.17 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:283.145,288.14 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:279.68,281.2 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:275.61,277.2 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:272.2,272.17 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:270.3,270.21 1 0 
+github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:263.4,268.29 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:260.10,262.5 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:258.4,258.23 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:258.23,260.5 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:255.18,257.5 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:252.32,255.18 3 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:251.3,252.32 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:248.17,250.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:244.16,248.17 4 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:242.77,244.16 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:239.2,239.18 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:235.3,237.15 3 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:231.18,233.25 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:228.31,230.25 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:226.61,227.26 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:224.62,225.33 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:222.18,223.26 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:221.3,221.10 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:218.21,220.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:214.26,218.21 4 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:214.2,214.26 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:211.16,213.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:208.58,211.16 3 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:202.66,206.2 3 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:199.2,199.13 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:196.16,198.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:194.55,196.16 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:190.75,192.2 1 1 
+github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:187.2,187.22 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:184.21,186.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:184.2,184.21 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:181.16,183.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:179.77,181.16 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:175.73,177.2 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:172.2,172.13 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:169.72,171.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:169.2,169.72 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:166.16,168.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:165.2,166.16 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:162.16,164.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:160.71,162.16 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:157.2,157.13 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:154.16,156.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:153.2,154.16 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:150.16,152.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:148.75,150.16 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:137.2,145.23 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:134.8,136.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:132.18,134.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:129.113,132.18 3 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:125.59,127.2 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:121.60,123.2 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:117.51,119.2 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:112.8,114.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:110.50,112.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:109.47,110.50 1 0 
+github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:105.51,107.2 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/wallet.go:100.33,103.2 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/txsizes.go:248.2,248.22 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/txsizes.go:245.32,247.3 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/txsizes.go:244.73,245.32 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/txsizes.go:236.2,240.13 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/txsizes.go:231.36,232.58 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/txsizes.go:229.35,230.58 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/txsizes.go:227.26,228.53 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/txsizes.go:225.26,226.53 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/txsizes.go:223.13,224.42 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/txsizes.go:221.2,222.19 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/txsizes.go:216.21,219.3 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/txsizes.go:213.113,216.21 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:99.27,101.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:98.2,99.27 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:95.3,95.21 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:92.17,94.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:89.41,92.17 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:83.2,89.41 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:79.3,79.50 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:76.4,77.55 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:73.18,74.13 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:72.4,73.18 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:69.18,70.13 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:669.2,669.36 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:666.20,668.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:666.2,666.20 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:660.107,662.10 
2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:659.30,660.107 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:658.32,659.30 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:658.2,658.32 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:655.16,657.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:653.2,655.16 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:650.34,652.3 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:649.2,650.34 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:646.16,648.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:645.2,646.16 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:642.16,644.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:639.94,642.16 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:636.2,636.32 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:633.16,635.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:630.2,633.16 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:626.16,628.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:625.2,626.16 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:615.3,623.28 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:612.30,614.4 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:609.3,612.30 4 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:606.17,608.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:604.8,606.17 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:601.3,602.43 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:60.35,69.18 9 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:598.30,600.4 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:595.34,598.30 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:594.2,595.34 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:591.3,591.33 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:588.17,590.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:586.27,588.17 2 1 
+github.com/OpenBazaar/multiwallet/bitcoin/sign.go:585.2,586.27 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:58.3,60.35 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:579.27,583.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:579.2,579.27 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:575.54,577.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:574.188,575.54 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:569.2,571.25 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:565.41,567.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:564.2,564.15 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:564.15,565.41 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:560.3,561.26 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:557.17,559.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:555.3,557.17 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:55.17,57.4 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:549.32,551.10 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:548.3,548.29 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:548.29,549.32 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:543.32,545.10 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:542.29,543.32 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:539.32,542.29 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:539.2,539.32 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:535.39,537.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:531.2,535.39 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:525.35,527.4 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:523.23,525.35 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:521.2,523.23 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:52.2,52.136 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:52.136,55.17 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:518.16,520.3 1 0 
+github.com/OpenBazaar/multiwallet/bitcoin/sign.go:516.2,518.16 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:511.3,512.38 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:508.17,510.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:506.27,508.17 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:506.2,506.27 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:502.3,504.35 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:499.17,501.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:497.25,499.17 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:495.207,497.25 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:492.2,492.18 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:49.25,51.3 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:489.3,490.26 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:486.17,487.12 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:484.25,486.17 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:483.2,484.25 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:479.16,481.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:476.2,479.16 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:470.35,472.4 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:468.23,470.35 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:466.2,468.23 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:463.16,465.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:461.2,463.16 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:46.2,49.25 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:456.3,457.38 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:453.17,455.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:451.27,453.17 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:451.2,451.27 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:447.3,449.35 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:444.17,446.4 1 0 
+github.com/OpenBazaar/multiwallet/bitcoin/sign.go:442.25,444.17 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:439.190,442.25 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:435.2,436.19 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:432.40,434.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:432.2,432.40 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:43.16,45.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:426.4,427.26 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:423.10,425.5 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:421.18,423.5 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:420.4,421.18 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:417.18,419.5 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:415.9,417.18 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:414.4,414.33 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:411.18,413.5 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:406.26,411.18 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:405.31,406.26 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:404.2,405.31 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:399.5,399.29 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:396.19,398.6 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:394.33,396.19 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:391.30,394.33 3 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:389.25,391.30 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:388.2,389.25 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:384.3,384.28 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:381.26,383.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:380.77,381.26 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:380.2,380.77 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:378.3,378.45 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:376.4,376.47 1 1 
+github.com/OpenBazaar/multiwallet/bitcoin/sign.go:373.18,375.5 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:371.57,373.18 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:370.88,371.57 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:370.2,370.88 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:37.2,43.16 5 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:366.16,368.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:364.2,366.16 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:361.16,363.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:347.2,361.16 5 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:344.16,346.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:337.2,344.16 5 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:333.17,335.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:330.25,333.17 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:33.89,35.3 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:327.2,330.25 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:322.3,325.44 4 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:319.17,321.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:318.3,319.17 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:315.17,317.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:312.25,315.17 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:309.2,312.25 4 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:305.16,307.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:304.2,305.16 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:301.8,303.3 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:30.136,33.89 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:299.20,301.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:297.177,299.20 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:294.2,294.44 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:291.4,291.29 1 1 
+github.com/OpenBazaar/multiwallet/bitcoin/sign.go:288.18,290.5 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:281.4,288.18 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:278.18,280.5 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:277.4,278.18 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:274.18,276.5 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:273.4,274.18 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:270.18,272.5 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:268.50,270.18 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:267.26,268.50 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:266.2,267.26 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:262.20,264.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:262.2,262.20 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:259.20,261.3 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:259.2,259.20 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:256.16,258.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:254.79,256.16 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:245.3,250.9 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:239.4,242.19 4 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:235.45,238.5 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:235.4,235.45 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:232.18,234.5 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:230.51,232.18 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:221.3,230.51 4 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:216.39,218.12 2 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:213.3,216.39 4 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:209.43,211.4 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:209.3,209.43 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:206.17,208.4 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:204.6,206.17 2 1 
+github.com/OpenBazaar/multiwallet/bitcoin/sign.go:201.2,204.6 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:197.32,199.3 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:194.172,197.32 2 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:191.2,191.16 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:189.3,189.32 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:186.17,188.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:181.31,186.17 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:181.2,181.31 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:178.37,180.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:177.2,178.37 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:175.3,175.46 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:172.10,174.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:169.88,172.10 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:162.2,169.88 4 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:157.94,159.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:152.2,157.94 4 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:147.16,149.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:141.2,147.16 4 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:138.16,140.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:133.102,138.16 4 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:130.2,130.27 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:128.3,128.32 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:125.17,127.4 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:120.42,125.17 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:120.2,120.42 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:117.37,119.3 1 0 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:116.2,117.37 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:111.88,115.3 3 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:108.2,111.88 2 1 
+github.com/OpenBazaar/multiwallet/bitcoin/sign.go:103.16,105.3 1 1 +github.com/OpenBazaar/multiwallet/bitcoin/sign.go:102.2,103.16 2 1 diff --git a/vendor/github.com/OpenBazaar/multiwallet/datastore/mock.go b/vendor/github.com/OpenBazaar/multiwallet/datastore/mock.go index 4c70295b9c..07cbed0d38 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/datastore/mock.go +++ b/vendor/github.com/OpenBazaar/multiwallet/datastore/mock.go @@ -11,7 +11,6 @@ import ( "github.com/OpenBazaar/wallet-interface" "github.com/btcsuite/btcd/btcec" - "github.com/btcsuite/btcd/chaincfg/chainhash" ) type MockDatastore struct { @@ -74,6 +73,13 @@ func NewMockMultiwalletDatastore() *MockMultiwalletDatastore { &MockTxnStore{txns: make(map[string]*txnStoreEntry)}, &MockWatchedScriptsStore{scripts: make(map[string][]byte)}, }) + db[wallet.Filecoin] = wallet.Datastore(&MockDatastore{ + &MockKeyStore{Keys: make(map[string]*KeyStoreEntry)}, + &MockUtxoStore{utxos: make(map[string]*wallet.Utxo)}, + &MockStxoStore{stxos: make(map[string]*wallet.Stxo)}, + &MockTxnStore{txns: make(map[string]*txnStoreEntry)}, + &MockWatchedScriptsStore{scripts: make(map[string][]byte)}, + }) return &MockMultiwalletDatastore{db: db} } @@ -343,15 +349,15 @@ func (m *MockTxnStore) Put(tx []byte, txid, value string, height int, timestamp return nil } -func (m *MockTxnStore) Get(txid chainhash.Hash) (wallet.Txn, error) { +func (m *MockTxnStore) Get(txid string) (wallet.Txn, error) { m.Lock() defer m.Unlock() - t, ok := m.txns[txid.String()] + t, ok := m.txns[txid] if !ok { return wallet.Txn{}, errors.New("Not found") } return wallet.Txn{ - Txid: txid.String(), + Txid: txid, Value: t.value, Height: int32(t.height), Timestamp: t.timestamp, @@ -378,27 +384,27 @@ func (m *MockTxnStore) GetAll(includeWatchOnly bool) ([]wallet.Txn, error) { return txns, nil } -func (m *MockTxnStore) UpdateHeight(txid chainhash.Hash, height int, timestamp time.Time) error { +func (m *MockTxnStore) UpdateHeight(txid string, height int, 
timestamp time.Time) error { m.Lock() defer m.Unlock() - txn, ok := m.txns[txid.String()] + txn, ok := m.txns[txid] if !ok { return errors.New("Not found") } txn.height = height txn.timestamp = timestamp - m.txns[txid.String()] = txn + m.txns[txid] = txn return nil } -func (m *MockTxnStore) Delete(txid *chainhash.Hash) error { +func (m *MockTxnStore) Delete(txid string) error { m.Lock() defer m.Unlock() - _, ok := m.txns[txid.String()] + _, ok := m.txns[txid] if !ok { return errors.New("Not found") } - delete(m.txns, txid.String()) + delete(m.txns, txid) return nil } diff --git a/vendor/github.com/OpenBazaar/multiwallet/docker-compose.yml b/vendor/github.com/OpenBazaar/multiwallet/docker-compose.yml deleted file mode 100755 index 0f35d7c82e..0000000000 --- a/vendor/github.com/OpenBazaar/multiwallet/docker-compose.yml +++ /dev/null @@ -1,10 +0,0 @@ -version: '3' -services: - dev: - build: - context: . - dockerfile: Dockerfile.dev - volumes: - - .:/go/src/github.com/OpenBazaar/multiwallet - security_opt: - - seccomp:unconfined #req: delve for golang diff --git a/vendor/github.com/OpenBazaar/multiwallet/filecoin/addr.go b/vendor/github.com/OpenBazaar/multiwallet/filecoin/addr.go new file mode 100644 index 0000000000..fa1d87d3a4 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/filecoin/addr.go @@ -0,0 +1,56 @@ +package filecoin + +import ( + "github.com/btcsuite/btcd/chaincfg" + faddr "github.com/filecoin-project/go-address" + "strings" +) + +type FilecoinAddress struct { + addr faddr.Address +} + +func NewFilecoinAddress(addrStr string) (*FilecoinAddress, error) { + addr, err := faddr.NewFromString(addrStr) + if err != nil { + return nil, err + } + return &FilecoinAddress{addr: addr}, nil +} + +// String returns the string encoding of the transaction output +// destination. 
+// +// Please note that String differs subtly from EncodeAddress: String +// will return the value as a string without any conversion, while +// EncodeAddress may convert destination types (for example, +// converting pubkeys to P2PKH addresses) before encoding as a +// payment address string. +func (f *FilecoinAddress) String() string { + return f.addr.String() +} + +// EncodeAddress returns the string encoding of the payment address +// associated with the Address value. See the comment on String +// for how this method differs from String. +func (f *FilecoinAddress) EncodeAddress() string { + return f.addr.String() +} + +// ScriptAddress returns the raw bytes of the address to be used +// when inserting the address into a txout's script. +func (f *FilecoinAddress) ScriptAddress() []byte { + return nil +} + +// IsForNet returns whether or not the address is associated with the +// passed bitcoin network. +func (f *FilecoinAddress) IsForNet(params *chaincfg.Params) bool { + switch params.Name { + case chaincfg.MainNetParams.Name: + return strings.HasPrefix(f.addr.String(), faddr.MainnetPrefix) + case chaincfg.TestNet3Params.Name: + return strings.HasPrefix(f.addr.String(), faddr.TestnetPrefix) + } + return false +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/filecoin/service.go b/vendor/github.com/OpenBazaar/multiwallet/filecoin/service.go new file mode 100644 index 0000000000..360ad41288 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/filecoin/service.go @@ -0,0 +1,315 @@ +package filecoin + +import ( + "encoding/json" + "fmt" + "github.com/OpenBazaar/multiwallet/cache" + "github.com/OpenBazaar/multiwallet/model" + "github.com/OpenBazaar/multiwallet/service" + "github.com/OpenBazaar/wallet-interface" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcutil" + "github.com/ipfs/go-cid" + "github.com/op/go-logging" + "math/big" + "sync" + "time" +) + +var Log = logging.MustGetLogger("Filecoin") + +type FilecoinService struct { + 
db wallet.Datastore + addr btcutil.Address + client model.APIClient + params *chaincfg.Params + coinType wallet.CoinType + + chainHeight uint32 + bestBlock string + cache cache.Cacher + + listeners []func(wallet.TransactionCallback) + + lock sync.RWMutex + + doneChan chan struct{} +} + +func NewFilecoinService(db wallet.Datastore, addr btcutil.Address, client model.APIClient, params *chaincfg.Params, coinType wallet.CoinType, cache cache.Cacher) (*FilecoinService, error) { + fs := &FilecoinService{ + db: db, + addr: addr, + client: client, + params: params, + coinType: coinType, + cache: cache, + doneChan: make(chan struct{}), + lock: sync.RWMutex{}, + } + marshaledHeight, err := cache.Get(fs.bestHeightKey()) + if err != nil { + Log.Info("cached block height missing: using default") + } else { + var hh service.HashAndHeight + if err := json.Unmarshal(marshaledHeight, &hh); err != nil { + Log.Error("failed unmarshaling cached block height") + return fs, nil + } + fs.bestBlock = hh.Hash + fs.chainHeight = hh.Height + } + return fs, nil +} + +func (fs *FilecoinService) Start() { + Log.Noticef("starting FilecoinService") + go fs.run() +} + +func (fs *FilecoinService) run() { + var ( + txChan = fs.client.TransactionNotify() + blockChan = fs.client.BlockNotify() + ) + + fs.client.ListenAddresses(fs.addr) + + for { + select { + case <-fs.doneChan: + return + case tx := <-txChan: + go fs.ProcessIncomingTransaction(tx) + case block := <-blockChan: + go fs.processIncomingBlock(block) + } + } +} + +func (fs *FilecoinService) Stop() { + close(fs.doneChan) +} + +func (fs *FilecoinService) ChainTip() (uint32, string) { + fs.lock.RLock() + defer fs.lock.RUnlock() + return fs.chainHeight, fs.bestBlock +} + +func (fs *FilecoinService) AddTransactionListener(callback func(callback wallet.TransactionCallback)) { + fs.listeners = append(fs.listeners, callback) +} + +func (fs *FilecoinService) InvokeTransactionListeners(callback wallet.TransactionCallback) { + for _, l := range 
fs.listeners { + go l(callback) + } +} + +// This is a transaction fresh off the wire. Let's save it to the db. +func (fs *FilecoinService) ProcessIncomingTransaction(tx model.Transaction) { + Log.Debugf("new incoming %s transaction: %s", fs.coinType.String(), tx.Txid) + + fs.lock.RLock() + chainHeight := int32(fs.chainHeight) + fs.lock.RUnlock() + fs.saveSingleTxToDB(tx, chainHeight) +} + +func (fs *FilecoinService) UpdateState() { + // Start by fetching the chain height from the API + Log.Debugf("updating %s chain state", fs.coinType.String()) + best, err := fs.client.GetBestBlock() + if err == nil { + Log.Debugf("%s chain height: %d", fs.coinType.String(), best.Height) + fs.lock.Lock() + err = fs.saveHashAndHeight(best.Hash, uint32(best.Height)) + if err != nil { + Log.Errorf("updating %s blockchain height: %s", fs.coinType.String(), err.Error()) + } + fs.lock.Unlock() + } else { + Log.Errorf("error querying API for %s chain height: %s", fs.coinType.String(), err.Error()) + } + + go fs.syncTxs() +} + +func (fs *FilecoinService) syncTxs() { + Log.Debugf("querying for %s transactions", fs.coinType.String()) + query := []btcutil.Address{fs.addr} + txs, err := fs.client.GetTransactions(query) + if err != nil { + Log.Errorf("error downloading txs for %s: %s", fs.coinType.String(), err.Error()) + } else { + Log.Debugf("downloaded %d %s transactions", len(txs), fs.coinType.String()) + fs.lock.RLock() + chainHeight := int32(fs.chainHeight) + fs.lock.RUnlock() + for _, u := range txs { + fs.saveSingleTxToDB(u, chainHeight) + } + } +} + +func (fs *FilecoinService) processIncomingBlock(block model.Block) { + Log.Infof("received new %s block at height %d: %s", fs.coinType.String(), block.Height, block.Hash) + fs.lock.RLock() + currentBest := fs.bestBlock + fs.lock.RUnlock() + + fs.lock.Lock() + err := fs.saveHashAndHeight(block.Hash, uint32(block.Height)) + if err != nil { + Log.Errorf("update %s blockchain height: %s", fs.coinType.String(), err.Error()) + } + 
fs.lock.Unlock() + + // REORG! Rescan all transactions and utxos to see if anything changed + if currentBest != block.PreviousBlockhash && currentBest != block.Hash { + Log.Warningf("%s chain reorg detected: rescanning wallet", fs.coinType.String()) + fs.UpdateState() + return + } + + // Query db for unconfirmed txs and utxos then query API to get current height + txs, err := fs.db.Txns().GetAll(true) + if err != nil { + Log.Errorf("error loading %s txs from db: %s", fs.coinType.String(), err.Error()) + return + } + for _, tx := range txs { + if tx.Height == 0 { + Log.Debugf("broadcasting unconfirmed txid %s", tx.Txid) + go func(txn wallet.Txn) { + ret, err := fs.client.GetTransaction(txn.Txid) + if err != nil { + Log.Errorf("error fetching unconfirmed %s tx: %s", fs.coinType.String(), err.Error()) + return + } + if ret.Confirmations > 0 { + fs.saveSingleTxToDB(*ret, int32(block.Height)) + return + } + // Rebroadcast unconfirmed transactions + _, err = fs.client.Broadcast(tx.Bytes) + if err != nil { + Log.Errorf("broadcasting unconfirmed utxo: %s", err.Error()) + } + }(tx) + } + } +} + +func (fs *FilecoinService) saveSingleTxToDB(u model.Transaction, chainHeight int32) { + value := new(big.Int) + + height := int32(0) + if u.Confirmations > 0 { + height = chainHeight - (int32(u.Confirmations) - 1) + } + + txHash, err := cid.Decode(u.Txid) + if err != nil { + Log.Errorf("error converting to txHash for %s: %s", fs.coinType.String(), err.Error()) + return + } + var relevant bool + cb := wallet.TransactionCallback{Txid: txHash.String(), Height: height, Timestamp: time.Unix(u.Time, 0)} + for _, in := range u.Inputs { + faddr, err := NewFilecoinAddress(in.Addr) + if err != nil { + Log.Errorf("error parsing address %s: %s", fs.coinType.String(), err.Error()) + continue + } + + v, _ := new(big.Int).SetString(in.ValueIface.(string), 10) + cbin := wallet.TransactionInput{ + LinkedAddress: faddr, + Value: *v, + } + cb.Inputs = append(cb.Inputs, cbin) + + if in.Addr == 
fs.addr.String() { + relevant = true + value.Sub(value, v) + } + } + for i, out := range u.Outputs { + if len(out.ScriptPubKey.Addresses) == 0 { + continue + } + faddr, err := NewFilecoinAddress(out.ScriptPubKey.Addresses[0]) + if err != nil { + Log.Errorf("error parsing address %s: %s", fs.coinType.String(), err.Error()) + continue + } + + v, _ := new(big.Int).SetString(out.ValueIface.(string), 10) + + cbout := wallet.TransactionOutput{Address: faddr, Value: *v, Index: uint32(i)} + cb.Outputs = append(cb.Outputs, cbout) + + if out.ScriptPubKey.Addresses[0] == fs.addr.String() { + relevant = true + value.Add(value, v) + } + } + + if !relevant { + Log.Warningf("abort saving irrelevant txid (%s) to db", u.Txid) + return + } + + cb.Value = *value + saved, err := fs.db.Txns().Get(txHash.String()) + if err != nil { + ts := time.Now() + if u.Confirmations > 0 { + ts = time.Unix(u.BlockTime, 0) + } + err = fs.db.Txns().Put(u.RawBytes, txHash.String(), value.String(), int(height), ts, false) + if err != nil { + Log.Errorf("putting txid (%s): %s", txHash.String(), err.Error()) + return + } + cb.Timestamp = ts + fs.callbackListeners(cb) + } else if height > 0 { + err := fs.db.Txns().UpdateHeight(txHash.String(), int(height), time.Unix(u.BlockTime, 0)) + if err != nil { + Log.Errorf("updating height for tx (%s): %s", txHash.String(), err.Error()) + return + } + if saved.Height != height { + cb.Timestamp = saved.Timestamp + fs.callbackListeners(cb) + } + } +} + +func (fs *FilecoinService) callbackListeners(cb wallet.TransactionCallback) { + for _, callback := range fs.listeners { + callback(cb) + } +} + +func (fs *FilecoinService) saveHashAndHeight(hash string, height uint32) error { + hh := service.HashAndHeight{ + Height: height, + Hash: hash, + Timestamp: time.Now(), + } + b, err := json.MarshalIndent(&hh, "", " ") + if err != nil { + return err + } + fs.chainHeight = height + fs.bestBlock = hash + return fs.cache.Set(fs.bestHeightKey(), b) +} + +func (fs *FilecoinService) 
bestHeightKey() string { + return fmt.Sprintf("best-height-%s", fs.coinType.String()) +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/filecoin/wallet.go b/vendor/github.com/OpenBazaar/multiwallet/filecoin/wallet.go new file mode 100644 index 0000000000..74e729afcf --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/filecoin/wallet.go @@ -0,0 +1,493 @@ +package filecoin + +import ( + "fmt" + "github.com/OpenBazaar/multiwallet/keys" + "github.com/btcsuite/btcd/btcec" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/sigs" + _ "github.com/filecoin-project/lotus/lib/sigs/secp" + "io" + "math/big" + "time" + + "github.com/OpenBazaar/multiwallet/cache" + "github.com/OpenBazaar/multiwallet/client" + "github.com/OpenBazaar/multiwallet/config" + "github.com/OpenBazaar/multiwallet/model" + wi "github.com/OpenBazaar/wallet-interface" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcutil" + hd "github.com/btcsuite/btcutil/hdkeychain" + faddr "github.com/filecoin-project/go-address" + "github.com/op/go-logging" + "github.com/tyler-smith/go-bip39" + "golang.org/x/net/proxy" +) + +type FilecoinWallet struct { + db wi.Datastore + params *chaincfg.Params + client model.APIClient + + mPrivKey *hd.ExtendedKey + mPubKey *hd.ExtendedKey + + addr faddr.Address + key *btcec.PrivateKey + + fs *FilecoinService + + exchangeRates wi.ExchangeRates + log *logging.Logger +} + +var ( + _ = wi.Wallet(&FilecoinWallet{}) + FilecoinCurrencyDefinition = wi.CurrencyDefinition{ + Code: "FIL", + Divisibility: 18, + } +) + +func NewFilecoinWallet(cfg config.CoinConfig, mnemonic string, params *chaincfg.Params, proxy proxy.Dialer, cache cache.Cacher, disableExchangeRates bool) (*FilecoinWallet, error) { + seed := bip39.NewSeed(mnemonic, "") + + mPrivKey, err := hd.NewMaster(seed, params) + if err != nil { + return nil, err + } + mPubKey, err := mPrivKey.Neuter() + if err != nil { 
+ return nil, err + } + + _, external, err := keys.Bip44Derivation(mPrivKey, wi.Filecoin) + if err != nil { + return nil, err + } + + accountHDKey, err := external.Child(0) + if err != nil { + return nil, err + } + + accountECKey, err := accountHDKey.ECPrivKey() + if err != nil { + return nil, err + } + + accountAddr, err := faddr.NewSecp256k1Address(accountECKey.PubKey().SerializeUncompressed()) + if err != nil { + return nil, err + } + + c, err := client.NewClientPool(cfg.ClientAPIs, proxy) + if err != nil { + return nil, err + } + + fs, err := NewFilecoinService(cfg.DB, &FilecoinAddress{addr: accountAddr}, c, params, wi.Filecoin, cache) + if err != nil { + return nil, err + } + + return &FilecoinWallet{ + db: cfg.DB, + params: params, + client: c, + addr: accountAddr, + mPrivKey: mPrivKey, + mPubKey: mPubKey, + key: accountECKey, + fs: fs, + log: logging.MustGetLogger("filecoin-wallet"), + }, nil +} + +func (w *FilecoinWallet) Start() { + w.client.Start() + w.fs.Start() +} + +func (w *FilecoinWallet) Params() *chaincfg.Params { + return w.params +} + +func (w *FilecoinWallet) CurrencyCode() string { + if w.params.Name == chaincfg.MainNetParams.Name { + return "fil" + } else { + return "tfil" + } +} + +func (w *FilecoinWallet) IsDust(amount big.Int) bool { + // TODO + return false +} + +func (w *FilecoinWallet) MasterPrivateKey() *hd.ExtendedKey { + return w.mPrivKey +} + +func (w *FilecoinWallet) MasterPublicKey() *hd.ExtendedKey { + return w.mPubKey +} + +func (w *FilecoinWallet) ChildKey(keyBytes []byte, chaincode []byte, isPrivateKey bool) (*hd.ExtendedKey, error) { + parentFP := []byte{0x00, 0x00, 0x00, 0x00} + var id []byte + if isPrivateKey { + id = w.params.HDPrivateKeyID[:] + } else { + id = w.params.HDPublicKeyID[:] + } + hdKey := hd.NewExtendedKey( + id, + keyBytes, + chaincode, + parentFP, + 0, + 0, + isPrivateKey) + return hdKey.Child(0) +} + +func (w *FilecoinWallet) CurrentAddress(purpose wi.KeyPurpose) btcutil.Address { + return
&FilecoinAddress{addr: w.addr} +} + +func (w *FilecoinWallet) NewAddress(purpose wi.KeyPurpose) btcutil.Address { + return &FilecoinAddress{addr: w.addr} +} + +func (w *FilecoinWallet) DecodeAddress(addr string) (btcutil.Address, error) { + a, err := faddr.NewFromString(addr) + if err != nil { + return nil, err + } + return &FilecoinAddress{addr: a}, nil +} + +func (w *FilecoinWallet) ScriptToAddress(script []byte) (btcutil.Address, error) { + return w.DecodeAddress(string(script)) +} + +func (w *FilecoinWallet) AddressToScript(addr btcutil.Address) ([]byte, error) { + return []byte(addr.String()), nil +} + +func (w *FilecoinWallet) HasKey(addr btcutil.Address) bool { + return w.addr.String() == addr.String() +} + +func (w *FilecoinWallet) Balance() (wi.CurrencyValue, wi.CurrencyValue) { + txns, _ := w.db.Txns().GetAll(false) + confirmed, unconfirmed := big.NewInt(0), big.NewInt(0) + for _, tx := range txns { + val, _ := new(big.Int).SetString(tx.Value, 10) + if val.Cmp(big.NewInt(0)) > 0 { + if tx.Height > 0 { + confirmed.Add(confirmed, val) + } else { + unconfirmed.Add(unconfirmed, val) + } + } else if val.Cmp(big.NewInt(0)) < 0 { + if tx.Height > 0 { + confirmed.Sub(confirmed, val) + } else { + unconfirmed.Sub(unconfirmed, val) + } + } + } + return wi.CurrencyValue{Value: *confirmed, Currency: FilecoinCurrencyDefinition}, + wi.CurrencyValue{Value: *unconfirmed, Currency: FilecoinCurrencyDefinition} +} + +func (w *FilecoinWallet) Transactions() ([]wi.Txn, error) { + height, _ := w.ChainTip() + txns, err := w.db.Txns().GetAll(false) + if err != nil { + return txns, err + } + for i, tx := range txns { + var confirmations int32 + var status wi.StatusCode + confs := int32(height) - tx.Height + 1 + if tx.Height <= 0 { + confs = tx.Height + } + switch { + case confs < 0: + status = wi.StatusDead + case confs == 0 && time.Since(tx.Timestamp) <= time.Hour*6: + status = wi.StatusUnconfirmed + case confs == 0 && time.Since(tx.Timestamp) > time.Hour*6: + status = wi.StatusDead
+ case confs > 0 && confs < 24: + status = wi.StatusPending + confirmations = confs + case confs > 23: + status = wi.StatusConfirmed + confirmations = confs + } + tx.Confirmations = int64(confirmations) + tx.Status = status + txns[i] = tx + } + return txns, nil +} + +func (w *FilecoinWallet) GetTransaction(txid string) (wi.Txn, error) { + txn, err := w.db.Txns().Get(txid) + return txn, err +} + +func (w *FilecoinWallet) ChainTip() (uint32, string) { + return w.fs.ChainTip() +} + +func (w *FilecoinWallet) GetFeePerByte(feeLevel wi.FeeLevel) big.Int { + return *big.NewInt(0) +} + +func (w *FilecoinWallet) Spend(amount big.Int, addr btcutil.Address, feeLevel wi.FeeLevel, referenceID string, spendAll bool) (string, error) { + address, err := faddr.NewFromString(addr.String()) + if err != nil { + return "", err + } + if spendAll { + c, u := w.Balance() + amount = *c.Value.Add(big.NewInt(0), &u.Value) + } + bigAmt, err := types.BigFromString(amount.String()) + if err != nil { + return "", err + } + + txns, err := w.Transactions() + if err != nil { + return "", err + } + + nonce := uint64(0) + for _, tx := range txns { + val, _ := new(big.Int).SetString(tx.Value, 10) + if val.Cmp(big.NewInt(0)) > 0 { + continue + } + + m, err := types.DecodeMessage(tx.Bytes) + if err != nil { + return "", err + } + if m.Nonce > nonce { + nonce = m.Nonce + } + } + if nonce > 0 { + nonce++ + } + + m := types.Message{ + To: address, + Value: bigAmt, + From: w.addr, + GasLimit: 1000, + Nonce: nonce, + } + + id := m.Cid() + + cs, err := sigs.Sign(crypto.SigTypeSecp256k1, w.key.Serialize(), id.Bytes()) + if err != nil { + return "", err + } + + signed := &types.SignedMessage{ + Message: m, + Signature: *cs, + } + + // Broadcast + if err := w.Broadcast(signed); err != nil { + return "", err + } + + myAddr, err := NewFilecoinAddress(w.addr.String()) + if err != nil { + return "", err + } + w.AssociateTransactionWithOrder(wi.TransactionCallback{ + Timestamp: time.Now(), + Outputs: 
[]wi.TransactionOutput{ + { + Address: addr, + Value: amount, + OrderID: referenceID, + }, + }, + Inputs: []wi.TransactionInput{ + { + Value: amount, + OrderID: referenceID, + LinkedAddress: myAddr, + }, + }, + Value: *amount.Mul(&amount, big.NewInt(-1)), + Txid: signed.Cid().String(), + }) + + return signed.Cid().String(), nil +} + +func (w *FilecoinWallet) EstimateFee(ins []wi.TransactionInput, outs []wi.TransactionOutput, feePerByte big.Int) big.Int { + return *big.NewInt(1000) // TODO: find right fee +} + +func (w *FilecoinWallet) EstimateSpendFee(amount big.Int, feeLevel wi.FeeLevel) (big.Int, error) { + return *big.NewInt(1000), nil // TODO: find right fee +} + +func (w *FilecoinWallet) AddWatchedAddresses(addrs ...btcutil.Address) error { + var watchedScripts [][]byte + for _, addr := range addrs { + if !w.HasKey(addr) { + script, err := w.AddressToScript(addr) + if err != nil { + return err + } + watchedScripts = append(watchedScripts, script) + } + } + + err := w.db.WatchedScripts().PutAll(watchedScripts) + if err != nil { + return err + } + + w.client.ListenAddresses(addrs...) 
+ return nil +} + +func (w *FilecoinWallet) AddWatchedScript(script []byte) error { + err := w.db.WatchedScripts().Put(script) + if err != nil { + return err + } + addr, err := w.ScriptToAddress(script) + if err != nil { + return err + } + w.client.ListenAddresses(addr) + return nil +} + +func (w *FilecoinWallet) AddTransactionListener(callback func(wi.TransactionCallback)) { + w.fs.AddTransactionListener(callback) +} + +func (w *FilecoinWallet) ReSyncBlockchain(fromTime time.Time) { + go w.fs.UpdateState() +} + +func (w *FilecoinWallet) GetConfirmations(txid string) (uint32, uint32, error) { + txn, err := w.db.Txns().Get(txid) + if err != nil { + return 0, 0, err + } + if txn.Height == 0 { + return 0, 0, nil + } + chainTip, _ := w.ChainTip() + return chainTip - uint32(txn.Height) + 1, uint32(txn.Height), nil +} + +func (w *FilecoinWallet) Close() { + w.fs.Stop() + w.client.Close() +} + +func (w *FilecoinWallet) ExchangeRates() wi.ExchangeRates { + return w.exchangeRates +} + +func (w *FilecoinWallet) DumpTables(wr io.Writer) { + fmt.Fprintln(wr, "Transactions-----") + txns, _ := w.db.Txns().GetAll(true) + for _, tx := range txns { + fmt.Fprintf(wr, "Hash: %s, Height: %d, Value: %s, WatchOnly: %t\n", tx.Txid, int(tx.Height), tx.Value, tx.WatchOnly) + } + fmt.Fprintln(wr, "\nUtxos-----") + utxos, _ := w.db.Utxos().GetAll() + for _, u := range utxos { + fmt.Fprintf(wr, "Hash: %s, Index: %d, Height: %d, Value: %s, WatchOnly: %t\n", u.Op.Hash.String(), int(u.Op.Index), int(u.AtHeight), u.Value, u.WatchOnly) + } + fmt.Fprintln(wr, "\nKeys-----") + keys, _ := w.db.Keys().GetAll() + unusedInternal, _ := w.db.Keys().GetUnused(wi.INTERNAL) + unusedExternal, _ := w.db.Keys().GetUnused(wi.EXTERNAL) + internalMap := make(map[int]bool) + externalMap := make(map[int]bool) + for _, k := range unusedInternal { + internalMap[k] = true + } + for _, k := range unusedExternal { + externalMap[k] = true + } + + for _, k := range keys { + var used bool + if k.Purpose == wi.INTERNAL { + 
used = internalMap[k.Index] + } else { + used = externalMap[k.Index] + } + fmt.Fprintf(wr, "KeyIndex: %d, Purpose: %d, Used: %t\n", k.Index, k.Purpose, used) + } +} + +// Build a client.Transaction so we can ingest it into the wallet service then broadcast +func (w *FilecoinWallet) Broadcast(msg *types.SignedMessage) error { + id := msg.Cid() + ser, err := msg.Serialize() + if err != nil { + return err + } + + cTxn := model.Transaction{ + Txid: id.String(), + Version: int(msg.Message.Version), + Confirmations: 0, + Time: time.Now().Unix(), + RawBytes: ser, + Inputs: []model.Input{ + { + Addr: w.addr.String(), + ValueIface: msg.Message.Value.String(), + }, + }, + Outputs: []model.Output{ + { + ScriptPubKey: model.OutScript{ + Addresses: []string{msg.Message.To.String()}, + }, + ValueIface: msg.Message.Value.String(), + }, + }, + } + + _, err = w.client.Broadcast(ser) + if err != nil { + return err + } + w.fs.ProcessIncomingTransaction(cTxn) + return nil +} + +// AssociateTransactionWithOrder used for ORDER_PAYMENT message +func (w *FilecoinWallet) AssociateTransactionWithOrder(cb wi.TransactionCallback) { + w.fs.InvokeTransactionListeners(cb) +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/go.mod b/vendor/github.com/OpenBazaar/multiwallet/go.mod new file mode 100644 index 0000000000..9dca9fd95a --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/go.mod @@ -0,0 +1,49 @@ +module github.com/OpenBazaar/multiwallet + +go 1.14 + +require ( + github.com/OpenBazaar/go-ethwallet v0.0.0-20200604192229-31db816d691c + github.com/OpenBazaar/golang-socketio v0.0.0-20200109001351-4147b5f0d294 + github.com/OpenBazaar/openbazaar-go v0.14.3 // indirect + github.com/OpenBazaar/spvwallet v0.0.0-20200112224336-39f04e8d6d34 + github.com/OpenBazaar/wallet-interface v0.0.0-20200720181501-d30f5eb54286 + github.com/btcsuite/btcd v0.20.1-beta + github.com/btcsuite/btcutil v1.0.2 + github.com/btcsuite/btcwallet/wallet/txauthor v1.0.0 + 
github.com/btcsuite/btcwallet/wallet/txrules v1.0.0 + github.com/btcsuite/golangcrypto v0.0.0-20150304025918-53f62d9b43e8 + github.com/cenkalti/backoff v2.2.1+incompatible + github.com/cpacia/bchutil v0.0.0-20181003130114-b126f6a35b6c + github.com/ethereum/go-ethereum v1.9.15 // indirect + github.com/filecoin-project/filecoin-ffi v0.30.4-0.20201006125140-a62d00da59d1 // indirect + github.com/filecoin-project/go-address v0.0.4 + github.com/filecoin-project/go-state-types v0.0.0-20201013222834-41ea465f274f + github.com/filecoin-project/lotus v1.1.0 + github.com/gcash/bchd v0.16.4 + github.com/golang/protobuf v1.4.2 + github.com/gorilla/websocket v1.4.2 + github.com/hunterlong/tokenbalance v0.0.12-0.20191105170207-4f98e641e619 // indirect + github.com/ipfs/go-cid v0.0.7 + github.com/jarcoal/httpmock v1.0.5 + github.com/jessevdk/go-flags v1.4.0 + github.com/joho/godotenv v1.3.0 // indirect + github.com/ltcsuite/ltcd v0.20.1-beta + github.com/ltcsuite/ltcutil v1.0.2-beta + github.com/ltcsuite/ltcwallet/wallet/txrules v1.0.0 + github.com/mattn/go-runewidth v0.0.8 // indirect + github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 + github.com/nanmu42/etherscan-api v1.1.1 // indirect + github.com/olekukonko/tablewriter v0.0.4 // indirect + github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 + github.com/prometheus/tsdb v0.7.1 // indirect + github.com/shopspring/decimal v1.2.0 // indirect + github.com/sirupsen/logrus v1.7.0 // indirect + github.com/stretchr/objx v0.2.0 // indirect + github.com/tyler-smith/go-bip39 v1.0.2 + golang.org/x/crypto v0.0.0-20200707235045-ab33eee955e0 + golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect + golang.org/x/net v0.0.0-20200707034311-ab3426394381 + google.golang.org/grpc v1.30.0 + honnef.co/go/tools v0.0.1-2020.1.3 // indirect +) diff --git a/vendor/github.com/OpenBazaar/multiwallet/go.sum b/vendor/github.com/OpenBazaar/multiwallet/go.sum new file mode 100644 index 0000000000..ae57bed105 --- /dev/null +++ 
b/vendor/github.com/OpenBazaar/multiwallet/go.sum @@ -0,0 +1,2052 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +contrib.go.opencensus.io/exporter/jaeger v0.1.0/go.mod h1:VYianECmuFPwU37O699Vc1GOcy+y8kOsfaxHRImmjbA= +contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/gpu/mtl 
v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= +github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= +github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= 
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= +github.com/GeertJohan/go.rice v1.0.0 h1:KkI6O9uMaQU3VEKaj01ulavtF7o1fWT7+pk/4voiMLQ= +github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= +github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee/go.mod h1:W0GbEAA4uFNYOGG2cJpmFJ04E6SD1NLELPYZB57/7AY= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OpenBazaar/go-ethwallet v0.0.0-20200604192229-31db816d691c h1:+dB9aoL2MH7IN4Tcxa4e6ybEDS48kvImYcKRiH27ITE= +github.com/OpenBazaar/go-ethwallet v0.0.0-20200604192229-31db816d691c/go.mod h1:AQS5qGhP8yd5kv3d+pFDyxEPIsiJr35vwXC1W1ttkdw= +github.com/OpenBazaar/golang-socketio v0.0.0-20200109001351-4147b5f0d294 h1:ic05Fdz58zT1On4uleoZjhlxCIGtiwmU421MksguxSE= +github.com/OpenBazaar/golang-socketio v0.0.0-20200109001351-4147b5f0d294/go.mod h1:sEz//wpQju9ZgIZKKSVf0R5tok3eEqDpKL+Q6WzWXz8= +github.com/OpenBazaar/jsonpb v0.0.0-20171123000858-37d32ddf4eef/go.mod h1:55mCznBcN9WQgrtgaAkv+p2LxeW/tQRdidyyE9D0I5k= +github.com/OpenBazaar/openbazaar-go v0.14.3 h1:8GLYjZp+vZFrsonyj4a8q/JJucgMemN1Ssw/xqzA8LQ= 
+github.com/OpenBazaar/openbazaar-go v0.14.3/go.mod h1:IPD0UJLYifq0e5ZiJ1oXpKyg1raiRvmU0Mq17sKj/S0= +github.com/OpenBazaar/spvwallet v0.0.0-20200112224336-39f04e8d6d34 h1:xGvs105pXKoIVhYc5SaxbLOzvriX61KYB9oSinjFGUk= +github.com/OpenBazaar/spvwallet v0.0.0-20200112224336-39f04e8d6d34/go.mod h1:SVavvqIp6t5kuJx+PqDsKIQ+avRth92nGg2wVp7mW/s= +github.com/OpenBazaar/wallet-interface v0.0.0-20200720181501-d30f5eb54286 h1:gHiOKvfKlJwmkX6lfjEuCx13PrMAXdMqjUQzA4eqWSg= +github.com/OpenBazaar/wallet-interface v0.0.0-20200720181501-d30f5eb54286/go.mod h1:KiLnq+35bzKd6Bq8EP8iGElNBU/++VxbDVg9zCvKMgU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= +github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/Stebalien/go-bitfield v0.0.0-20180330043415-076a62f9ce6e/go.mod h1:3oM7gXIttpYDAJXpVNnSCiUMYBLIZ6cb1t+Ip982MRo= +github.com/Stebalien/go-bitfield v0.0.1 h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo= +github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= +github.com/VictoriaMetrics/fastcache v1.5.7 h1:4y6y0G8PRzszQUYIQHHssv/jgPHAb5qQuuDNdCbyAgw= +github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= +github.com/aead/siphash 
v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= +github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847 h1:rtI0fD4oG/8eVokGVPYJEW1F88p1ZNgXiEIs9thEE4A= +github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= +github.com/armon/circbuf 
v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.32.11/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= +github.com/benbjohnson/clock v1.0.1/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= +github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bradfitz/go-smtpd 
v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/briandowns/spinner v1.11.1/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ= +github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ= +github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= +github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts= +github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= +github.com/btcsuite/btcwallet/wallet/txauthor v1.0.0 h1:KGHMW5sd7yDdDMkCZ/JpP0KltolFsQcB973brBnfj4c= +github.com/btcsuite/btcwallet/wallet/txauthor v1.0.0/go.mod h1:VufDts7bd/zs3GV13f/lXc/0lXrPnvxD/NvmpG/FEKU= +github.com/btcsuite/btcwallet/wallet/txrules v1.0.0 h1:2VsfS0sBedcM5KmDzRMT3+b6xobqWveZGvjb+jFez5w= +github.com/btcsuite/btcwallet/wallet/txrules v1.0.0/go.mod 
h1:UwQE78yCerZ313EXZwEiu3jNAtfXj2n2+c8RWiE/WNA= +github.com/btcsuite/btcwallet/wallet/txsizes v1.0.0 h1:6DxkcoMnCPY4E9cUDPB5tbuuf40SmmMkSQkoE8vCT+s= +github.com/btcsuite/btcwallet/wallet/txsizes v1.0.0/go.mod h1:pauEU8UuMFiThe5PB3EO+gO5kx87Me5NvdQDsTuq6cs= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/golangcrypto v0.0.0-20150304025918-53f62d9b43e8 h1:nOsAWScwueMVk/VLm/dvQQD7DuanyvAUb6B3P3eT274= +github.com/btcsuite/golangcrypto v0.0.0-20150304025918-53f62d9b43e8/go.mod h1:tYvUd8KLhm/oXvUeSEs2VlLghFjQt9+ZaF9ghH0JNjc= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0 h1:Tvd0BfvqX9o823q1j2UZ/epQo09eJh6dTcRp79ilIN4= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0 h1:ZxaA6lo2EpxGddsA8JwWOcxlzRybb444sgmeJQMJGQE= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129/go.mod h1:u9UyCz2eTrSGy6fbupqJ54eY5c4IC8gREQ1053dK12U= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= 
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= +github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= +github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod 
h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= +github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.2.1-0.20180108230905-e214231b295a/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpacia/bchutil v0.0.0-20181003130114-b126f6a35b6c h1:4e6Zsb6LFd3kadoMiut2zcd3hCb4zywpJnQa8+NV2Cs= +github.com/cpacia/bchutil v0.0.0-20181003130114-b126f6a35b6c/go.mod h1:k5D13LCXSsMrQyfdW0yGYs4GWUvirqoxHht8qwtqyRY= +github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= 
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= +github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= +github.com/daaku/go.zipexe v1.0.0 h1:VSOgZtH418pH9L16hC/JrgSNJbbAL26pj7lmD1+CGdY= +github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f h1:BOaYiTvg8p9vBUXpklC22XSK/mifLF7lG9jtmYYi3Tc= +github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/dchest/siphash v1.2.1 h1:4cLinnzVJDKxTCl9B01807Yiy+W7ZzVHj/KIroQRvT4= +github.com/dchest/siphash v1.2.1/go.mod h1:q+IRvb2gOSrUnYoPqHiyHXS0FOBBOdl6tONBlVnOnt4= +github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea 
h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0= +github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e h1:lj77EKYUpYXTd8CD/+QMIf8b6OIOTsfEBSXiAzuEHTU= +github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e/go.mod h1:3ZQK6DMPSz/QZ73jlWxBtUhNA8xZx7LzUFSq/OfP8vk= +github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= +github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= +github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM= +github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= +github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dlclark/regexp2 v1.2.0/go.mod 
h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dop251/goja v0.0.0-20200219165308-d1232e640a87/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= +github.com/drand/bls12-381 v0.3.2/go.mod h1:dtcLgPtYT38L3NO6mPDYH0nbpc5tjPassDqiniuAt4Y= +github.com/drand/drand v1.1.2-0.20200905144319-79c957281b32/go.mod h1:0sQEVg+ngs1jaDPVIiEgY0lbENWJPaUlWxGHEaSmKVM= +github.com/drand/kyber v1.0.1-0.20200110225416-8de27ed8c0e2/go.mod h1:UpXoA0Upd1N9l4TvRPHr1qAUBBERj6JQ/mnKI3BPEmw= +github.com/drand/kyber v1.0.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= +github.com/drand/kyber v1.1.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= +github.com/drand/kyber-bls12381 v0.1.0/go.mod h1:N1emiHpm+jj7kMlxEbu3MUyOiooTgNySln564cgD9mk= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c h1:JHHhtb9XWJrGNMcrVP6vyzO4dusgi/HnceHTgxSejUM= +github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elastic/go-sysinfo 
v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= +github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= +github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ethereum/go-ethereum v1.9.15 h1:wrWl+QrtutRUJ9LZXdUqBoGoo2b1tOCYRDrAOQhCY3A= +github.com/ethereum/go-ethereum v1.9.15/go.mod h1:slT8bPPRhXsyNTwHQxrOnjuTZ1sDXRajW11EkJ84QJ0= +github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= +github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.8.0 h1:5bzFgL+oy7JITMTxUPJ00n7VxmYd/PdMp5mHFX40/RY= +github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E= +github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d h1:YVh0Q+1iUvbv7SIfwA/alULOlWjQNOEnV72rgeYweLY= +github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d/go.mod h1:XE4rWG1P7zWPaC11Pkn1CVR20stqN52MnMkIrF4q6ZU= +github.com/filecoin-project/filecoin-ffi v0.30.4-0.20201006125140-a62d00da59d1 
h1:tQWTejWA9P2TQLvUlzJeaCenqhswhfXm6zLRiUE9CIw= +github.com/filecoin-project/filecoin-ffi v0.30.4-0.20201006125140-a62d00da59d1/go.mod h1:qby9lZcWbuZJ+9qqu5jUAnSaQ95XsxdT3cavjY9YLO4= +github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= +github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= +github.com/filecoin-project/go-address v0.0.4 h1:gSNMv0qWwH16fGQs7ycOUrDjY6YCSsgLUl0I0KLjo8w= +github.com/filecoin-project/go-address v0.0.4/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= +github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2 h1:jamfsxfK0Q9yCMHt8MPWx7Aa/O9k2Lve8eSc6FILYGQ= +github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 h1:pIuR0dnMD0i+as8wNnjjHyQrnhP5O5bmba/lmgQeRgU= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g= +github.com/filecoin-project/go-bitfield v0.0.1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= +github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-bitfield v0.2.1 h1:S6Uuqcspqu81sWJ0He4OAfFLm1tSwPdVjtKTkl5m/xQ= +github.com/filecoin-project/go-bitfield v0.2.1/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= +github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod 
h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= +github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= +github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= +github.com/filecoin-project/go-data-transfer v0.9.0 h1:nTT8j7Hu3TM0wRWrGy83/ctawG7sleJGdFWtIsUsKgY= +github.com/filecoin-project/go-data-transfer v0.9.0/go.mod h1:i2CqUy7TMQGKukj9BgqIxiP8nDHDXU2VLd771KVaCaQ= +github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= +github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= +github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s= +github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= +github.com/filecoin-project/go-fil-markets v1.0.0 h1:np9+tlnWXh9xYG4oZfha6HZFLYOaAZoMGR3V4w6DM48= +github.com/filecoin-project/go-fil-markets v1.0.0/go.mod h1:lXExJyYHwpMMddCqhEdNrc7euYJKNkp04K76NZqJLGg= +github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= +github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= +github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= +github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+eEvrDCGJoPLxFpDynFjYfBjI= +github.com/filecoin-project/go-jsonrpc v0.1.2-0.20201008195726-68c6a2704e49/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= +github.com/filecoin-project/go-multistore v0.0.3 h1:vaRBY4YiA2UZFPK57RNuewypB8u0DzzQwqsL0XarpnI= +github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ= +github.com/filecoin-project/go-padreader 
v0.0.0-20200903213702-ed5fae088b20 h1:+/4aUeUoKr6AKfPE3mBhXA5spIV6UcKdTYDPNU2Tdmg= +github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261 h1:A256QonvzRaknIIAuWhe/M2dpV2otzs3NBhi5TWa/UA= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= +github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= +github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= +github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.0.0-20201013222834-41ea465f274f h1:TZDTu4MtBKSFLXWGKLy+cvC3nHfMFIrVgWLAz/+GgZQ= +github.com/filecoin-project/go-state-types v0.0.0-20201013222834-41ea465f274f/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe h1:dF8u+LEWeIcTcfUcCf3WFVlc81Fr2JKg8zPzIbBDKDw= +github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= +github.com/filecoin-project/go-statestore v0.1.0 h1:t56reH59843TwXHkMcwyuayStBIiWBRilQjQ+5IiwdQ= +github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= +github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= +github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= +github.com/filecoin-project/lotus v1.1.0 h1:ka2I5FcIXtUkBcTLnNj4gLBVpA7f7WVKG5wr69nmWQs= +github.com/filecoin-project/lotus 
v1.1.0/go.mod h1:TCDAZYleaxC4NdKBsp0n+jscID12SxPoeYeHMCTT+TQ= +github.com/filecoin-project/specs-actors v0.6.1/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY= +github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4= +github.com/filecoin-project/specs-actors v0.9.12 h1:iIvk58tuMtmloFNHhAOQHG+4Gci6Lui0n7DYQGi3cJk= +github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= +github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY= +github.com/filecoin-project/specs-actors/v2 v2.2.0 h1:IyCICb0NHYeD0sdSqjVGwWydn/7r7xXuxdpvGAcRCGY= +github.com/filecoin-project/specs-actors/v2 v2.2.0/go.mod h1:rlv5Mx9wUhV8Qsz+vUezZNm+zL4tK08O0HreKKPB2Wc= +github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= +github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= +github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepBOH8E+2HEw6/hKkBvFPwhUN8c= +github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as= +github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod 
h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= +github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 h1:EzDjxMg43q1tA2c0MV3tNbaontnHLplHyFF6M5KiVP0= +github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1/go.mod h1:0eHX/BVySxPc6SE2mZRoppGq7qcEagxdmQnA3dzork8= +github.com/gcash/bchd v0.14.7/go.mod h1:Gk/O1ktRVW5Kao0RsnVXp3bWxeYQadqawZ1Im9HE78M= +github.com/gcash/bchd v0.15.2/go.mod h1:k9wIjgwnhbrAw+ruIPZ2tHZMzfFNdyUnORZZX7lqXGY= +github.com/gcash/bchd v0.16.4 h1:+aq3sk3MDTLLwfDldvJaQBbpALCiDMH1bT32qIeHYos= +github.com/gcash/bchd v0.16.4/go.mod h1:gR67ljCexTNwbKYN3wjbRHi9lYLp4rMomy1UQ3E1USA= +github.com/gcash/bchlog v0.0.0-20180913005452-b4f036f92fa6 h1:3pZvWJ8MSfWstGrb8Hfh4ZpLyZNcXypcGx2Ju4ZibVM= +github.com/gcash/bchlog v0.0.0-20180913005452-b4f036f92fa6/go.mod h1:PpfmXTLfjRp7Tf6v/DCGTRXHz+VFbiRcsoUxi7HvwlQ= +github.com/gcash/bchutil v0.0.0-20190625002603-800e62fe9aff/go.mod h1:zXSP0Fg2L52wpSEDApQDQMiSygnQiK5HDquDl0a5BHg= +github.com/gcash/bchutil v0.0.0-20191012211144-98e73ec336ba/go.mod h1:nUIrcbbtEQdCsRwcp+j/CndDKMQE9Fi8p2F8cIZmIqI= +github.com/gcash/bchutil v0.0.0-20200229194731-128fc9884722 h1:oeBQvSvKXcDbyoUbyeveB99CHJWgQfxiV9gKcPrXfhs= +github.com/gcash/bchutil v0.0.0-20200229194731-128fc9884722/go.mod h1:wB++2ZcHUvGLN1OgO9swBmJK1vmyshJLW9SNS+apXwc= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghemawat/stream 
v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= +github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= +github.com/go-ole/go-ole v1.2.4/go.mod 
h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc= +github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache 
v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= 
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf h1:gFVkHXmVAhEbxZVDln5V9GKrLaluNoFHDbrZwAWZgws= +github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= 
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.17 h1:rMrlX2ZY2UbvT+sdz3+6J+pp2z+msCq9MxTU6ymxbBY= +github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/gopacket v1.1.18 h1:lum7VRA9kdlvBi7/v2p7/zcbkduHaCH/SVVyurs7OpY= +github.com/google/gopacket v1.1.18/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs 
v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= +github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod 
h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= +github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/gxed/go-shellwords v1.0.3/go.mod h1:N7paucT91ByIjmVJHhvoarjoQnmsi3Jd3vH7VqgtMxQ= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/gxed/pubsub v0.0.0-20180201040156-26ebdf44f824/go.mod h1:OiEWyHgK+CWrmOlVquHaIK1vhpUJydC9m0Je6mhaiNE= +github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026/go.mod h1:5Scbynm8dF1XAPwIwkGPqzkM/shndPm79Jd1003hTjE= +github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 h1:F9k+7wv5OIk1zcq23QpdiL0hfDuXPjuOmMNaC6fgQ0Q= +github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1/go.mod h1:jvfsLIxk0fY/2BKSQ1xf2406AKA5dwMmKKv0ADcOfN8= +github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4nmd7b5qy5t0GWDTwSn4OyRgfAXSmo6VnryBY= +github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e/go.mod h1:I8h3MITA53gN9OnWGCgaMa0JWVRdXthWw4M3CPM54OY= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= 
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= 
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/huin/goupnp v0.0.0-20180415215157-1395d1447324/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag= +github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo= +github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= +github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/hunterlong/tokenbalance v0.0.12-0.20191105170207-4f98e641e619 h1:kVdhNuTJN8YZiZx5JlCOuJE4c0VhfC+WCgDGDBPyZxQ= +github.com/hunterlong/tokenbalance v0.0.12-0.20191105170207-4f98e641e619/go.mod h1:np6MtV0Z17TTSmmT3dfGkc0OMD/u/6z+Kflc8IhJBec= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/improbable-eng/grpc-web v0.9.1/go.mod h1:6hRR09jOEG81ADP5wCQju1z71g6OL4eEvELdran/3cs= +github.com/improbable-eng/grpc-web v0.12.0/go.mod h1:6hRR09jOEG81ADP5wCQju1z71g6OL4eEvELdran/3cs= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= +github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= +github.com/ipfs/bbloom v0.0.4/go.mod 
h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= +github.com/ipfs/go-bitswap v0.0.3/go.mod h1:jadAZYsP/tcRMl47ZhFxhaNuDQoXawT8iHMg+iFoQbg= +github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis= +github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= +github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= +github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= +github.com/ipfs/go-bitswap v0.2.20 h1:Zfi5jDUoqxDThORUznqdeL77DdGniAzlccNJ4vr+Itc= +github.com/ipfs/go-bitswap v0.2.20/go.mod h1:C7TwBgHnu89Q8sHsTJP7IhUqF9XYLe71P4tT5adgmYo= +github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= +github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-blockservice v0.0.3/go.mod h1:/NNihwTi6V2Yr6g8wBI+BSwPuURpBRMtYNGrlxZ8KuI= +github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So= +github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= +github.com/ipfs/go-blockservice v0.1.3 h1:9XgsPMwwWJSC9uVr2pMDsW2qFTBSkxpGMhmna8mIjPM= +github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= +github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834 h1:hFJoI1D2a3MqiNkSb4nKwrdkhCngUxUTFNwVwovZX2s= +github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.4-0.20191112011718-79e75dffeb10/go.mod 
h1:/BYOuUoxkE+0f6tGzlzMvycuN+5l35VOR4Bpg2sCmds= +github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= +github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00 h1:QN88Q0kT2QiDaLxpR/SDsqOBtNIEF/F3n96gSDUimkA= +github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs= +github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY= +github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cidutil v0.0.2/go.mod h1:ewllrvrxG6AMYStla3GD7Cqn+XYSLqjK0vc+086tB6s= +github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.3.0/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.2/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.4 h1:rjvQ9+muFaJ+QZ7dN5B1MSDNQ0JVZKkkES/rMZmA8X8= +github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.5 h1:cwOUcGMLdLPWgu3SlrCckCMznaGADbPqE0r8h768/Dg= +github.com/ipfs/go-datastore v0.4.5/go.mod 
h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= +github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= +github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= +github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= +github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= +github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= +github.com/ipfs/go-ds-badger2 v0.1.0/go.mod h1:pbR1p817OZbdId9EvLOhKBgUVTM3BMCSTan78lDDVaw= +github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e/go.mod h1:lJnws7amT9Ehqzta0gwMrRsURU04caT0iRPr1W8AsOU= +github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= +github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= +github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= +github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= +github.com/ipfs/go-ds-measure v0.1.0/go.mod h1:1nDiFrhLlwArTME1Ees2XaBOl49OoCgd2A3f8EchMSY= +github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459/go.mod h1:oh4liWHulKcDKVhCska5NLelE3MatWl+1FwSz3tY91g= +github.com/ipfs/go-filestore v1.0.0 h1:QR7ekKH+q2AGiWDc7W2Q0qHuYSRZGUJqUn0GsegEPb0= +github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPiFOdcuu9SM= +github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= +github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= +github.com/ipfs/go-graphsync v0.3.0 h1:I6Y20kSuCWkUvPoUWo4V3am704/9QjgDVVkf0zIV8+8= +github.com/ipfs/go-graphsync v0.3.0/go.mod 
h1:gEBvJUNelzMkaRPJTpg/jaKN4AQW/7wDWu0K92D8o10= +github.com/ipfs/go-graphsync v0.3.1 h1:dJLYrck4oyJDfMVhGEKiWHxaY8oYMWko4m2Fi+4bofo= +github.com/ipfs/go-graphsync v0.3.1/go.mod h1:bw4LiLM5Oq/uLdzEtih9LK8GrwSijv+XqYiWCTxHMqs= +github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e/go.mod h1:9aQJu/i/TaRDW6jqB5U217dLIDopn50wxLdHXM2CTfE= +github.com/ipfs/go-hamt-ipld v0.1.1 h1:0IQdvwnAAUKmDE+PMJa5y1QiwOPHpI9+eAbQEEEYthk= +github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= +github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= +github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= +github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= +github.com/ipfs/go-ipfs-blockstore v1.0.0 h1:pmFp5sFYsYVvMOp9X01AK3s85usVcLvkBTRsN6SnfUA= +github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU= +github.com/ipfs/go-ipfs-blockstore v1.0.1 h1:fnuVj4XdZp4yExhd0CnUwAiMNJHiPnfInhiuwz4lW1w= +github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= +github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= +github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= +github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= +github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8= +github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8= +github.com/ipfs/go-ipfs-cmds v0.1.0/go.mod h1:TiK4e7/V31tuEb8YWDF8lN3qrnDH+BS7ZqWIeYJlAs8= +github.com/ipfs/go-ipfs-config v0.0.11/go.mod h1:wveA8UT5ywN26oKStByzmz1CO6cXwLKKM6Jn/Hfw08I= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= 
+github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= +github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= +github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= +github.com/ipfs/go-ipfs-ds-help v1.0.0 h1:bEQ8hMGs80h0sR8O4tfDgV6B01aaF9qeTrujrTLYV3g= +github.com/ipfs/go-ipfs-ds-help v1.0.0/go.mod h1:ujAbkeIgkKAWtxxNkoZHWLCyk5JpPoKnGyCcsoF6ueE= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1 h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= +github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew= +github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= +github.com/ipfs/go-ipfs-files v0.0.2/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= +github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= +github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= +github.com/ipfs/go-ipfs-files v0.0.8 h1:8o0oFJkJ8UkO/ABl8T6ac6tKF3+NIpj67aAB6ZpusRg= +github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= +github.com/ipfs/go-ipfs-flags v0.0.1/go.mod h1:RnXBb9WV53GSfTrSDVK61NLTFKvWc60n+K9EgCDh+rA= +github.com/ipfs/go-ipfs-http-client v0.0.5/go.mod h1:8EKP9RGUrUex4Ff86WhnKU7seEBOtjdgXlY9XHYvYMw= +github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= +github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= +github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= +github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= +github.com/ipfs/go-ipfs-pq 
v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= +github.com/ipfs/go-ipfs-routing v0.0.1/go.mod h1:k76lf20iKFxQTjcJokbPM9iBXVXVZhcOwc360N4nuKs= +github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ= +github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= +github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= +github.com/ipfs/go-ipld-cbor v0.0.1/go.mod h1:RXHr8s4k0NE0TKhnrxqZC9M888QfsBN9rhS5NjfKzY8= +github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.5-0.20200428170625-a0bd04d3cbdf h1:PRCy+w3GocY77CBEwTprp6hn7PLiEU1YToKe7B+1FVk= +github.com/ipfs/go-ipld-cbor v0.0.5-0.20200428170625-a0bd04d3cbdf/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= +github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= +github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= +github.com/ipfs/go-ipns v0.0.2/go.mod h1:WChil4e0/m9cIINWLxZe1Jtf77oz5L05rO2ei/uKJ5U= +github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/ipfs/go-log 
v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA= +github.com/ipfs/go-log v1.0.1/go.mod h1:HuWlQttfN6FWNHRhlY5yMk/lW7evQC0HHGOxEwMRR8I= +github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= +github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= +github.com/ipfs/go-log v1.0.4 h1:6nLQdX4W8P9yZZFH7mO+X/PzjN8Laozm/lMJ6esdgzY= +github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= +github.com/ipfs/go-log/v2 v2.0.1/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= +github.com/ipfs/go-log/v2 v2.0.8/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= +github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= +github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4 h1:3bijxqzQ1O9yg7gd7Aqk80oaEvsJ+uXw0zSvi2qR3Jw= +github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= +github.com/ipfs/go-merkledag v0.0.3/go.mod h1:Oc5kIXLHokkE1hWGMBHw+oxehkAaTOqtEb7Zbh6BhLA= +github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= +github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-merkledag v0.3.1 h1:3UqWINBEr3/N+r6OwgFXAddDP/8zpQX/8J7IGVOCqRQ= +github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= +github.com/ipfs/go-merkledag v0.3.2 h1:MRqj40QkrWkvPswXs4EfSslhZ4RVPRbxwX11js0t1xY= +github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= +github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= 
+github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= +github.com/ipfs/go-path v0.0.3/go.mod h1:zIRQUez3LuQIU25zFjC2hpBTHimWx7VK5bjZgRLbbdo= +github.com/ipfs/go-path v0.0.7/go.mod h1:6KTKmeRnBXgqrTvzFrPV3CamxcgvXX/4z79tfAd2Sno= +github.com/ipfs/go-peertaskqueue v0.0.4/go.mod h1:03H8fhyeMfKNFWqzYEVyMbcPUeYrqP1MX6Kd+aN+rMQ= +github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-peertaskqueue v0.2.0 h1:2cSr7exUGKYyDeUyQ7P/nHPs9P7Ht/B+ROrpN1EJOjc= +github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= +github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4= +github.com/ipfs/go-unixfs v0.0.4/go.mod h1:eIo/p9ADu/MFOuyxzwU+Th8D6xoxU//r590vUpWyfz8= +github.com/ipfs/go-unixfs v0.2.1/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= +github.com/ipfs/go-unixfs v0.2.4 h1:6NwppOXefWIyysZ4LR/qUBPvXd5//8J3jiMdvpbw6Lo= +github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= +github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= +github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= +github.com/ipfs/interface-go-ipfs-core v0.2.3/go.mod h1:Tihp8zxGpUeE3Tokr94L6zWZZdkRQvG5TL6i9MuNE+s= +github.com/ipfs/iptb v1.4.0/go.mod h1:1rzHpCYtNp87/+hTxG5TfCVn/yMY3dKnLn8tBiMfdmg= +github.com/ipfs/iptb-plugins v0.2.1/go.mod h1:QXMbtIWZ+jRsW8a4h13qAKU7jcM7qaittO8wOsTP0Rs= +github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4 h1:6phjU3kXvCEWOZpu+Ob0w6DzgPFZmDLgLPxJhD8RxEY= +github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw= +github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e h1:ZISbJlM0urTANR9KRfRaqlBmyOj5uUtxs2r4Up9IXsA= 
+github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= +github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f h1:XpOuNQ5GbXxUcSukbQcW9jkE7REpaFGJU2/T00fo9kA= +github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= +github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1 h1:K1Ysr7kgIlo7YQkPqdkA6H7BVdIugvuAz7OQUTJxLdE= +github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= +github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6 h1:6Mq+tZGSEMEoJJ1NbJRhddeelkXZcU8yfH/ZRYUo/Es= +github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= +github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= +github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= +github.com/jackpal/gateway v1.0.4/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= +github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= +github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA= +github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jarcoal/httpmock v1.0.5 h1:cHtVEcTxRSX4J0je7mWPfc9BpDpqzXSJ5HbymZmyHck= +github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= +github.com/jbenet/go-cienv 
v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= +github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c h1:uUx61FiAa1GI6ZmVd2wf2vULeQZIKG66eybjNXKYCz4= +github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c/go.mod h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= +github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= +github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= +github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v0.0.0-20181221193153-c0795c8afcf4/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/joeshaw/multierror 
v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= +github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20190830100107-3784a6c7c552/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= 
+github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kabukky/httpscerts v0.0.0-20150320125433-617593d7dcb3/go.mod h1:BYpt4ufZiIGv2nXn4gMxnfKV306n3mWXgNu/d2TqdTU= +github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= +github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356 h1:I/yrLt2WilKxlQKCM52clh5rGzTKpVctGT1lH4Dc8Jw= +github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= +github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kilic/bls12-381 v0.0.0-20200607163746-32e1441c8a9f/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/kkdai/bstream v0.0.0-20181106074824-b3251f7901ec/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/kkdai/bstream v1.0.0/go.mod h1:FDnDOHt5Yx4p3FaHcioFT0QjDOtgUpvjeZqAs+NVZZA= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/koron/go-ssdp 
v0.0.0-20191105050749-2e1c40ed0b5d h1:68u9r4wEvL3gYg2jvAOgROwZ3H+Y3hIDk4tbbmIjcYQ= +github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= +github.com/libp2p/go-addr-util v0.0.2 h1:7cWK5cdA5x72jX0g8iLrQWm5TRJZ6CzGdPEhWj7plWU= +github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= +github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= +github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= +github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40JQWnayTvNMgD/vyk= +github.com/libp2p/go-conn-security-multistream v0.0.1/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= 
+github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= +github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= +github.com/libp2p/go-conn-security-multistream v0.2.0 h1:uNiDjS58vrvJTg9jO6bySd1rMKejieG7v45ekqHbZ1M= +github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= +github.com/libp2p/go-eventbus v0.0.2/go.mod h1:Hr/yGlwxA/stuLnpMiu82lpNKpvRy3EaJxPu40XYOwk= +github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= +github.com/libp2p/go-eventbus v0.2.1 h1:VanAdErQnpTioN2TowqNcOijf6YwhuODe4pPKSDpxGc= +github.com/libp2p/go-eventbus v0.2.1/go.mod h1:jc2S4SoEVPP48H9Wpzm5aiGwUCBMfGhVhhBjyhhCJs8= +github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= +github.com/libp2p/go-flow-metrics v0.0.2/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= +github.com/libp2p/go-flow-metrics v0.0.3 h1:8tAs/hSdNvUiLgtlSy3mxwxWP4I9y/jlkPFT7epKdeM= +github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= +github.com/libp2p/go-libp2p v0.0.2/go.mod h1:Qu8bWqFXiocPloabFGUcVG4kk94fLvfC8mWTDdFC9wE= +github.com/libp2p/go-libp2p v0.0.30/go.mod h1:XWT8FGHlhptAv1+3V/+J5mEpzyui/5bvFsNuWYs611A= +github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM= +github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= +github.com/libp2p/go-libp2p v0.3.1/go.mod h1:e6bwxbdYH1HqWTz8faTChKGR0BjPc8p+6SyP8GTTR7Y= +github.com/libp2p/go-libp2p v0.4.0/go.mod h1:9EsEIf9p2UDuwtPd0DwJsAl0qXVxgAnuDGRvHbfATfI= +github.com/libp2p/go-libp2p v0.6.0/go.mod h1:mfKWI7Soz3ABX+XEBR61lGbg+ewyMtJHVt043oWeqwg= +github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= +github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= 
+github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= +github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= +github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM= +github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ= +github.com/libp2p/go-libp2p v0.10.0 h1:7ooOvK1wi8eLpyTppy8TeH43UHy5uI75GAHGJxenUi0= +github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8= +github.com/libp2p/go-libp2p v0.11.0 h1:jb5mqdqYEBAybTEhD8io43Cz5LzVKuWxOK7znSN69jE= +github.com/libp2p/go-libp2p v0.11.0/go.mod h1:3/ogJDXsbbepEfqtZKBR/DedzxJXCeK17t2Z9RE9bEE= +github.com/libp2p/go-libp2p-autonat v0.0.2/go.mod h1:fs71q5Xk+pdnKU014o2iq1RhMs9/PMaG5zXRFNnIIT4= +github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= +github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= +github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= +github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= +github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= +github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= +github.com/libp2p/go-libp2p-autonat v0.2.3 h1:w46bKK3KTOUWDe5mDYMRjJu1uryqBp8HCNDp/TWMqKw= +github.com/libp2p/go-libp2p-autonat v0.2.3/go.mod h1:2U6bNWCNsAG9LEbwccBDQbjzQ8Krdjge1jLTE9rdoMM= +github.com/libp2p/go-libp2p-autonat v0.3.2 h1:OhDSwVVaq7liTaRIsFFYvsaPp0pn2yi0WazejZ4DUmo= +github.com/libp2p/go-libp2p-autonat v0.3.2/go.mod h1:0OzOi1/cVc7UcxfOddemYD5vzEqi4fwRbnZcJGLi68U= +github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A= +github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= 
+github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= +github.com/libp2p/go-libp2p-blankhost v0.1.3/go.mod h1:KML1//wiKR8vuuJO0y3LUd1uLv+tlkGTAr3jC0S5cLg= +github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= +github.com/libp2p/go-libp2p-blankhost v0.1.6 h1:CkPp1/zaCrCnBo0AdsQA0O1VkUYoUOtyHOnoa8gKIcE= +github.com/libp2p/go-libp2p-blankhost v0.1.6/go.mod h1:jONCAJqEP+Z8T6EQviGL4JsQcLx1LgTGtVqFNY8EMfQ= +github.com/libp2p/go-libp2p-blankhost v0.2.0 h1:3EsGAi0CBGcZ33GwRuXEYJLLPoVWyXJ1bcJzAJjINkk= +github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= +github.com/libp2p/go-libp2p-circuit v0.0.1/go.mod h1:Dqm0s/BiV63j8EEAs8hr1H5HudqvCAeXxDyic59lCwE= +github.com/libp2p/go-libp2p-circuit v0.0.9/go.mod h1:uU+IBvEQzCu953/ps7bYzC/D/R0Ho2A9LfKVVCatlqU= +github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= +github.com/libp2p/go-libp2p-circuit v0.1.1/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= +github.com/libp2p/go-libp2p-circuit v0.1.3/go.mod h1:Xqh2TjSy8DD5iV2cCOMzdynd6h8OTBGoV1AWbWor3qM= +github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= +github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= +github.com/libp2p/go-libp2p-circuit v0.2.2 h1:87RLabJ9lrhoiSDDZyCJ80ZlI5TLJMwfyoGAaWXzWqA= +github.com/libp2p/go-libp2p-circuit v0.2.2/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= +github.com/libp2p/go-libp2p-circuit v0.2.3 h1:3Uw1fPHWrp1tgIhBz0vSOxRUmnKL8L/NGUyEd5WfSGM= +github.com/libp2p/go-libp2p-circuit v0.2.3/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= +github.com/libp2p/go-libp2p-circuit v0.3.1 h1:69ENDoGnNN45BNDnBd+8SXSetDuw0eJFcGmOvvtOgBw= +github.com/libp2p/go-libp2p-circuit v0.3.1/go.mod h1:8RMIlivu1+RxhebipJwFDA45DasLx+kkrp4IlJj53F4= +github.com/libp2p/go-libp2p-connmgr 
v0.1.1/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk= +github.com/libp2p/go-libp2p-connmgr v0.2.3/go.mod h1:Gqjg29zI8CwXX21zRxy6gOg8VYu3zVerJRt2KyktzH4= +github.com/libp2p/go-libp2p-connmgr v0.2.4 h1:TMS0vc0TCBomtQJyWr7fYxcVYYhx+q/2gF++G5Jkl/w= +github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQiQfKhHCCs1++ufn0= +github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= +github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= +github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= +github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I= +github.com/libp2p/go-libp2p-core v0.0.6/go.mod h1:0d9xmaYAVY5qmbp/fcgxHT3ZJsLjYeYPMJAUKpaCHrE= +github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= +github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= +github.com/libp2p/go-libp2p-core v0.2.3/go.mod h1:GqhyQqyIAPsxFYXHMjfXgMv03lxsvM0mFzuYA9Ib42A= +github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= +github.com/libp2p/go-libp2p-core v0.2.5/go.mod h1:6+5zJmKhsf7yHn1RbmYDu08qDUpIUxGdqHuEZckmZOA= +github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= +github.com/libp2p/go-libp2p-core v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUhgJJLAa6almrII= +github.com/libp2p/go-libp2p-core v0.4.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= +github.com/libp2p/go-libp2p-core v0.5.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= +github.com/libp2p/go-libp2p-core v0.5.1/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= +github.com/libp2p/go-libp2p-core v0.5.2/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= +github.com/libp2p/go-libp2p-core v0.5.3/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= 
+github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= +github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqeHCopzbYKZdRjM= +github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= +github.com/libp2p/go-libp2p-core v0.5.7 h1:QK3xRwFxqd0Xd9bSZL+8yZ8ncZZbl6Zngd/+Y+A6sgQ= +github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= +github.com/libp2p/go-libp2p-core v0.6.0 h1:u03qofNYTBN+yVg08PuAKylZogVf0xcTEeM8skGf+ak= +github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= +github.com/libp2p/go-libp2p-core v0.6.1 h1:XS+Goh+QegCDojUZp00CaPMfiEADCrLjNZskWE7pvqs= +github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= +github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= +github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= +github.com/libp2p/go-libp2p-daemon v0.2.2/go.mod h1:kyrpsLB2JeNYR2rvXSVWyY0iZuRIMhqzWR3im9BV6NQ= +github.com/libp2p/go-libp2p-discovery v0.0.1/go.mod h1:ZkkF9xIFRLA1xCc7bstYFkd80gBGK8Fc1JqGoU2i+zI= +github.com/libp2p/go-libp2p-discovery v0.0.5/go.mod h1:YtF20GUxjgoKZ4zmXj8j3Nb2TUSBHFlOCetzYdbZL5I= +github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= +github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= +github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= +github.com/libp2p/go-libp2p-discovery v0.4.0 h1:dK78UhopBk48mlHtRCzbdLm3q/81g77FahEBTjcqQT8= +github.com/libp2p/go-libp2p-discovery v0.4.0/go.mod h1:bZ0aJSrFc/eX2llP0ryhb1kpgkPyTo23SJ5b7UQCMh4= +github.com/libp2p/go-libp2p-discovery v0.5.0 
h1:Qfl+e5+lfDgwdrXdu4YNCWyEo3fWuP+WgN9mN0iWviQ= +github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= +github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go= +github.com/libp2p/go-libp2p-host v0.0.3/go.mod h1:Y/qPyA6C8j2coYyos1dfRm0I8+nvd4TGrDGt4tA7JR8= +github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= +github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= +github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= +github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= +github.com/libp2p/go-libp2p-kad-dht v0.2.1/go.mod h1:k7ONOlup7HKzQ68dE6lSnp07cdxdkmnRa+6B4Fh9/w0= +github.com/libp2p/go-libp2p-kad-dht v0.8.3/go.mod h1:HnYYy8taJWESkqiESd1ngb9XX/XGGsMA5G0Vj2HoSh4= +github.com/libp2p/go-libp2p-kbucket v0.2.1/go.mod h1:/Rtu8tqbJ4WQ2KTCOMJhggMukOLNLNPY1EtEWWLxUvc= +github.com/libp2p/go-libp2p-kbucket v0.4.2/go.mod h1:7sCeZx2GkNK1S6lQnGUW5JYZCFPnXzAZCCBBS70lytY= +github.com/libp2p/go-libp2p-loggables v0.0.1/go.mod h1:lDipDlBNYbpyqyPX/KcoO+eq0sJYEVR2JgOexcivchg= +github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8= +github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= +github.com/libp2p/go-libp2p-metrics v0.0.1/go.mod h1:jQJ95SXXA/K1VZi13h52WZMa9ja78zjyy5rspMsC/08= +github.com/libp2p/go-libp2p-mplex v0.1.1/go.mod h1:KUQWpGkCzfV7UIpi8SKsAVxyBgz1c9R5EvxgnwLsb/I= +github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= +github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= +github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= +github.com/libp2p/go-libp2p-mplex v0.2.3 
h1:2zijwaJvpdesST2MXpI5w9wWFRgYtMcpRX7rrw0jmOo= +github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= +github.com/libp2p/go-libp2p-mplex v0.2.4 h1:XFFXaN4jhqnIuJVjYOR3k6bnRj0mFfJOlIuDVww+4Zo= +github.com/libp2p/go-libp2p-mplex v0.2.4/go.mod h1:mI7iOezdWFOisvUwaYd3IDrJ4oVmgoXK8H331ui39CE= +github.com/libp2p/go-libp2p-nat v0.0.2/go.mod h1:QrjXQSD5Dj4IJOdEcjHRkWTSomyxRo6HnUkf/TfQpLQ= +github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= +github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= +github.com/libp2p/go-libp2p-nat v0.0.6 h1:wMWis3kYynCbHoyKLPBEMu4YRLltbm8Mk08HGSfvTkU= +github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= +github.com/libp2p/go-libp2p-net v0.0.1/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= +github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= +github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q= +github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= +github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= +github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= +github.com/libp2p/go-libp2p-noise v0.1.2 h1:IH9GRihQJTx56obm+GnpdPX4KeVIlvpXrP6xnJ0wxWk= +github.com/libp2p/go-libp2p-noise v0.1.2/go.mod h1:9B10b7ueo7TIxZHHcjcDCo5Hd6kfKT2m77by82SFRfE= +github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= +github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= +github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= +github.com/libp2p/go-libp2p-peerstore v0.0.1/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= +github.com/libp2p/go-libp2p-peerstore 
v0.0.6/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= +github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= +github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= +github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs= +github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= +github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= +github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= +github.com/libp2p/go-libp2p-peerstore v0.2.3/go.mod h1:K8ljLdFn590GMttg/luh4caB/3g0vKuY01psze0upRw= +github.com/libp2p/go-libp2p-peerstore v0.2.4 h1:jU9S4jYN30kdzTpDAR7SlHUD+meDUjTODh4waLWF1ws= +github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-peerstore v0.2.6 h1:2ACefBX23iMdJU9Ke+dcXt3w86MIryes9v7In4+Qq3U= +github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= +github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= +github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= +github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= +github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q= +github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= +github.com/libp2p/go-libp2p-pubsub v0.3.6 h1:9oO8W7qIWCYQYyz5z8nUsPcb3rrFehBlkbqvbSVjBxY= +github.com/libp2p/go-libp2p-pubsub v0.3.6/go.mod h1:DTMSVmZZfXodB/pvdTGrY2eHPZ9W2ev7hzTH83OKHrI= 
+github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= +github.com/libp2p/go-libp2p-quic-transport v0.5.0 h1:BUN1lgYNUrtv4WLLQ5rQmC9MCJ6uEXusezGvYRNoJXE= +github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= +github.com/libp2p/go-libp2p-quic-transport v0.8.2/go.mod h1:L+e0q15ZNaYm3seHgbsXjWP8kXLEqz+elLWKk9l8DhM= +github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= +github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= +github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= +github.com/libp2p/go-libp2p-record v0.1.2 h1:M50VKzWnmUrk/M5/Dz99qO9Xh4vs8ijsK+7HkJvRP+0= +github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk= +github.com/libp2p/go-libp2p-record v0.1.3 h1:R27hoScIhQf/A8XJZ8lYpnqh9LatJ5YbHs28kCIfql0= +github.com/libp2p/go-libp2p-record v0.1.3/go.mod h1:yNUff/adKIfPnYQXgp6FQmNu3gLJ6EMg7+/vv2+9pY4= +github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys= +github.com/libp2p/go-libp2p-routing v0.1.0/go.mod h1:zfLhI1RI8RLEzmEaaPwzonRvXeeSHddONWkcTcB54nE= +github.com/libp2p/go-libp2p-routing-helpers v0.2.3/go.mod h1:795bh+9YeoFl99rMASoiVgHdi5bjack0N1+AFAdbvBw= +github.com/libp2p/go-libp2p-secio v0.0.1/go.mod h1:IdG6iQybdcYmbTzxp4J5dwtUEDTOvZrT0opIDVNPrJs= +github.com/libp2p/go-libp2p-secio v0.0.3/go.mod h1:hS7HQ00MgLhRO/Wyu1bTX6ctJKhVpm+j2/S2A5UqYb0= +github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= +github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= +github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= +github.com/libp2p/go-libp2p-secio v0.2.2 h1:rLLPvShPQAcY6eNurKNZq3eZjPWfU9kXF2eI9jIYdrg= 
+github.com/libp2p/go-libp2p-secio v0.2.2/go.mod h1:wP3bS+m5AUnFA+OFO7Er03uO1mncHG0uVwGrwvjYlNY= +github.com/libp2p/go-libp2p-swarm v0.0.1/go.mod h1:mh+KZxkbd3lQnveQ3j2q60BM1Cw2mX36XXQqwfPOShs= +github.com/libp2p/go-libp2p-swarm v0.0.6/go.mod h1:s5GZvzg9xXe8sbeESuFpjt8CJPTCa8mhEusweJqyFy8= +github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= +github.com/libp2p/go-libp2p-swarm v0.2.1/go.mod h1:x07b4zkMFo2EvgPV2bMTlNmdQc8i+74Jjio7xGvsTgU= +github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= +github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= +github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h/GGZes8Wku/M5Y= +github.com/libp2p/go-libp2p-swarm v0.2.7 h1:4lV/sf7f0NuVqunOpt1I11+Z54+xp+m0eeAvxj/LyRc= +github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA= +github.com/libp2p/go-libp2p-swarm v0.2.8 h1:cIUUvytBzNQmGSjnXFlI6UpoBGsaud82mJPIJVfkDlg= +github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= +github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= +github.com/libp2p/go-libp2p-testing v0.1.1 h1:U03z3HnGI7Ni8Xx6ONVZvUFOAzWYmolWf5W5jAOPNmU= +github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= +github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8 h1:v4dvk7YEW8buwCdIVWnhpv0Hp/AAJKRWIxBhmLRZrsk= +github.com/libp2p/go-libp2p-testing 
v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= +github.com/libp2p/go-libp2p-tls v0.1.3 h1:twKMhMu44jQO+HgQK9X8NHO5HkeJu2QbhLzLJpa8oNM= +github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= +github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= +github.com/libp2p/go-libp2p-transport v0.0.4/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A= +github.com/libp2p/go-libp2p-transport v0.0.5/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A= +github.com/libp2p/go-libp2p-transport-upgrader v0.0.1/go.mod h1:NJpUAgQab/8K6K0m+JmZCe5RUXG10UMEx4kWe9Ipj5c= +github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc= +github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= +github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= +github.com/libp2p/go-libp2p-transport-upgrader v0.3.0 h1:q3ULhsknEQ34eVDhv4YwKS8iet69ffs9+Fir6a7weN4= +github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= +github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= +github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= +github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= +github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= +github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= +github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA= +github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= +github.com/libp2p/go-libp2p-yamux v0.2.8 h1:0s3ELSLu2O7hWKfX1YjzudBKCP0kZ+m9e2+0veXzkn4= 
+github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= +github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= +github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= +github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= +github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= +github.com/libp2p/go-mplex v0.0.1/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= +github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= +github.com/libp2p/go-mplex v0.0.4/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= +github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= +github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= +github.com/libp2p/go-mplex v0.1.2 h1:qOg1s+WdGLlpkrczDqmhYzyk3vCfsQ8+RxRTQjOZWwI= +github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= +github.com/libp2p/go-msgio v0.0.1/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.4 h1:agEFehY3zWJFUHK6SEMR7UYmk2z6kC3oeCM7ybLhguA= +github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.6 h1:lQ7Uc0kS1wb1EfRxO2Eir/RJoHkHn7t6o+EiwsYIKJA= +github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= +github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= +github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= +github.com/libp2p/go-nat v0.0.5 h1:qxnwkco8RLKqVh1NmjQ+tJ8p8khNLFxuElYG/TwqW4Q= 
+github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= +github.com/libp2p/go-netroute v0.1.2 h1:UHhB35chwgvcRI392znJA3RCBtZ3MpE3ahNCN5MR4Xg= +github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-netroute v0.1.3 h1:1ngWRx61us/EpaKkdqkMjKk/ufr/JlIFYQAxV2XX8Ig= +github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= +github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.5 h1:pQkejVhF0xp08D4CQUcw8t+BFJeXowja6RVcb5p++EA= +github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.7 h1:eCAzdLejcNVBzP/iZM9vqHnQm+XyCEbSSIheIPRGNsw= +github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-reuseport v0.0.1 h1:7PhkfH73VXfPJYKQ6JwS5I/eVcoyYi9IMNGc6FWpFLw= +github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= +github.com/libp2p/go-reuseport v0.0.2 h1:XSG94b1FJfGA01BUrT82imejHQyTxO4jEWqheyCXYvU= +github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= +github.com/libp2p/go-reuseport-transport v0.0.1/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= +github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= +github.com/libp2p/go-reuseport-transport v0.0.3 h1:zzOeXnTooCkRvoH+bSXEfXhn76+LAiwoneM0gnXjF2M= +github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= +github.com/libp2p/go-reuseport-transport v0.0.4 h1:OZGz0RB620QDGpv300n1zaOcKGGAoGVf8h9txtt/1uM= +github.com/libp2p/go-reuseport-transport v0.0.4/go.mod 
h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= +github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-sockaddr v0.1.0 h1:Y4s3/jNoryVRKEBrkJ576F17CPOaMIzUeCsg7dlTDj0= +github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= +github.com/libp2p/go-stream-muxer v0.1.0/go.mod h1:8JAVsjeRBCWwPoZeH0W1imLOcriqXJyFvB0mR4A04sQ= +github.com/libp2p/go-stream-muxer-multistream v0.1.1/go.mod h1:zmGdfkQ1AzOECIAcccoL8L//laqawOsO03zX8Sa+eGw= +github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= +github.com/libp2p/go-stream-muxer-multistream v0.3.0 h1:TqnSHPJEIqDEO7h1wZZ0p3DXdvDSiLHQidKKUGZtiOY= +github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA= +github.com/libp2p/go-tcp-transport v0.0.1/go.mod h1:mnjg0o0O5TmXUaUIanYPUqkW4+u6mK0en8rlpA6BBTs= +github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19K427vCzQ+xHKH/o= +github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= +github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= +github.com/libp2p/go-tcp-transport v0.2.0 h1:YoThc549fzmNJIh7XjHVtMIFaEDRtIrtWciG5LyYAPo= +github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= +github.com/libp2p/go-tcp-transport v0.2.1 h1:ExZiVQV+h+qL16fzCWtd1HSzPsqWottJ8KXwWaVi8Ns= +github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= +github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= +github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= +github.com/libp2p/go-ws-transport v0.0.1/go.mod h1:p3bKjDWHEgtuKKj+2OdPYs5dAPIjtpQGHF2tJfGz7Ww= 
+github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU= +github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= +github.com/libp2p/go-ws-transport v0.1.2/go.mod h1:dsh2Ld8F+XNmzpkaAijmg5Is+e9l6/1tK/6VFOdN69Y= +github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= +github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= +github.com/libp2p/go-ws-transport v0.3.1 h1:ZX5rWB8nhRRJVaPO6tmkGI/Xx8XNboYX20PW5hXIscw= +github.com/libp2p/go-ws-transport v0.3.1/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= +github.com/libp2p/go-yamux v1.2.1/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.6/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.7 h1:v40A1eSPJDIZwz2AvrV3cxpTZEGDP11QJbukmEhYyQI= +github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/ltcsuite/ltcd v0.20.1-beta h1:ka9ZwUG7oUPppl+7ptuh5VDxGD7TWEJXu/IOOOz1yfY= +github.com/ltcsuite/ltcd v0.20.1-beta/go.mod h1:ZFQaYdYULIuTQiWqs7AUiHD2XhDFeeHW1IH+UYMdABU= +github.com/ltcsuite/ltcutil v0.0.0-20191227053721-6bec450ea6ad/go.mod 
h1:8Vg/LTOO0KYa/vlHWJ6XZAevPQThGH5sufO0Hrou/lA= +github.com/ltcsuite/ltcutil v1.0.2-beta h1:IS86frABIvbpw9ilpQ/zt8t30pFog6DD4tBcgbjdj8g= +github.com/ltcsuite/ltcutil v1.0.2-beta/go.mod h1:G1JGpaqtMm0mPtheTryXnDd9a4KAFuGevdQirlJO1Nw= +github.com/ltcsuite/ltcwallet/wallet/txrules v1.0.0 h1:WDrodrBVO5EbaAT5//i2YOg7DH+FnWSm/kjTvMNT/EY= +github.com/ltcsuite/ltcwallet/wallet/txrules v1.0.0/go.mod h1:H/FiHbbfd9+TPn9ao1Ier7rBosT5j2ejIbHvZqHSEVU= +github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= +github.com/lucas-clemente/quic-go v0.16.0 h1:jJw36wfzGJhmOhAOaOC2lS36WgeqXQszH47A7spo1LI= +github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= +github.com/lucas-clemente/quic-go v0.18.1/go.mod h1:yXttHsSNxQi8AWijC/vLP+OJczXqzHSOcJrM5ITUlCg= +github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= +github.com/marten-seemann/qpack v0.2.0/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= +github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= +github.com/marten-seemann/qtls v0.9.1 h1:O0YKQxNVPaiFgMng0suWEOY2Sb4LT2sRn9Qimq3Z1IQ= +github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk= +github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= 
+github.com/marten-seemann/qtls-go1-15 v0.1.0/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.0 h1:v2XXALHHh6zHfYTJ+cSkwtyffnaOyR1MXaA91mTrb8o= +github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= 
+github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.8 h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuujKs0= +github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= +github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= +github.com/mdlayher/netlink v0.0.0-20190828143259-340058475d09/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/mdlayher/wifi v0.0.0-20190303161829-b1436901ddee/go.mod h1:Evt/EIne46u9PtQbeTx2NTcqURpr5K4SvKtGmBuDPN8= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.31/go.mod 
h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= +github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= 
+github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= +github.com/multiformats/go-multiaddr v0.2.2 h1:XZLDTszBIJe6m0zF6ITBrEcZR73OPUhCBBS9rYAuUzI= +github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= +github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= +github.com/multiformats/go-multiaddr v0.3.1 h1:1bxa+W7j9wZKTZREySx1vPMs2TqrYWjVZ7zE6/XLG1I= +github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= +github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.0.3/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.1.0/go.mod h1:01k2RAqtoXIuPa3DCavAE9/6jc6nM0H3EgZyfUhN2oY= +github.com/multiformats/go-multiaddr-dns v0.2.0 h1:YWJoIDwLePniH7OU5hBnDZV6SWuvJqJ0YtN6pLeH9zA= +github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= +github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= +github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= +github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= +github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= +github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y= +github.com/multiformats/go-multiaddr-net v0.1.3/go.mod 
h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= +github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= +github.com/multiformats/go-multiaddr-net v0.1.5 h1:QoRKvu0xHN1FCFJcMQLbG/yQE2z441L5urvG3+qyz7g= +github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= +github.com/multiformats/go-multiaddr-net v0.2.0 h1:MSXRGN0mFymt6B1yo/6BPnIRpLPEnKgQNvVfCX5VDJk= +github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.2 h1:2pAgScmS1g9XjH7EtAfNhTuyrWYEWcxy0G5Wo85hWDA= +github.com/multiformats/go-multibase v0.0.2/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= +github.com/multiformats/go-multihash v0.0.7/go.mod h1:XuKXPp8VHcTygube3OWZC+aZrA+H1IhmjoCDtJc7PXM= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.14 h1:QoBceQYQQtNUuf6s7wHxnE2c8bhbMqhfGzNI032se/I= +github.com/multiformats/go-multihash v0.0.14/go.mod 
h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-multistream v0.1.1 h1:JlAdpIFhBhGRLxe9W6Om0w++Gd6KMWoFPZL/dEnm9nI= +github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= +github.com/multiformats/go-multistream v0.1.2 h1:knyamLYMPFPngQjGQ0lhnlys3jtVR/3xV6TREUJr+fE= +github.com/multiformats/go-multistream v0.1.2/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= +github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nanmu42/etherscan-api v1.1.1 h1:Pcx6+iIiERfw7ZeybEOx+ykEQDn1P0Shoxbamk/j620= +github.com/nanmu42/etherscan-api v1.1.1/go.mod h1:JNY1YEQ0cL4Ytlnb3Hf3tjk7rIgprDbludV4xVESLcg= +github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= +github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/nats-io/jwt v0.3.0/go.mod 
h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c/go.mod h1:7qN3Y0BvzRUf4LofcoJplQL10lsFDb4PYlePTVwrP28= +github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c h1:1RHs3tNxjXGHeul8z2t6H2N2TlAqpKe5yryJztRx4Jk= +github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.4 
h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8= +github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod 
h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 h1:lDH9UUVJtmYCjyT0CI4q8xvlXPxeZ0gYCVvWbmPlp88= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= +github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= +github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= 
+github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222 h1:goeTyGkArOZIVOMA0dQbyuPWGNQJZGPwPu/QS9GlpnA= +github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= +github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM= +github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod 
h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a h1:hjZfReYVLbqFkAtr2us7vdy04YWz3LVAirzP7reh8+M= +github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= 
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150 h1:ZeU+auZj1iNzN8iVhff6M38Mfu73FQiJve/GEXYJBjE= +github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= +github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE= +github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= 
+github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sercand/kuberesolver v2.1.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= +github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shirou/gopsutil v2.18.12+incompatible h1:1eaJvGomDnH74/5cF4CTmTbLHAriGFsTZppLXDX93OM= +github.com/shirou/gopsutil v2.18.12+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil v2.20.5-0.20200531151128-663af789c085+incompatible h1:+gAR1bMhuoQnZMTWFIvp7ukynULPsteLzG+siZKLtD8= +github.com/shirou/gopsutil v2.20.5-0.20200531151128-663af789c085+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shopspring/decimal 
v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp 
v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= 
+github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a/go.mod h1:LeFCbQYJ3KJlPs/FvPz2dy1tkpxyeNESVyCNNzRXFR0= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero 
v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= +github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg= +github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= +github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE= +github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= +github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM= +github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.0 h1:jlIyCplCJFULU/01vCkhKuTyc3OorI3bJFuw6obfgho= +github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/supranational/blst v0.1.1/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs= +github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/texttheater/golang-levenshtein v0.0.0-20180516184445-d188e65d659e/go.mod h1:XDKHRm5ThF8YJjx001LtgelzsoaEcvnA7lVWz9EeX3g= +github.com/tj/go-spin v1.1.0 h1:lhdWZsvImxvZ3q1C5OIB7d72DuOwP4O2NdBg9PyzNds= +github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= 
+github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= +github.com/tyler-smith/go-bip39 v1.0.2 h1:+t3w+KwLXO6154GNJY+qUtIxLTmFjfUmpguQT1OlOT8= +github.com/tyler-smith/go-bip39 v1.0.2/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= +github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.23.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v1.5.1-0.20181102163054-1fc5c315e03c/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= +github.com/urfave/cli/v2 v2.2.0 h1:JTTnM6wKzdA0Jqodd966MVj4vWbbquZykeX1sKbe2C4= +github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= 
+github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= +github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/weaveworks/common v0.0.0-20200512154658-384f10054ec5/go.mod h1:c98fKi5B9u8OsKGiWHLRKus6ToQ1Tubeow44ECO1uxY= +github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= +github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= +github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba h1:X4n8JG2e2biEZZXdBKt9HX7DN3bYGFUqljqqy0DqgnY= +github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba/go.mod h1:CHQnYnQUEPydYCwuy8lmTHfGmdw9TKrhWV0xLx8l0oM= +github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d h1:Y25auOnuZb/GuJvqMflRSDWBz8/HBRME8fiD+H8zLfs= +github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= +github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b/go.mod 
h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200806213330-63aa96ca5488/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163 h1:TtcUeY2XZSriVWR1pXyfCBWIf/NGC2iUdNw1lofUjUU= +github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= +github.com/whyrusleeping/go-ctrlnet v0.0.0-20180313164037-f564fbbdaa95/go.mod h1:SJqKCCPXRfBFCwXjfNT/skfsceF7+MBFLI2OrvuRA7g= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= +github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= +github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= +github.com/whyrusleeping/go-smux-multiplex v3.0.16+incompatible/go.mod h1:34LEDbeKFZInPUrAG+bjuJmUXONGdEFW7XL0SpTY1y4= +github.com/whyrusleeping/go-smux-multistream v2.0.2+incompatible/go.mod h1:dRWHHvc4HDQSHh9gbKEBbUZ+f2Q8iZTPG3UOGYODxSQ= +github.com/whyrusleeping/go-smux-yamux v2.0.8+incompatible/go.mod h1:6qHUzBXUbB9MXmw3AUdB52L8sEb/hScCqOdW2kj/wuI= +github.com/whyrusleeping/go-smux-yamux v2.0.9+incompatible/go.mod h1:6qHUzBXUbB9MXmw3AUdB52L8sEb/hScCqOdW2kj/wuI= 
+github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4/go.mod h1:K+EVq8d5QcQ2At5VECsA+SNZvWefyBXh8TnIsxo1OvQ= +github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= +github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= +github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d/go.mod h1:g7ckxrjiFh8mi1AY7ox23PZD0g6QU/TxW3U3unX7I3A= +github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= +github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= +github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8= +github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 h1:1cngl9mPEoITZG8s8cVcUy5CeIBYhEESkOB7m6Gmkrk= +github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees= +github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/c-for-go v0.0.0-20201002084316-c134bfab968f h1:nMhj+x/m7ZQsHBz0L3gpytp0v6ogokdbrQDnhB8Kh7s= +github.com/xlab/c-for-go v0.0.0-20201002084316-c134bfab968f/go.mod h1:h/1PEBwj7Ym/8kOuMWvO2ujZ6Lt+TMbySEXNhjjR87I= +github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245 h1:Sw125DKxZhPUI4JLlWugkzsrlB50jR9v2khiD9FxuSo= 
+github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245/go.mod h1:C+diUUz7pxhNY6KAoLgrTYARGWnt82zWTylZlxT92vk= +github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= +github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 h1:oWgZJmC1DorFZDpfMfWg7xk29yEOZiXmo/wZl+utTI8= +github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.12.1/go.mod h1:KatxXrVDzgWwbssUWsF5+cOJHXPvzQ09YSlzGNuhOEo= +github.com/zquestz/grab v0.0.0-20190224022517-abcee96e61b1/go.mod h1:bslhAiUxakrA6z6CHmVyvkfpnxx18RJBwVyx2TluJWw= +go.dedis.ch/fixbuf v1.0.3/go.mod h1:yzJMt34Wa5xD37V5RTdmp38cz3QhMagdGoem9anUalw= +go.dedis.ch/kyber/v3 v3.0.4/go.mod h1:OzvaEnPvKlyrWyp3kGXlFdp7ap1VC6RkZDTaPikqhsQ= +go.dedis.ch/kyber/v3 v3.0.9/go.mod h1:rhNjUUg6ahf8HEg5HUvVBYoWY4boAafX8tYxX+PS+qg= +go.dedis.ch/protobuf v1.0.5/go.mod h1:eIV4wicvi6JK0q/QnfIEGeSFNG0ZeB24kzut5+HaRLo= +go.dedis.ch/protobuf v1.0.7/go.mod h1:pv5ysfkDX/EawiPqcW3ikOxsL5t+BqnV6xHSmE79KI4= +go.dedis.ch/protobuf v1.0.11/go.mod h1:97QR256dnkimeNdfmURz0wAMNVbd1VmLXhG1CrTYrJ4= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod 
h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/dig v1.10.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= +go.uber.org/fx v1.9.0/go.mod h1:mFdUyAUuJ3w4jAckiKSKbldsxy1ojpAMJ+dVZg5Y0Aw= +go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo= +go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod 
h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU= +go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto 
v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto 
v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200707235045-ab33eee955e0 h1:eIYIE7EC5/Wv5Kbz8bJPaq+TN3kq3W8S+LSm62vM0DY= +golang.org/x/crypto v0.0.0-20200707235045-ab33eee955e0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200513190911-00229845015e h1:rMqLP+9XLy+LdbCXHjJHAmTfXCr93W7oruWA6Hq1Alc= +golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod 
v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
+golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 
v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190524152521-dbbf3f1254d4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191025090151-53bf42e6b339/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3 h1:5B6i6EAiSYyejWfvc5Rc9BbI3rzIsrrXfAQBWnYfn+w= +golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980 h1:OjiUf46hAmXblsZdnoSXsEUSKU8r1UEzcL5RVZ4gO9Y= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191030062658-86caa796c7ab/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3 h1:r3P/5xOq/dK1991B65Oy6E1fRF/2d/fSYZJ/fXGVfJc= +golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine 
v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto 
v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84 h1:pSLkPbrjnPyLDYUO2VM9mDLqo2V6CFBY84lFSZAfoi4= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482 h1:i+Aiej6cta/Frzp13/swvwz5O00kYcSe0A/C5Wd7zX8= +google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= 
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= +gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/npipe.v2 
v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= +gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= +gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200603215123-a4a8cb9d2cbc h1:17cdygvFw3DEyNMh81Bk687W74d5pcC5qEKQICv9N6g= +gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200603215123-a4a8cb9d2cbc/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= +gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0= +gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= 
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= +modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ= +modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/golex v1.0.0 h1:wWpDlbK8ejRfSyi0frMyhilD3JBvtcx2AdGDnU+JtsE= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/mathutil v1.1.1 h1:FeylZSVX8S+58VsyJlkEj2bcpdytmp9MmDKZkKx8OIE= 
+modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/strutil v1.1.0 h1:+1/yCzZxY2pZwwrsbH+4T7BQMoLQ9QiBshRC9eicYsc= +modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0 h1:7ccXrupWZIS3twbUGrtKmHS2DXY6xegFua+6O3xgAFU= +modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/vendor/github.com/OpenBazaar/multiwallet/litecoin/sign.go b/vendor/github.com/OpenBazaar/multiwallet/litecoin/sign.go index 8991a42701..9c034fd268 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/litecoin/sign.go +++ b/vendor/github.com/OpenBazaar/multiwallet/litecoin/sign.go @@ -12,7 +12,6 @@ import ( "github.com/btcsuite/btcd/chaincfg" - "github.com/OpenBazaar/spvwallet" wi "github.com/OpenBazaar/wallet-interface" "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcec" @@ -260,32 +259,36 @@ func newUnsignedTransaction(outputs []*wire.TxOut, feePerKb btc.Amount, fetchInp } } -func (w *LitecoinWallet) bumpFee(txid chainhash.Hash) (*chainhash.Hash, error) { +func (w *LitecoinWallet) bumpFee(txid string) (string, error) { txn, err := w.db.Txns().Get(txid) if err != nil { - return nil, err + return "", err } if txn.Height > 0 { - return nil, spvwallet.BumpFeeAlreadyConfirmedError + return "", 
util.BumpFeeAlreadyConfirmedError } if txn.Height < 0 { - return nil, spvwallet.BumpFeeTransactionDeadError + return "", util.BumpFeeTransactionDeadError + } + chTxid, err := chainhash.NewHashFromStr(txid) + if err != nil { + return "", err } // Check utxos for CPFP utxos, _ := w.db.Utxos().GetAll() for _, u := range utxos { - if u.Op.Hash.IsEqual(&txid) && u.AtHeight == 0 { + if u.Op.Hash.IsEqual(chTxid) && u.AtHeight == 0 { addr, err := w.ScriptToAddress(u.ScriptPubkey) if err != nil { - return nil, err + return "", err } key, err := w.km.GetKeyForScript(addr.ScriptAddress()) if err != nil { - return nil, err + return "", err } h, err := hex.DecodeString(u.Op.Hash.String()) if err != nil { - return nil, err + return "", err } n := new(big.Int) n, _ = n.SetString(u.Value, 10) @@ -297,15 +300,15 @@ func (w *LitecoinWallet) bumpFee(txid chainhash.Hash) (*chainhash.Hash, error) { } transactionID, err := w.sweepAddress([]wi.TransactionInput{in}, nil, key, nil, wi.FEE_BUMP) if err != nil { - return nil, err + return "", err } return transactionID, nil } } - return nil, spvwallet.BumpFeeNotFoundError + return "", util.BumpFeeNotFoundError } -func (w *LitecoinWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (*chainhash.Hash, error) { +func (w *LitecoinWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (string, error) { var internalAddr btc.Address if address != nil { internalAddr = *address @@ -314,7 +317,7 @@ func (w *LitecoinWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Ad } script, err := laddr.PayToAddrScript(internalAddr) if err != nil { - return nil, err + return "", err } var val int64 @@ -324,11 +327,11 @@ func (w *LitecoinWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Ad val += in.Value.Int64() ch, err := chainhash.NewHashFromStr(hex.EncodeToString(in.OutpointHash)) 
if err != nil { - return nil, err + return "", err } script, err := laddr.PayToAddrScript(in.LinkedAddress) if err != nil { - return nil, err + return "", err } outpoint := wire.NewOutPoint(ch, in.OutpointIndex) input := wire.NewTxIn(outpoint, []byte{}, [][]byte{}) @@ -340,7 +343,7 @@ func (w *LitecoinWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Ad txType := P2PKH if redeemScript != nil { txType = P2SH_1of2_Multisig - _, err := spvwallet.LockTimeFromRedeemScript(*redeemScript) + _, err := util.LockTimeFromRedeemScript(*redeemScript) if err == nil { txType = P2SH_Multisig_Timelock_1Sig } @@ -371,12 +374,12 @@ func (w *LitecoinWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Ad // Sign tx privKey, err := key.ECPrivKey() if err != nil { - return nil, fmt.Errorf("retrieving private key: %s", err.Error()) + return "", fmt.Errorf("retrieving private key: %s", err.Error()) } pk := privKey.PubKey().SerializeCompressed() addressPub, err := btc.NewAddressPubKey(pk, w.params) if err != nil { - return nil, fmt.Errorf("generating address pub key: %s", err.Error()) + return "", fmt.Errorf("generating address pub key: %s", err.Error()) } getKey := txscript.KeyClosure(func(addr btc.Address) (*btcec.PrivateKey, bool, error) { @@ -404,9 +407,9 @@ func (w *LitecoinWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Ad timeLocked = true tx.Version = 2 for _, txIn := range tx.TxIn { - locktime, err := spvwallet.LockTimeFromRedeemScript(*redeemScript) + locktime, err := util.LockTimeFromRedeemScript(*redeemScript) if err != nil { - return nil, err + return "", err } txIn.Sequence = locktime } @@ -421,13 +424,13 @@ func (w *LitecoinWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Ad tx, i, prevOutScript, txscript.SigHashAll, getKey, getScript, txIn.SignatureScript) if err != nil { - return nil, errors.New("Failed to sign transaction") + return "", errors.New("Failed to sign transaction") } txIn.SignatureScript = script } else { sig, err := 
txscript.RawTxInWitnessSignature(tx, hashes, i, ins[i].Value.Int64(), *redeemScript, txscript.SigHashAll, privKey) if err != nil { - return nil, err + return "", err } var witness wire.TxWitness if timeLocked { @@ -442,10 +445,10 @@ func (w *LitecoinWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Ad // broadcast if err := w.Broadcast(tx); err != nil { - return nil, err + return "", err } txid := tx.TxHash() - return &txid, nil + return txid.String(), nil } func (w *LitecoinWallet) createMultisigSignature(ins []wi.TransactionInput, outs []wi.TransactionOutput, key *hd.ExtendedKey, redeemScript []byte, feePerByte uint64) ([]wi.Signature, error) { @@ -471,7 +474,7 @@ func (w *LitecoinWallet) createMultisigSignature(ins []wi.TransactionInput, outs // Subtract fee txType := P2SH_2of3_Multisig - _, err := spvwallet.LockTimeFromRedeemScript(redeemScript) + _, err := util.LockTimeFromRedeemScript(redeemScript) if err == nil { txType = P2SH_Multisig_Timelock_2Sigs } @@ -526,7 +529,7 @@ func (w *LitecoinWallet) multisign(ins []wi.TransactionInput, outs []wi.Transact // Subtract fee txType := P2SH_2of3_Multisig - _, err := spvwallet.LockTimeFromRedeemScript(redeemScript) + _, err := util.LockTimeFromRedeemScript(redeemScript) if err == nil { txType = P2SH_Multisig_Timelock_2Sigs } diff --git a/vendor/github.com/OpenBazaar/multiwallet/litecoin/wallet.go b/vendor/github.com/OpenBazaar/multiwallet/litecoin/wallet.go index 1c264048f9..99604d94c9 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/litecoin/wallet.go +++ b/vendor/github.com/OpenBazaar/multiwallet/litecoin/wallet.go @@ -20,7 +20,6 @@ import ( "github.com/OpenBazaar/multiwallet/util" wi "github.com/OpenBazaar/wallet-interface" "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" hd "github.com/btcsuite/btcutil/hdkeychain" @@ -260,7 +259,7 @@ func (w *LitecoinWallet) Transactions() ([]wi.Txn, error) { return 
txns, nil } -func (w *LitecoinWallet) GetTransaction(txid chainhash.Hash) (wi.Txn, error) { +func (w *LitecoinWallet) GetTransaction(txid string) (wi.Txn, error) { txn, err := w.db.Txns().Get(txid) if err == nil { tx := wire.NewMsgTx(1) @@ -287,7 +286,7 @@ func (w *LitecoinWallet) GetTransaction(txid chainhash.Hash) (wi.Txn, error) { return txn, err } -func (w *LitecoinWallet) ChainTip() (uint32, chainhash.Hash) { +func (w *LitecoinWallet) ChainTip() (uint32, string) { return w.ws.ChainTip() } @@ -295,7 +294,7 @@ func (w *LitecoinWallet) GetFeePerByte(feeLevel wi.FeeLevel) big.Int { return *big.NewInt(int64(w.fp.GetFeePerByte(feeLevel))) } -func (w *LitecoinWallet) Spend(amount big.Int, addr btcutil.Address, feeLevel wi.FeeLevel, referenceID string, spendAll bool) (*chainhash.Hash, error) { +func (w *LitecoinWallet) Spend(amount big.Int, addr btcutil.Address, feeLevel wi.FeeLevel, referenceID string, spendAll bool) (string, error) { var ( tx *wire.MsgTx err error @@ -303,25 +302,25 @@ func (w *LitecoinWallet) Spend(amount big.Int, addr btcutil.Address, feeLevel wi if spendAll { tx, err = w.buildSpendAllTx(addr, feeLevel) if err != nil { - return nil, err + return "", err } } else { tx, err = w.buildTx(amount.Int64(), addr, feeLevel, nil) if err != nil { - return nil, err + return "", err } } // Broadcast if err := w.Broadcast(tx); err != nil { - return nil, err + return "", err } ch := tx.TxHash() - return &ch, nil + return ch.String(), nil } -func (w *LitecoinWallet) BumpFee(txid chainhash.Hash) (*chainhash.Hash, error) { +func (w *LitecoinWallet) BumpFee(txid string) (string, error) { return w.bumpFee(txid) } @@ -342,7 +341,7 @@ func (w *LitecoinWallet) EstimateSpendFee(amount big.Int, feeLevel wi.FeeLevel) return *big.NewInt(int64(val)), err } -func (w *LitecoinWallet) SweepAddress(ins []wi.TransactionInput, address *btcutil.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (*chainhash.Hash, error) { +func (w *LitecoinWallet) 
SweepAddress(ins []wi.TransactionInput, address *btcutil.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (string, error) { return w.sweepAddress(ins, address, key, redeemScript, feeLevel) } @@ -401,7 +400,7 @@ func (w *LitecoinWallet) ReSyncBlockchain(fromTime time.Time) { go w.ws.UpdateState() } -func (w *LitecoinWallet) GetConfirmations(txid chainhash.Hash) (uint32, uint32, error) { +func (w *LitecoinWallet) GetConfirmations(txid string) (uint32, uint32, error) { txn, err := w.db.Txns().Get(txid) if err != nil { return 0, 0, err diff --git a/vendor/github.com/OpenBazaar/multiwallet/model/mock/interfaces.go b/vendor/github.com/OpenBazaar/multiwallet/model/mock/interfaces.go new file mode 100644 index 0000000000..8872a5bbcd --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/model/mock/interfaces.go @@ -0,0 +1,188 @@ +package mock + +import ( + "encoding/hex" + "errors" + "fmt" + "sync" + + gosocketio "github.com/OpenBazaar/golang-socketio" + "github.com/OpenBazaar/multiwallet/client" + "github.com/OpenBazaar/multiwallet/model" + "github.com/btcsuite/btcutil" +) + +type MockAPIClient struct { + blockChan chan model.Block + txChan chan model.Transaction + + listeningAddrs []btcutil.Address + chainTip int + feePerBlock int + info *model.Info + addrToScript func(btcutil.Address) ([]byte, error) +} + +func NewMockApiClient(addrToScript func(btcutil.Address) ([]byte, error)) model.APIClient { + return &MockAPIClient{ + blockChan: make(chan model.Block), + txChan: make(chan model.Transaction), + chainTip: 0, + addrToScript: addrToScript, + feePerBlock: 1, + info: &MockInfo, + } +} + +func (m *MockAPIClient) Start() error { + return nil +} + +func (m *MockAPIClient) GetInfo() (*model.Info, error) { + return m.info, nil +} + +func (m *MockAPIClient) GetTransaction(txid string) (*model.Transaction, error) { + for _, tx := range MockTransactions { + if tx.Txid == txid { + return &tx, nil + } + } + return nil, errors.New("Not found") +} + 
+func (m *MockAPIClient) GetRawTransaction(txid string) ([]byte, error) { + if raw, ok := MockRawTransactions[txid]; ok { + return raw, nil + } + return nil, errors.New("Not found") +} + +func (m *MockAPIClient) GetTransactions(addrs []btcutil.Address) ([]model.Transaction, error) { + txs := make([]model.Transaction, len(MockTransactions)) + copy(txs, MockTransactions) + txs[0].Outputs[1].ScriptPubKey.Addresses = []string{addrs[0].String()} + txs[1].Inputs[0].Addr = addrs[0].String() + txs[1].Outputs[1].ScriptPubKey.Addresses = []string{addrs[1].String()} + txs[2].Outputs[1].ScriptPubKey.Addresses = []string{addrs[2].String()} + return txs, nil +} + +func (m *MockAPIClient) GetUtxos(addrs []btcutil.Address) ([]model.Utxo, error) { + utxos := make([]model.Utxo, len(MockUtxos)) + copy(utxos, MockUtxos) + utxos[0].Address = addrs[1].String() + script, _ := m.addrToScript(addrs[1]) + utxos[0].ScriptPubKey = hex.EncodeToString(script) + utxos[1].Address = addrs[2].String() + script, _ = m.addrToScript(addrs[2]) + utxos[1].ScriptPubKey = hex.EncodeToString(script) + return utxos, nil +} + +func (m *MockAPIClient) BlockNotify() <-chan model.Block { + return m.blockChan +} + +func (m *MockAPIClient) TransactionNotify() <-chan model.Transaction { + return m.txChan +} + +func (m *MockAPIClient) ListenAddresses(addrs ...btcutil.Address) { + m.listeningAddrs = append(m.listeningAddrs, addrs...) 
+} + +func (m *MockAPIClient) Broadcast(tx []byte) (string, error) { + return "a8c685478265f4c14dada651969c45a65e1aeb8cd6791f2f5bb6a1d9952104d9", nil +} + +func (m *MockAPIClient) GetBestBlock() (*model.Block, error) { + return &MockBlocks[m.chainTip], nil +} + +func (m *MockAPIClient) EstimateFee(nBlocks int) (int, error) { + return m.feePerBlock * nBlocks, nil +} + +func (m *MockAPIClient) Close() {} + +func MockWebsocketClientOnClientPool(p *client.ClientPool) *MockSocketClient { + var ( + callbacksMap = make(map[string]func(*gosocketio.Channel, interface{})) + mockSocketClient = &MockSocketClient{ + callbacks: callbacksMap, + listeningAddresses: []string{}, + } + ) + for _, c := range p.Clients() { + c.SocketClient = mockSocketClient + } + return mockSocketClient +} + +func NewMockWebsocketClient() *MockSocketClient { + var ( + callbacksMap = make(map[string]func(*gosocketio.Channel, interface{})) + mockSocketClient = &MockSocketClient{ + callbacks: callbacksMap, + listeningAddresses: []string{}, + } + ) + return mockSocketClient +} + +type MockSocketClient struct { + callbackMutex sync.Mutex + callbacks map[string]func(*gosocketio.Channel, interface{}) + listeningAddresses []string +} + +func (m *MockSocketClient) SendCallback(method string, args ...interface{}) { + if gosocketChan, ok := args[0].(*gosocketio.Channel); ok { + m.callbacks[method](gosocketChan, args[1]) + } else { + m.callbacks[method](nil, args[1]) + } +} + +func (m *MockSocketClient) IsListeningForAddress(addr string) bool { + for _, a := range m.listeningAddresses { + if a == addr { + return true + } + } + return false +} + +func (m *MockSocketClient) On(method string, callback interface{}) error { + c, ok := callback.(func(h *gosocketio.Channel, args interface{})) + if !ok { + return fmt.Errorf("failed casting mock callback: %+v", callback) + } + + m.callbackMutex.Lock() + defer m.callbackMutex.Unlock() + if method == "bitcoind/addresstxid" { + m.callbacks[method] = c + } else if method == 
"bitcoind/hashblock" { + m.callbacks[method] = c + } + return nil +} + +func (m *MockSocketClient) Emit(method string, args []interface{}) error { + if method == "subscribe" { + subscribeTo, ok := args[0].(string) + if !ok || subscribeTo != "bitcoind/addresstxid" { + return fmt.Errorf("first emit arg is not bitcoind/addresstxid, was: %+v", args[0]) + } + addrs, ok := args[1].([]string) + if !ok { + return fmt.Errorf("second emit arg is not address value, was %+v", args[1]) + } + m.listeningAddresses = append(m.listeningAddresses, addrs...) + } + return nil +} + +func (m *MockSocketClient) Close() {} diff --git a/vendor/github.com/OpenBazaar/multiwallet/model/mock/models.go b/vendor/github.com/OpenBazaar/multiwallet/model/mock/models.go new file mode 100644 index 0000000000..101d7e7a40 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/model/mock/models.go @@ -0,0 +1,283 @@ +package mock + +import "github.com/OpenBazaar/multiwallet/model" + +var MockInfo = model.Info{ + Version: 1, + ProtocolVersion: 9005, + Blocks: 1289596, + TimeOffset: 0, + Connections: 1024, + DifficultyIface: "1.23", + Difficulty: 1.23, + Testnet: true, + RelayFeeIface: "1.00", + RelayFee: 1.00, + Errors: "", + Network: "testnet", +} + +var MockBlocks = []model.Block{ + { + Hash: "000000000000004c68a477283a8db18c1d1c2155b03d9bc23d587ac5e1c4d1af", + Height: 1289594, + PreviousBlockhash: "00000000000003df72ec254d787b216ae913cb82c6ab601c4b3f19fd5d1cf9aa", + Tx: make([]string, 21), + Size: 4705, + Time: 1522349145, + }, + { + Hash: "0000000000000142ffae87224cb67206e93bf934f9fdeba75d02a7050acc6136", + Height: 1289595, + PreviousBlockhash: "000000000000004c68a477283a8db18c1d1c2155b03d9bc23d587ac5e1c4d1af", + Tx: make([]string, 30), + Size: 6623, + Time: 1522349136, + }, + { + Hash: "000000000000033ef24180d5d282d0e6d03b1185e29421fda97e1ba0ffd7c918", + Height: 1289596, + PreviousBlockhash: "0000000000000142ffae87224cb67206e93bf934f9fdeba75d02a7050acc6136", + Tx: make([]string, 5), + Size: 
1186, + Time: 1522349156, + }, +} + +var MockTransactions = []model.Transaction{ + { + Txid: "54ebaa07c42216393b9d5816e40dd608593b92c42e2d6525f45bdd36bce8fe4d", + Version: 2, + Locktime: 512378, + Inputs: []model.Input{ + { + Txid: "6d892f04fc097f430d58ab06229c9b6344a130fc1842da5b990e857daed42194", + Vout: 1, + Sequence: 1, + ValueIface: "0.04294455", + Value: 0.04294455, + N: 0, + ScriptSig: model.Script{ + Hex: "4830450221008665481674067564ef562cfd8d1ca8f1506133fb26a2319e4b8dfba3cedfd5de022038f27121c44e6c64b93b94d72620e11b9de35fd864730175db9176ca98f1ec610121022023e49335a0dddb864ff673468a6cc04e282571b1227933fcf3ff9babbcc662", + }, + Addr: "1C74Gbij8Q5h61W58aSKGvXK4rk82T2A3y", + Satoshis: 4294455, + }, + }, + Outputs: []model.Output{ + { + ScriptPubKey: model.OutScript{ + Script: model.Script{ + Hex: "76a914ff3f7d402fbd6d116ba4a02af9784f3ae9b7108a88ac", + }, + Type: "pay-to-pubkey-hash", + Addresses: []string{"1QGdNEDjWnghrjfTBCTDAPZZ3ffoKvGc9B"}, + }, + ValueIface: "0.01398175", + Value: 0.01398175, + N: 0, + }, + { + ScriptPubKey: model.OutScript{ + Script: model.Script{ + Hex: "76a914f99b84270843bdab59a71ce9af15b89bef5087a388ac", + }, + Type: "pay-to-pubkey-hash", + Addresses: []string{"1PkoZDtXT63BnYGd429Vy4DoyGhdDcjQiN"}, // var + }, + ValueIface: "0.02717080", + Value: 0.02717080, + N: 1, + }, + }, + Time: 1520449061, + BlockHash: "0000000000000000003f1fb88ac3dab0e607e87def0e9031f7bea02cb464a04f", + BlockHeight: 1289475, + Confirmations: 15, + }, + { + Txid: "ff2b865c3b73439912eebf4cce9a15b12c7d7bcdd14ae1110a90541426c4e7c5", + Version: 2, + Locktime: 0, + Inputs: []model.Input{ + { + Txid: "54ebaa07c42216393b9d5816e40dd608593b92c42e2d6525f45bdd36bce8fe4d", + Vout: 1, + Sequence: 1, + ValueIface: "0.02717080", + Value: 0.02717080, + N: 0, + ScriptSig: model.Script{ + Hex: 
"4830450221008665481674067564ef562cfd8d1ca8f1506133fb26a2319e4b8dfba3cedfd5de022038f27121c44e6c64b93b94d72620e11b9de35fd864730175db9176ca98f1ec610121022023e49335a0dddb864ff673468a6cc04e282571b1227933fcf3ff9babbcc662", + }, + Addr: "1PkoZDtXT63BnYGd429Vy4DoyGhdDcjQiN", // var tx0:1 + Satoshis: 2717080, + }, + }, + Outputs: []model.Output{ + { + ScriptPubKey: model.OutScript{ + Script: model.Script{ + Hex: "a9144b18dadba74ad5ef4dbbfea47f9d5aaefe766c6387", + }, + Type: "pay-to-script-hash", + Addresses: []string{"38Y6Nt35hQcEDxyCfCEi62QLGPnr4mhANc"}, + }, + ValueIface: "0.01398175", + Value: 0.01617080, + N: 0, + }, + { + ScriptPubKey: model.OutScript{ + Script: model.Script{ + Hex: "76a914f821d6db9376dc60124de46a8683110877e1f13188ac", + }, + Type: "pay-to-pubkey-hash", + Addresses: []string{"1Pd17mbYsVPcCKLtNdPkngtizTj7zjzqeK"}, // var change + }, + ValueIface: "0.01", + Value: 0.01, + N: 1, + }, + }, + Time: 1520449061, + BlockHash: "0000000000000000003f1fb88ac3dab0e607e87def0e9031f7bea02cb464a04f", + BlockHeight: 1289475, + Confirmations: 10, + }, + { + Txid: "1d4288fa682fa376fbae73dbd74ea04b9ea33011d63315ca9d2d50d081e671d5", + Version: 2, + Locktime: 0, + Inputs: []model.Input{ + { + Txid: "bffb894c27dac82525c1f00a085150be94c70834e8d05ea5e7bb3bd1278d3138", + Vout: 1, + Sequence: 1, + ValueIface: "0.3", + Value: 0.3, + N: 0, + ScriptSig: model.Script{ + Hex: "4830450221008665481674067564ef562cfd8d1ca8f1506133fb26a2319e4b8dfba3cedfd5de022038f27121c44e6c64b93b94d72620e11b9de35fd864730175db9176ca98f1ec610121022023e49335a0dddb864ff673468a6cc04e282571b1227933fcf3ff9babbcc662", + }, + Addr: "1H2ZS69jUZz6CuCtiRCTWXr4AhAWfXc4YT", + Satoshis: 2717080, + }, + }, + Outputs: []model.Output{ + { + ScriptPubKey: model.OutScript{ + Script: model.Script{ + Hex: "76a914e20c0ca5875b1fb0d057e23d032ba88b9dda6f3888ac", + }, + Type: "pay-to-pubkey-hash", + Addresses: []string{"1McE9ZXFhWkFeAqR1hyAm1XaDK8zvyrFPr"}, + }, + ValueIface: "0.2", + Value: 0.2, + N: 0, + }, + { + ScriptPubKey: 
model.OutScript{ + Script: model.Script{ + Hex: "76a914594963287fe6684872340e9078a78d0accbec26288ac", + }, + Type: "pay-to-pubkey-hash", + Addresses: []string{"199747e2arXMBPiWfTqpBTXz3eFbeJPMqS"}, // var + }, + ValueIface: "0.1", + Value: 0.1, + N: 1, + }, + }, + Time: 1520449061, + BlockHash: "0000000000000000003f1fb88ac3dab0e607e87def0e9031f7bea02cb464a04f", + BlockHeight: 1289475, + Confirmations: 2, + }, + { + Txid: "830bf683ab8eec1a75d891689e2989f846508bc7d500cb026ef671c2d1dce20c", + Version: 2, + Locktime: 516299, + Inputs: []model.Input{ + { + Txid: "b466d034076ab53f4b019d573b6c68cf68c5b9a8cfbf07c8d46208d0fcf37762", + Vout: 0, + Sequence: 4294967294, + ValueIface: "0.01983741", + Value: 0.01983741, + N: 0, + ScriptSig: model.Script{ + Hex: "483045022100baa2b3653d48ccf2838caa549d96a40540c838c4f4a8e7048dbe158ec180b3f602206f1bb8c6d055103ce635db562c31ebd8c30565c5d415458affb9f99407ec06d10121039fea462cb64296e01384cffc16af4b86ab14b6027094399bf5a4b52e5c9ffef3", + }, + Addr: "1LUv9VNMZQR4VknWj1TBa1oDgPq53wP7BK", + Satoshis: 1983741, + }, + }, + Outputs: []model.Output{ + { + ScriptPubKey: model.OutScript{ + Script: model.Script{ + Hex: "76a91491a8a9e0375f10b721743782162a0b4f9fae69a888ac", + }, + Type: "pay-to-pubkey-hash", + Addresses: []string{"1EHB2mSaUXzkM6r6XgVHcutFDZoB9e2mZH"}, + }, + ValueIface: "0.01181823", + Value: 0.01181823, + N: 0, + }, + { + ScriptPubKey: model.OutScript{ + Script: model.Script{ + Hex: "a91457fc729da2a83dc8cd3c1835351c4a813c2ae8ba87", + }, + Type: "pay-to-script-hash", + Addresses: []string{"39iF8cDMhctrPVoPbi2Vb1NnErg6CEB7BZ"}, + }, + ValueIface: "0.00751918", + Value: 0.00751918, + N: 1, + }, + }, + Time: 1520449061, + BlockHash: "0000000000000000003f1fb88ac3dab0e607e87def0e9031f7bea02cb464a04f", + BlockHeight: 1289475, + Confirmations: 2, + }, +} + +var MockRawTransactions = map[string][]byte{} + +var MockUtxos = []model.Utxo{ + { + Address: "1Pd17mbYsVPcCKLtNdPkngtizTj7zjzqeK", // tx1:1 + ScriptPubKey: 
"76a914f821d6db9376dc60124de46a8683110877e1f13188ac", + Vout: 1, + Satoshis: 1000000, + Confirmations: 10, + Txid: "ff2b865c3b73439912eebf4cce9a15b12c7d7bcdd14ae1110a90541426c4e7c5", + AmountIface: "0.01", + Amount: 0.01, + }, + { + Address: "199747e2arXMBPiWfTqpBTXz3eFbeJPMqS", //tx2:1 + ScriptPubKey: "76a914594963287fe6684872340e9078a78d0accbec26288ac", + Vout: 1, + Satoshis: 10000000, + Confirmations: 2, + Txid: "1d4288fa682fa376fbae73dbd74ea04b9ea33011d63315ca9d2d50d081e671d5", + AmountIface: "0.1", + Amount: 0.1, + }, + { + Address: "39iF8cDMhctrPVoPbi2Vb1NnErg6CEB7BZ", + ScriptPubKey: "a91457fc729da2a83dc8cd3c1835351c4a813c2ae8ba87", + Vout: 1, + Satoshis: 751918, + Confirmations: 2, + Txid: "830bf683ab8eec1a75d891689e2989f846508bc7d500cb026ef671c2d1dce20c", + AmountIface: "0.00751918", + Amount: 0.00751918, + }, +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/multiwallet.go b/vendor/github.com/OpenBazaar/multiwallet/multiwallet.go index 2307e92834..c1513b9069 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/multiwallet.go +++ b/vendor/github.com/OpenBazaar/multiwallet/multiwallet.go @@ -2,6 +2,7 @@ package multiwallet import ( "errors" + "github.com/OpenBazaar/multiwallet/filecoin" "strings" "time" @@ -98,6 +99,16 @@ func NewMultiWallet(cfg *config.Config) (MultiWallet, error) { } else { multiwallet[wallet.TestnetEthereum] = w } + case wallet.Filecoin: + w, err = filecoin.NewFilecoinWallet(coin, cfg.Mnemonic, cfg.Params, cfg.Proxy, cfg.Cache, cfg.DisableExchangeRates) + if err != nil { + return nil, err + } + if cfg.Params.Name == chaincfg.MainNetParams.Name { + multiwallet[wallet.Filecoin] = w + } else { + multiwallet[wallet.TestnetFilecoin] = w + } } } return multiwallet, nil diff --git a/vendor/github.com/OpenBazaar/multiwallet/service/wallet_service.go b/vendor/github.com/OpenBazaar/multiwallet/service/wallet_service.go index 3b4d48aae6..8b240d4d97 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/service/wallet_service.go +++ 
b/vendor/github.com/OpenBazaar/multiwallet/service/wallet_service.go @@ -98,14 +98,10 @@ func (ws *WalletService) Stop() { ws.doneChan <- struct{}{} } -func (ws *WalletService) ChainTip() (uint32, chainhash.Hash) { +func (ws *WalletService) ChainTip() (uint32, string) { ws.lock.RLock() defer ws.lock.RUnlock() - ch, err := chainhash.NewHashFromStr(ws.bestBlock) - if err != nil { - Log.Errorf("producing BestBlock hash: %s", err.Error()) - } - return ws.chainHeight, *ch + return ws.chainHeight, ws.bestBlock } func (ws *WalletService) AddTransactionListener(callback func(callback wallet.TransactionCallback)) { @@ -525,7 +521,7 @@ func (ws *WalletService) saveSingleTxToDB(u model.Transaction, chainHeight int32 cb.Value = *value cb.WatchOnly = (hits == 0) - saved, err := ws.db.Txns().Get(*txHash) + saved, err := ws.db.Txns().Get(txHash.String()) if err != nil || saved.WatchOnly != cb.WatchOnly { ts := time.Now() if u.Confirmations > 0 { @@ -547,15 +543,15 @@ func (ws *WalletService) saveSingleTxToDB(u model.Transaction, chainHeight int32 cb.Timestamp = ts ws.callbackListeners(cb) } else if height > 0 { - err := ws.db.Txns().UpdateHeight(*txHash, int(height), time.Unix(u.BlockTime, 0)) + err := ws.db.Txns().UpdateHeight(txHash.String(), int(height), time.Unix(u.BlockTime, 0)) if err != nil { Log.Errorf("updating height for tx (%s): %s", txHash.String(), err.Error()) return } if saved.Height != height { cb.Timestamp = saved.Timestamp + ws.callbackListeners(cb) } - ws.callbackListeners(cb) } } diff --git a/vendor/github.com/OpenBazaar/multiwallet/test/factory/transaction.go b/vendor/github.com/OpenBazaar/multiwallet/test/factory/transaction.go new file mode 100644 index 0000000000..77304ec7f4 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/test/factory/transaction.go @@ -0,0 +1,50 @@ +package factory + +import "github.com/OpenBazaar/multiwallet/model" + +func NewTransaction() model.Transaction { + return model.Transaction{ + Txid: 
"1be612e4f2b79af279e0b307337924072b819b3aca09fcb20370dd9492b83428", + Version: 2, + Locktime: 512378, + Inputs: []model.Input{ + { + Txid: "6d892f04fc097f430d58ab06229c9b6344a130fc1842da5b990e857daed42194", + Vout: 1, + Sequence: 1, + ValueIface: "0.04294455", + ScriptSig: model.Script{ + Hex: "4830450221008665481674067564ef562cfd8d1ca8f1506133fb26a2319e4b8dfba3cedfd5de022038f27121c44e6c64b93b94d72620e11b9de35fd864730175db9176ca98f1ec610121022023e49335a0dddb864ff673468a6cc04e282571b1227933fcf3ff9babbcc662", + }, + Addr: "1C74Gbij8Q5h61W58aSKGvXK4rk82T2A3y", + Satoshis: 4294455, + }, + }, + Outputs: []model.Output{ + { + ScriptPubKey: model.OutScript{ + Script: model.Script{ + Hex: "76a914ff3f7d402fbd6d116ba4a02af9784f3ae9b7108a88ac", + }, + Type: "pay-to-pubkey-hash", + Addresses: []string{"1QGdNEDjWnghrjfTBCTDAPZZ3ffoKvGc9B"}, + }, + ValueIface: "0.01398175", + }, + { + ScriptPubKey: model.OutScript{ + Script: model.Script{ + Hex: "a9148a62462d08a977fa89226a56fca7eb01b6fef67c87", + }, + Type: "pay-to-script-hashh", + Addresses: []string{"3EJiuDqsHuAtFqiLGWKVyCfvqoGpWVCCRs"}, + }, + ValueIface: "0.02717080", + }, + }, + Time: 1520449061, + BlockHash: "0000000000000000003f1fb88ac3dab0e607e87def0e9031f7bea02cb464a04f", + BlockHeight: 512476, + Confirmations: 1, + } +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/test/helper.go b/vendor/github.com/OpenBazaar/multiwallet/test/helper.go new file mode 100644 index 0000000000..10386952b4 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/test/helper.go @@ -0,0 +1,85 @@ +package test + +import ( + "testing" + + "github.com/OpenBazaar/multiwallet/model" +) + +func ValidateTransaction(tx, expectedTx model.Transaction, t *testing.T) { + if tx.Txid != expectedTx.Txid { + t.Error("Returned invalid transaction") + } + if tx.Version != expectedTx.Version { + t.Error("Returned invalid transaction") + } + if tx.Locktime != expectedTx.Locktime { + t.Error("Returned invalid transaction") + } + if tx.Time != 
expectedTx.Time { + t.Error("Returned invalid transaction") + } + if tx.BlockHash != expectedTx.BlockHash { + t.Error("Returned invalid transaction") + } + if tx.BlockHeight != expectedTx.BlockHeight { + t.Error("Returned invalid transaction") + } + if tx.Confirmations != expectedTx.Confirmations { + t.Error("Returned invalid transaction") + } + if len(tx.Inputs) != 1 { + t.Error("Returned incorrect number of inputs") + return + } + if tx.Inputs[0].Txid != expectedTx.Inputs[0].Txid { + t.Error("Returned invalid transaction") + } + if tx.Inputs[0].Value != 0.04294455 { + t.Error("Returned invalid transaction") + } + if tx.Inputs[0].Satoshis != expectedTx.Inputs[0].Satoshis { + t.Error("Returned invalid transaction") + } + if tx.Inputs[0].Addr != expectedTx.Inputs[0].Addr { + t.Error("Returned invalid transaction") + } + if tx.Inputs[0].Sequence != expectedTx.Inputs[0].Sequence { + t.Error("Returned invalid transaction") + } + if tx.Inputs[0].Vout != expectedTx.Inputs[0].Vout { + t.Error("Returned invalid transaction") + } + if tx.Inputs[0].ScriptSig.Hex != expectedTx.Inputs[0].ScriptSig.Hex { + t.Error("Returned invalid transaction") + } + + if len(tx.Outputs) != 2 { + t.Error("Returned incorrect number of outputs") + return + } + if tx.Outputs[0].Value != 0.01398175 { + t.Error("Returned invalid transaction") + } + if tx.Outputs[0].ScriptPubKey.Hex != expectedTx.Outputs[0].ScriptPubKey.Hex { + t.Error("Returned invalid transaction") + } + if tx.Outputs[0].ScriptPubKey.Type != expectedTx.Outputs[0].ScriptPubKey.Type { + t.Error("Returned invalid transaction") + } + if tx.Outputs[0].ScriptPubKey.Addresses[0] != expectedTx.Outputs[0].ScriptPubKey.Addresses[0] { + t.Error("Returned invalid transaction") + } + if tx.Outputs[1].Value != 0.02717080 { + t.Error("Returned invalid transaction") + } + if tx.Outputs[1].ScriptPubKey.Hex != expectedTx.Outputs[1].ScriptPubKey.Hex { + t.Error("Returned invalid transaction") + } + if tx.Outputs[1].ScriptPubKey.Type != 
expectedTx.Outputs[1].ScriptPubKey.Type { + t.Error("Returned invalid transaction") + } + if tx.Outputs[1].ScriptPubKey.Addresses[0] != expectedTx.Outputs[1].ScriptPubKey.Addresses[0] { + t.Error("Returned invalid transaction") + } +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/util/balance.go b/vendor/github.com/OpenBazaar/multiwallet/util/balance.go index 8202a0ae09..ef2d7ca4c2 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/util/balance.go +++ b/vendor/github.com/OpenBazaar/multiwallet/util/balance.go @@ -2,12 +2,17 @@ package util import ( "bytes" + "errors" "strconv" wi "github.com/OpenBazaar/wallet-interface" "github.com/btcsuite/btcd/wire" ) +var BumpFeeAlreadyConfirmedError = errors.New("Transaction is confirmed, cannot bump fee") +var BumpFeeTransactionDeadError = errors.New("Cannot bump fee of dead transaction") +var BumpFeeNotFoundError = errors.New("Transaction either doesn't exist or has already been spent") + func CalcBalance(utxos []wi.Utxo, txns []wi.Txn) (confirmed, unconfirmed int64) { var txmap = make(map[string]wi.Txn) for _, tx := range txns { diff --git a/vendor/github.com/OpenBazaar/multiwallet/util/bitconprices.go b/vendor/github.com/OpenBazaar/multiwallet/util/bitconprices.go new file mode 100644 index 0000000000..56298c7d12 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/util/bitconprices.go @@ -0,0 +1,239 @@ +package util + +import ( + "encoding/json" + "errors" + "net" + "net/http" + "reflect" + "strconv" + "sync" + "time" + + "github.com/op/go-logging" + "golang.org/x/net/proxy" +) + +const SatoshiPerBTC int64 = 100000000 + +var log = logging.MustGetLogger("exchangeRates") + +type ExchangeRateProvider struct { + fetchUrl string + cache map[string]float64 + client *http.Client + decoder ExchangeRateDecoder +} + +type ExchangeRateDecoder interface { + decode(dat interface{}, cache map[string]float64) (err error) +} + +// empty structs to tag the different ExchangeRateDecoder implementations +type 
BitcoinAverageDecoder struct{} +type BitPayDecoder struct{} +type BlockchainInfoDecoder struct{} +type BitcoinChartsDecoder struct{} + +type BitcoinPriceFetcher struct { + sync.Mutex + cache map[string]float64 + providers []*ExchangeRateProvider +} + +func NewBitcoinPriceFetcher(dialer proxy.Dialer) *BitcoinPriceFetcher { + b := BitcoinPriceFetcher{ + cache: make(map[string]float64), + } + dial := net.Dial + if dialer != nil { + dial = dialer.Dial + } + tbTransport := &http.Transport{Dial: dial} + client := &http.Client{Transport: tbTransport, Timeout: time.Minute} + + b.providers = []*ExchangeRateProvider{ + {"https://ticker.openbazaar.org/api", b.cache, client, BitcoinAverageDecoder{}}, + {"https://bitpay.com/api/rates", b.cache, client, BitPayDecoder{}}, + {"https://blockchain.info/ticker", b.cache, client, BlockchainInfoDecoder{}}, + {"https://api.bitcoincharts.com/v1/weighted_prices.json", b.cache, client, BitcoinChartsDecoder{}}, + } + return &b +} + +func (b *BitcoinPriceFetcher) GetExchangeRate(currencyCode string) (float64, error) { + currencyCode = NormalizeCurrencyCode(currencyCode) + + b.Lock() + defer b.Unlock() + price, ok := b.cache[currencyCode] + if !ok { + return 0, errors.New("Currency not tracked") + } + return price, nil +} + +func (b *BitcoinPriceFetcher) GetLatestRate(currencyCode string) (float64, error) { + currencyCode = NormalizeCurrencyCode(currencyCode) + + b.fetchCurrentRates() + b.Lock() + defer b.Unlock() + price, ok := b.cache[currencyCode] + if !ok { + return 0, errors.New("Currency not tracked") + } + return price, nil +} + +func (b *BitcoinPriceFetcher) GetAllRates(cacheOK bool) (map[string]float64, error) { + if !cacheOK { + err := b.fetchCurrentRates() + if err != nil { + return nil, err + } + } + b.Lock() + defer b.Unlock() + copy := make(map[string]float64, len(b.cache)) + for k, v := range b.cache { + copy[k] = v + } + return copy, nil +} + +func (b *BitcoinPriceFetcher) UnitsPerCoin() int64 { + return SatoshiPerBTC +} + 
+func (b *BitcoinPriceFetcher) fetchCurrentRates() error { + b.Lock() + defer b.Unlock() + for _, provider := range b.providers { + err := provider.fetch() + if err == nil { + return nil + } + } + log.Error("Failed to fetch bitcoin exchange rates") + return errors.New("All exchange rate API queries failed") +} + +func (provider *ExchangeRateProvider) fetch() (err error) { + if len(provider.fetchUrl) == 0 { + err = errors.New("Provider has no fetchUrl") + return err + } + resp, err := provider.client.Get(provider.fetchUrl) + if err != nil { + log.Error("Failed to fetch from "+provider.fetchUrl, err) + return err + } + decoder := json.NewDecoder(resp.Body) + var dataMap interface{} + err = decoder.Decode(&dataMap) + if err != nil { + log.Error("Failed to decode JSON from "+provider.fetchUrl, err) + return err + } + return provider.decoder.decode(dataMap, provider.cache) +} + +func (b *BitcoinPriceFetcher) Run() { + b.fetchCurrentRates() + ticker := time.NewTicker(time.Minute * 15) + for range ticker.C { + b.fetchCurrentRates() + } +} + +// Decoders +func (b BitcoinAverageDecoder) decode(dat interface{}, cache map[string]float64) (err error) { + data, ok := dat.(map[string]interface{}) + if !ok { + return errors.New(reflect.TypeOf(b).Name() + ".decode: Type assertion failed") + } + for k, v := range data { + if k != "timestamp" { + val, ok := v.(map[string]interface{}) + if !ok { + return errors.New(reflect.TypeOf(b).Name() + ".decode: Type assertion failed") + } + price, ok := val["last"].(float64) + if !ok { + return errors.New(reflect.TypeOf(b).Name() + ".decode: Type assertion failed, missing 'last' (float) field") + } + cache[k] = price + } + } + return nil +} + +func (b BitPayDecoder) decode(dat interface{}, cache map[string]float64) (err error) { + data, ok := dat.([]interface{}) + if !ok { + return errors.New(reflect.TypeOf(b).Name() + ".decode: Type assertion failed, not JSON array") + } + + for _, obj := range data { + code := obj.(map[string]interface{}) + 
k, ok := code["code"].(string) + if !ok { + return errors.New(reflect.TypeOf(b).Name() + ".decode: Type assertion failed, missing 'code' (string) field") + } + price, ok := code["rate"].(float64) + if !ok { + return errors.New(reflect.TypeOf(b).Name() + ".decode: Type assertion failed, missing 'rate' (float) field") + } + cache[k] = price + } + return nil +} + +func (b BlockchainInfoDecoder) decode(dat interface{}, cache map[string]float64) (err error) { + data, ok := dat.(map[string]interface{}) + if !ok { + return errors.New(reflect.TypeOf(b).Name() + ".decode: Type assertion failed, not JSON object") + } + for k, v := range data { + val, ok := v.(map[string]interface{}) + if !ok { + return errors.New(reflect.TypeOf(b).Name() + ".decode: Type assertion failed") + } + price, ok := val["last"].(float64) + if !ok { + return errors.New(reflect.TypeOf(b).Name() + ".decode: Type assertion failed, missing 'last' (float) field") + } + cache[k] = price + } + return nil +} + +func (b BitcoinChartsDecoder) decode(dat interface{}, cache map[string]float64) (err error) { + data, ok := dat.(map[string]interface{}) + if !ok { + return errors.New(reflect.TypeOf(b).Name() + ".decode: Type assertion failed, not JSON object") + } + for k, v := range data { + if k != "timestamp" { + val, ok := v.(map[string]interface{}) + if !ok { + return errors.New("Type assertion failed") + } + p, ok := val["24h"] + if !ok { + continue + } + pr, ok := p.(string) + if !ok { + return errors.New("Type assertion failed") + } + price, err := strconv.ParseFloat(pr, 64) + if err != nil { + return err + } + cache[k] = price + } + } + return nil +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/util/fees.go b/vendor/github.com/OpenBazaar/multiwallet/util/fees.go index add68e4126..a161a6c6b1 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/util/fees.go +++ b/vendor/github.com/OpenBazaar/multiwallet/util/fees.go @@ -45,8 +45,8 @@ func NewFeeProvider(maxFee, priorityFee, normalFee, economicFee, 
superEconomicFe maxFee: maxFee, priorityFee: priorityFee, normalFee: normalFee, - superEconomicFee: superEconomicFee, economicFee: economicFee, + superEconomicFee: superEconomicFee, exchangeRates: exchangeRates, } } @@ -54,7 +54,7 @@ func NewFeeProvider(maxFee, priorityFee, normalFee, economicFee, superEconomicFe func (fp *FeeProvider) GetFeePerByte(feeLevel wallet.FeeLevel) uint64 { defaultFee := func() uint64 { switch feeLevel { - case wallet.PRIORITY: + case wallet.PRIOIRTY: return fp.priorityFee case wallet.NORMAL: return fp.normalFee @@ -79,12 +79,14 @@ func (fp *FeeProvider) GetFeePerByte(feeLevel wallet.FeeLevel) uint64 { var target FeeTargetInUSDCents switch feeLevel { - case wallet.PRIORITY: + case wallet.PRIOIRTY: target = PriorityTarget case wallet.NORMAL: target = NormalTarget case wallet.ECONOMIC: target = EconomicTarget + case wallet.SUPER_ECONOMIC: + return fp.superEconomicFee case wallet.FEE_BUMP: target = PriorityTarget * 2 default: diff --git a/vendor/github.com/OpenBazaar/multiwallet/util/locktime.go b/vendor/github.com/OpenBazaar/multiwallet/util/locktime.go new file mode 100644 index 0000000000..4816159306 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/util/locktime.go @@ -0,0 +1,33 @@ +package util + +import "errors" + +func LockTimeFromRedeemScript(redeemScript []byte) (uint32, error) { + if len(redeemScript) < 113 { + return 0, errors.New("Redeem script invalid length") + } + if redeemScript[106] != 103 { + return 0, errors.New("Invalid redeem script") + } + if redeemScript[107] == 0 { + return 0, nil + } + if 81 <= redeemScript[107] && redeemScript[107] <= 96 { + return uint32((redeemScript[107] - 81) + 1), nil + } + var v []byte + op := redeemScript[107] + if 1 <= op && op <= 75 { + for i := 0; i < int(op); i++ { + v = append(v, []byte{redeemScript[108+i]}...) 
+ } + } else { + return 0, errors.New("Too many bytes pushed for sequence") + } + var result int64 + for i, val := range v { + result |= int64(val) << uint8(8*i) + } + + return uint32(result), nil +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/zcash/sign.go b/vendor/github.com/OpenBazaar/multiwallet/zcash/sign.go index c3180d6e31..2b73e31870 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/zcash/sign.go +++ b/vendor/github.com/OpenBazaar/multiwallet/zcash/sign.go @@ -13,7 +13,6 @@ import ( "github.com/btcsuite/btcd/chaincfg" "github.com/minio/blake2b-simd" - "github.com/OpenBazaar/spvwallet" wi "github.com/OpenBazaar/wallet-interface" "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/chaincfg/chainhash" @@ -42,7 +41,7 @@ var ( const ( sigHashMask = 0x1f - branchID = 0xf5b9230b + branchID = 0x2BB40E60 ) func (w *ZCashWallet) buildTx(amount int64, addr btc.Address, feeLevel wi.FeeLevel, optionalOutput *wire.TxOut) (*wire.MsgTx, error) { @@ -297,32 +296,36 @@ func newUnsignedTransaction(outputs []*wire.TxOut, feePerKb btc.Amount, fetchInp } } -func (w *ZCashWallet) bumpFee(txid chainhash.Hash) (*chainhash.Hash, error) { +func (w *ZCashWallet) bumpFee(txid string) (string, error) { txn, err := w.db.Txns().Get(txid) if err != nil { - return nil, err + return "", err } if txn.Height > 0 { - return nil, spvwallet.BumpFeeAlreadyConfirmedError + return "", util.BumpFeeAlreadyConfirmedError } if txn.Height < 0 { - return nil, spvwallet.BumpFeeTransactionDeadError + return "", util.BumpFeeTransactionDeadError + } + chTxid, err := chainhash.NewHashFromStr(txid) + if err != nil { + return "", err } // Check utxos for CPFP utxos, _ := w.db.Utxos().GetAll() for _, u := range utxos { - if u.Op.Hash.IsEqual(&txid) && u.AtHeight == 0 { + if u.Op.Hash.IsEqual(chTxid) && u.AtHeight == 0 { addr, err := w.ScriptToAddress(u.ScriptPubkey) if err != nil { - return nil, err + return "", err } key, err := w.km.GetKeyForScript(addr.ScriptAddress()) if err != nil { - 
return nil, err + return "", err } h, err := hex.DecodeString(u.Op.Hash.String()) if err != nil { - return nil, err + return "", err } n := new(big.Int) n, _ = n.SetString(u.Value, 10) @@ -334,15 +337,15 @@ func (w *ZCashWallet) bumpFee(txid chainhash.Hash) (*chainhash.Hash, error) { } transactionID, err := w.sweepAddress([]wi.TransactionInput{in}, nil, key, nil, wi.FEE_BUMP) if err != nil { - return nil, err + return "", err } return transactionID, nil } } - return nil, spvwallet.BumpFeeNotFoundError + return "", util.BumpFeeNotFoundError } -func (w *ZCashWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (*chainhash.Hash, error) { +func (w *ZCashWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (string, error) { var internalAddr btc.Address if address != nil { internalAddr = *address @@ -351,7 +354,7 @@ func (w *ZCashWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Addre } script, err := zaddr.PayToAddrScript(internalAddr) if err != nil { - return nil, err + return "", err } var val int64 @@ -363,11 +366,11 @@ func (w *ZCashWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Addre values = append(values, in.Value.Int64()) ch, err := chainhash.NewHashFromStr(hex.EncodeToString(in.OutpointHash)) if err != nil { - return nil, err + return "", err } script, err := zaddr.PayToAddrScript(in.LinkedAddress) if err != nil { - return nil, err + return "", err } outpoint := wire.NewOutPoint(ch, in.OutpointIndex) input := wire.NewTxIn(outpoint, []byte{}, [][]byte{}) @@ -379,7 +382,7 @@ func (w *ZCashWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Addre txType := P2PKH if redeemScript != nil { txType = P2SH_1of2_Multisig - _, err := spvwallet.LockTimeFromRedeemScript(*redeemScript) + _, err := util.LockTimeFromRedeemScript(*redeemScript) if err == nil { txType = 
P2SH_Multisig_Timelock_1Sig } @@ -412,13 +415,13 @@ func (w *ZCashWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Addre // Sign tx privKey, err := key.ECPrivKey() if err != nil { - return nil, err + return "", err } for i, txIn := range tx.TxIn { sig, err := rawTxInSignature(tx, i, *redeemScript, txscript.SigHashAll, privKey, values[i]) if err != nil { - return nil, errors.New("failed to sign transaction") + return "", errors.New("failed to sign transaction") } builder := txscript.NewScriptBuilder() builder.AddOp(txscript.OP_0) @@ -428,7 +431,7 @@ func (w *ZCashWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Addre } script, err := builder.Script() if err != nil { - return nil, err + return "", err } txIn.SignatureScript = script } @@ -436,9 +439,9 @@ func (w *ZCashWallet) sweepAddress(ins []wi.TransactionInput, address *btc.Addre // broadcast txid, err := w.Broadcast(tx) if err != nil { - return nil, err + return "", err } - return chainhash.NewHashFromStr(txid) + return txid, nil } func (w *ZCashWallet) createMultisigSignature(ins []wi.TransactionInput, outs []wi.TransactionOutput, key *hd.ExtendedKey, redeemScript []byte, feePerByte uint64) ([]wi.Signature, error) { diff --git a/vendor/github.com/OpenBazaar/multiwallet/zcash/wallet.go b/vendor/github.com/OpenBazaar/multiwallet/zcash/wallet.go index 3cf2a12c07..e1179f813c 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/zcash/wallet.go +++ b/vendor/github.com/OpenBazaar/multiwallet/zcash/wallet.go @@ -19,7 +19,6 @@ import ( zaddr "github.com/OpenBazaar/multiwallet/zcash/address" wi "github.com/OpenBazaar/wallet-interface" "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" hd "github.com/btcsuite/btcutil/hdkeychain" @@ -260,7 +259,7 @@ func (w *ZCashWallet) Transactions() ([]wi.Txn, error) { return txns, nil } -func (w *ZCashWallet) GetTransaction(txid chainhash.Hash) (wi.Txn, error) { +func 
(w *ZCashWallet) GetTransaction(txid string) (wi.Txn, error) { txn, err := w.db.Txns().Get(txid) if err == nil { tx := wire.NewMsgTx(1) @@ -287,7 +286,7 @@ func (w *ZCashWallet) GetTransaction(txid chainhash.Hash) (wi.Txn, error) { return txn, err } -func (w *ZCashWallet) ChainTip() (uint32, chainhash.Hash) { +func (w *ZCashWallet) ChainTip() (uint32, string) { return w.ws.ChainTip() } @@ -295,7 +294,7 @@ func (w *ZCashWallet) GetFeePerByte(feeLevel wi.FeeLevel) big.Int { return *big.NewInt(int64(w.fp.GetFeePerByte(feeLevel))) } -func (w *ZCashWallet) Spend(amount big.Int, addr btcutil.Address, feeLevel wi.FeeLevel, referenceID string, spendAll bool) (*chainhash.Hash, error) { +func (w *ZCashWallet) Spend(amount big.Int, addr btcutil.Address, feeLevel wi.FeeLevel, referenceID string, spendAll bool) (string, error) { var ( tx *wire.MsgTx err error @@ -303,24 +302,24 @@ func (w *ZCashWallet) Spend(amount big.Int, addr btcutil.Address, feeLevel wi.Fe if spendAll { tx, err = w.buildSpendAllTx(addr, feeLevel) if err != nil { - return nil, err + return "", err } } else { tx, err = w.buildTx(amount.Int64(), addr, feeLevel, nil) if err != nil { - return nil, err + return "", err } } // Broadcast txid, err := w.Broadcast(tx) if err != nil { - return nil, err + return "", err } - return chainhash.NewHashFromStr(txid) + return txid, nil } -func (w *ZCashWallet) BumpFee(txid chainhash.Hash) (*chainhash.Hash, error) { +func (w *ZCashWallet) BumpFee(txid string) (string, error) { return w.bumpFee(txid) } @@ -341,7 +340,7 @@ func (w *ZCashWallet) EstimateSpendFee(amount big.Int, feeLevel wi.FeeLevel) (bi return *big.NewInt(int64(val)), err } -func (w *ZCashWallet) SweepAddress(ins []wi.TransactionInput, address *btcutil.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (*chainhash.Hash, error) { +func (w *ZCashWallet) SweepAddress(ins []wi.TransactionInput, address *btcutil.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (string, 
error) { return w.sweepAddress(ins, address, key, redeemScript, feeLevel) } @@ -400,7 +399,7 @@ func (w *ZCashWallet) ReSyncBlockchain(fromTime time.Time) { go w.ws.UpdateState() } -func (w *ZCashWallet) GetConfirmations(txid chainhash.Hash) (uint32, uint32, error) { +func (w *ZCashWallet) GetConfirmations(txid string) (uint32, uint32, error) { txn, err := w.db.Txns().Get(txid) if err != nil { return 0, 0, err diff --git a/vendor/github.com/OpenBazaar/spvwallet/fees.go b/vendor/github.com/OpenBazaar/spvwallet/fees.go index 8d116e8c6b..46f15bd8f3 100644 --- a/vendor/github.com/OpenBazaar/spvwallet/fees.go +++ b/vendor/github.com/OpenBazaar/spvwallet/fees.go @@ -81,16 +81,16 @@ func (fp *FeeProvider) GetFeePerByte(feeLevel wallet.FeeLevel) uint64 { fees = fp.cache.fees } switch feeLevel { - case wallet.PRIORITY: - return fp.selectFee(fees.Priority, wallet.PRIORITY) + case wallet.PRIOIRTY: + return fp.selectFee(fees.Priority, wallet.PRIOIRTY) case wallet.NORMAL: - return fp.selectFee(fees.Normal, wallet.PRIORITY) + return fp.selectFee(fees.Normal, wallet.PRIOIRTY) case wallet.ECONOMIC: - return fp.selectFee(fees.Economic, wallet.PRIORITY) + return fp.selectFee(fees.Economic, wallet.PRIOIRTY) case wallet.SUPER_ECONOMIC: - return fp.selectFee(fees.SuperEconomic, wallet.PRIORITY) + return fp.selectFee(fees.SuperEconomic, wallet.PRIOIRTY) case wallet.FEE_BUMP: - return fp.selectFee(fees.Priority, wallet.PRIORITY) + return fp.selectFee(fees.Priority, wallet.PRIOIRTY) default: return fp.normalFee } @@ -108,7 +108,7 @@ func (fp *FeeProvider) selectFee(fee uint64, feeLevel wallet.FeeLevel) uint64 { func (fp *FeeProvider) defaultFee(feeLevel wallet.FeeLevel) uint64 { switch feeLevel { - case wallet.PRIORITY: + case wallet.PRIOIRTY: return fp.priorityFee case wallet.NORMAL: return fp.normalFee diff --git a/vendor/github.com/OpenBazaar/spvwallet/sortsignsend.go b/vendor/github.com/OpenBazaar/spvwallet/sortsignsend.go index 1947afcc3d..ed24876528 100644 --- 
a/vendor/github.com/OpenBazaar/spvwallet/sortsignsend.go +++ b/vendor/github.com/OpenBazaar/spvwallet/sortsignsend.go @@ -102,7 +102,7 @@ func (w *SPVWallet) gatherCoins() map[coinset.Coin]*hd.ExtendedKey { return m } -func (w *SPVWallet) Spend(amount big.Int, addr btc.Address, feeLevel wallet.FeeLevel, referenceID string, spendAll bool) (*chainhash.Hash, error) { +func (w *SPVWallet) Spend(amount big.Int, addr btc.Address, feeLevel wallet.FeeLevel, referenceID string, spendAll bool) (string, error) { var ( tx *wire.MsgTx err error @@ -110,36 +110,36 @@ func (w *SPVWallet) Spend(amount big.Int, addr btc.Address, feeLevel wallet.FeeL if spendAll { tx, err = w.buildSpendAllTx(addr, feeLevel) if err != nil { - return nil, err + return "", err } } else { tx, err = w.buildTx(amount.Int64(), addr, feeLevel, nil) if err != nil { - return nil, err + return "", err } } if err := w.Broadcast(tx); err != nil { - return nil, err + return "", err } ch := tx.TxHash() - return &ch, nil + return ch.String(), nil } var BumpFeeAlreadyConfirmedError = errors.New("Transaction is confirmed, cannot bump fee") var BumpFeeTransactionDeadError = errors.New("Cannot bump fee of dead transaction") var BumpFeeNotFoundError = errors.New("Transaction either doesn't exist or has already been spent") -func (w *SPVWallet) BumpFee(txid chainhash.Hash) (*chainhash.Hash, error) { +func (w *SPVWallet) BumpFee(txid string) (string, error) { txn, err := w.txstore.Txns().Get(txid) if err != nil { - return nil, err + return "", err } if txn.Height > 0 { - return nil, BumpFeeAlreadyConfirmedError + return "", BumpFeeAlreadyConfirmedError } if txn.Height < 0 { - return nil, BumpFeeTransactionDeadError + return "", BumpFeeTransactionDeadError } // Check stxos for RBF opportunity /*stxos, _ := w.txstore.Stxos().GetAll() @@ -196,18 +196,18 @@ func (w *SPVWallet) BumpFee(txid chainhash.Hash) (*chainhash.Hash, error) { // Check utxos for CPFP utxos, _ := w.txstore.Utxos().GetAll() for _, u := range utxos { - if 
u.Op.Hash.IsEqual(&txid) && u.AtHeight == 0 { + if u.Op.Hash.String() == txid && u.AtHeight == 0 { addr, err := w.ScriptToAddress(u.ScriptPubkey) if err != nil { - return nil, err + return "", err } key, err := w.keyManager.GetKeyForScript(addr.ScriptAddress()) if err != nil { - return nil, err + return "", err } h, err := hex.DecodeString(u.Op.Hash.String()) if err != nil { - return nil, err + return "", err } val0, _ := new(big.Int).SetString(u.Value, 10) in := wallet.TransactionInput{ @@ -218,12 +218,12 @@ func (w *SPVWallet) BumpFee(txid chainhash.Hash) (*chainhash.Hash, error) { } transactionID, err := w.SweepAddress([]wallet.TransactionInput{in}, nil, key, nil, wallet.FEE_BUMP) if err != nil { - return nil, err + return "", err } return transactionID, nil } } - return nil, BumpFeeNotFoundError + return "", BumpFeeNotFoundError } func (w *SPVWallet) EstimateFee(ins []wallet.TransactionInput, outs []wallet.TransactionOutput, feePerByte big.Int) big.Int { @@ -471,7 +471,7 @@ func (w *SPVWallet) Multisign(ins []wallet.TransactionInput, outs []wallet.Trans return buf.Bytes(), nil } -func (w *SPVWallet) SweepAddress(ins []wallet.TransactionInput, address *btc.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wallet.FeeLevel) (*chainhash.Hash, error) { +func (w *SPVWallet) SweepAddress(ins []wallet.TransactionInput, address *btc.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wallet.FeeLevel) (string, error) { var internalAddr btc.Address if address != nil { internalAddr = *address @@ -480,7 +480,7 @@ func (w *SPVWallet) SweepAddress(ins []wallet.TransactionInput, address *btc.Add } script, err := txscript.PayToAddrScript(internalAddr) if err != nil { - return nil, err + return "", err } var val int64 @@ -490,11 +490,11 @@ func (w *SPVWallet) SweepAddress(ins []wallet.TransactionInput, address *btc.Add val += in.Value.Int64() ch, err := chainhash.NewHashFromStr(hex.EncodeToString(in.OutpointHash)) if err != nil { - return nil, err + return 
"", err } script, err := txscript.PayToAddrScript(in.LinkedAddress) if err != nil { - return nil, err + return "", err } outpoint := wire.NewOutPoint(ch, in.OutpointIndex) input := wire.NewTxIn(outpoint, []byte{}, [][]byte{}) @@ -537,7 +537,7 @@ func (w *SPVWallet) SweepAddress(ins []wallet.TransactionInput, address *btc.Add // Sign tx privKey, err := key.ECPrivKey() if err != nil { - return nil, err + return "", err } pk := privKey.PubKey().SerializeCompressed() addressPub, err := btc.NewAddressPubKey(pk, w.params) @@ -569,7 +569,7 @@ func (w *SPVWallet) SweepAddress(ins []wallet.TransactionInput, address *btc.Add for _, txIn := range tx.TxIn { locktime, err := LockTimeFromRedeemScript(*redeemScript) if err != nil { - return nil, err + return "", err } txIn.Sequence = locktime } @@ -584,13 +584,13 @@ func (w *SPVWallet) SweepAddress(ins []wallet.TransactionInput, address *btc.Add tx, i, prevOutScript, txscript.SigHashAll, getKey, getScript, txIn.SignatureScript) if err != nil { - return nil, errors.New("Failed to sign transaction") + return "", errors.New("Failed to sign transaction") } txIn.SignatureScript = script } else { sig, err := txscript.RawTxInWitnessSignature(tx, hashes, i, ins[i].Value.Int64(), *redeemScript, txscript.SigHashAll, privKey) if err != nil { - return nil, err + return "", err } var witness wire.TxWitness if timeLocked { @@ -606,7 +606,7 @@ func (w *SPVWallet) SweepAddress(ins []wallet.TransactionInput, address *btc.Add // broadcast w.Broadcast(tx) txid := tx.TxHash() - return &txid, nil + return txid.String(), nil } func (w *SPVWallet) buildTx(amount int64, addr btc.Address, feeLevel wallet.FeeLevel, optionalOutput *wire.TxOut) (*wire.MsgTx, error) { diff --git a/vendor/github.com/OpenBazaar/spvwallet/txstore.go b/vendor/github.com/OpenBazaar/spvwallet/txstore.go index 1fde4b6cf9..54f1f60435 100644 --- a/vendor/github.com/OpenBazaar/spvwallet/txstore.go +++ b/vendor/github.com/OpenBazaar/spvwallet/txstore.go @@ -362,7 +362,7 @@ func (ts 
*TxStore) Ingest(tx *wire.MsgTx, height int32, timestamp time.Time) (ui if hits > 0 || matchesWatchOnly { ts.cbMutex.Lock() ts.txidsMutex.Lock() - txn, err := ts.Txns().Get(tx.TxHash()) + txn, err := ts.Txns().Get(tx.TxHash().String()) shouldCallback := false if err != nil { cb.Value = *big.NewInt(value) @@ -376,7 +376,7 @@ func (ts *TxStore) Ingest(tx *wire.MsgTx, height int32, timestamp time.Time) (ui // Let's check the height before committing so we don't allow rogue peers to send us a lose // tx that resets our height to zero. if err == nil && txn.Height <= 0 { - ts.Txns().UpdateHeight(tx.TxHash(), int(height), txn.Timestamp) + ts.Txns().UpdateHeight(tx.TxHash().String(), int(height), txn.Timestamp) ts.txids[tx.TxHash().String()] = height if height > 0 { val0, _ := new(big.Int).SetString(txn.Value, 10) @@ -409,7 +409,7 @@ func (ts *TxStore) markAsDead(txid chainhash.Hash) error { if err != nil { return err } - err = ts.Txns().UpdateHeight(s.SpendTxid, -1, time.Now()) + err = ts.Txns().UpdateHeight(s.SpendTxid.String(), -1, time.Now()) if err != nil { return err } @@ -448,7 +448,7 @@ func (ts *TxStore) markAsDead(txid chainhash.Hash) error { } } } - ts.Txns().UpdateHeight(txid, -1, time.Now()) + ts.Txns().UpdateHeight(txid.String(), -1, time.Now()) return nil } diff --git a/vendor/github.com/OpenBazaar/spvwallet/wallet.go b/vendor/github.com/OpenBazaar/spvwallet/wallet.go index 25f19e1fae..efd804b4e7 100644 --- a/vendor/github.com/OpenBazaar/spvwallet/wallet.go +++ b/vendor/github.com/OpenBazaar/spvwallet/wallet.go @@ -379,7 +379,7 @@ func (w *SPVWallet) Transactions() ([]wallet.Txn, error) { return txns, nil } -func (w *SPVWallet) GetTransaction(txid chainhash.Hash) (wallet.Txn, error) { +func (w *SPVWallet) GetTransaction(txid string) (wallet.Txn, error) { txn, err := w.txstore.Txns().Get(txid) if err == nil { tx := wire.NewMsgTx(1) @@ -410,7 +410,7 @@ func (w *SPVWallet) GetTransaction(txid chainhash.Hash) (wallet.Txn, error) { return txn, err } -func (w 
*SPVWallet) GetConfirmations(txid chainhash.Hash) (uint32, uint32, error) { +func (w *SPVWallet) GetConfirmations(txid string) (uint32, uint32, error) { txn, err := w.txstore.Txns().Get(txid) if err != nil { return 0, 0, err @@ -451,13 +451,12 @@ func (w *SPVWallet) AddTransactionListener(callback func(wallet.TransactionCallb w.txstore.listeners = append(w.txstore.listeners, callback) } -func (w *SPVWallet) ChainTip() (uint32, chainhash.Hash) { - var ch chainhash.Hash +func (w *SPVWallet) ChainTip() (uint32, string) { sh, err := w.blockchain.db.GetBestHeader() if err != nil { - return 0, ch + return 0, "" } - return sh.height, sh.header.BlockHash() + return sh.height, sh.header.BlockHash().String() } func (w *SPVWallet) AddWatchedAddresses(addrs ...btc.Address) error { diff --git a/vendor/github.com/OpenBazaar/wallet-interface/datastore.go b/vendor/github.com/OpenBazaar/wallet-interface/datastore.go index 4d91b9e0c6..fc3e16848b 100644 --- a/vendor/github.com/OpenBazaar/wallet-interface/datastore.go +++ b/vendor/github.com/OpenBazaar/wallet-interface/datastore.go @@ -24,12 +24,14 @@ const ( Zcash = 133 BitcoinCash = 145 Ethereum = 60 + Filecoin = 461 TestnetBitcoin = 1000000 TestnetLitecoin = 1000001 TestnetZcash = 1000133 TestnetBitcoinCash = 1000145 TestnetEthereum = 1000060 + TestnetFilecoin = 1000461 ) func (c *CoinType) String() string { @@ -44,6 +46,8 @@ func (c *CoinType) String() string { return "Litecoin" case Ethereum: return "Ethereum" + case Filecoin: + return "Filecoin" case TestnetBitcoin: return "Testnet Bitcoin" case TestnetBitcoinCash: @@ -54,6 +58,8 @@ func (c *CoinType) String() string { return "Testnet Litecoin" case TestnetEthereum: return "Testnet Ethereum" + case TestnetFilecoin: + return "Testnet Filecoin" default: return "" } @@ -71,6 +77,8 @@ func (c *CoinType) CurrencyCode() string { return "LTC" case Ethereum: return "ETH" + case Filecoin: + return "FIL" case TestnetBitcoin: return "TBTC" case TestnetBitcoinCash: @@ -81,6 +89,8 @@ func (c 
*CoinType) CurrencyCode() string { return "TLTC" case TestnetEthereum: return "TETH" + case TestnetFilecoin: + return "TFIL" default: return "" } @@ -124,16 +134,16 @@ type Txns interface { Put(raw []byte, txid, value string, height int, timestamp time.Time, watchOnly bool) error // Fetch a tx and it's metadata given a hash - Get(txid chainhash.Hash) (Txn, error) + Get(txid string) (Txn, error) // Fetch all transactions from the db GetAll(includeWatchOnly bool) ([]Txn, error) // Update the height of a transaction - UpdateHeight(txid chainhash.Hash, height int, timestamp time.Time) error + UpdateHeight(txid string, height int, timestamp time.Time) error // Delete a transactions from the db - Delete(txid *chainhash.Hash) error + Delete(txid string) error } // Keys provides a database interface for the wallet to save key material, track diff --git a/vendor/github.com/OpenBazaar/wallet-interface/wallet.go b/vendor/github.com/OpenBazaar/wallet-interface/wallet.go index a60985c715..6cc2d7ca9b 100644 --- a/vendor/github.com/OpenBazaar/wallet-interface/wallet.go +++ b/vendor/github.com/OpenBazaar/wallet-interface/wallet.go @@ -5,7 +5,6 @@ import ( "math/big" "time" - "github.com/btcsuite/btcd/chaincfg/chainhash" btc "github.com/btcsuite/btcutil" hd "github.com/btcsuite/btcutil/hdkeychain" ) @@ -46,9 +45,7 @@ import ( // escrowed payment. type Wallet interface { walletMustManager - walletMustKeysmither walletMustBanker - walletCanBumpFee } var ( @@ -68,6 +65,74 @@ type WalletMustManuallyAssociateTransactionToOrder interface { AssociateTransactionWithOrder(cb TransactionCallback) } +// EscrowWallet is implemented by wallets capable of performing escrow transactions. +type EscrowWallet interface { + // SweepAddress should sweep all the funds from the provided inputs into the provided `address` using the given + // `key`. If `address` is nil, the funds should be swept into an internal address own by this wallet. 
+ // If the `redeemScript` is not nil, this should be treated as a multisig (p2sh) address and signed accordingly. + // + // This method is called by openbazaar-go in the following scenarios: + // 1) The buyer placed a direct order to a vendor who was offline. The buyer sent funds into a 1 of 2 multisig. + // Upon returning online the vendor accepts the order and calls SweepAddress to move the funds into his wallet. + // + // 2) Same as above but the buyer wishes to cancel the order before the vendor comes online. He calls SweepAddress + // to return the funds from the 1 of 2 multisig back into his wallet. + // + // 3) Same as above but rather than accepting the order, the vendor rejects it. When the buyer receives the reject + // message he calls SweepAddress to move the funds back into his wallet. + // + // 4) The timeout has expired on a 2 of 3 multisig. The vendor calls SweepAddress to claim the funds. + SweepAddress(ins []TransactionInput, address *btc.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel FeeLevel) (string, error) + + // GenerateMultisigScript should deterministically create a redeem script and address from the information provided. + // This method should be strictly limited to taking the input data, combining it to produce the redeem script and + // address and that's it. There is no need to interact with the network or make any transactions when this is called. + // + // Openbazaar-go will call this method in the following situations: + // 1) When the buyer places an order he passes in the relevant keys for each party to get back the address where + // the funds should be sent and the redeem script. The redeem script is saved in order (and openbazaar-go database). + // + // 2) The vendor calls this method when he receives an order so as to validate that the address the buyer is sending + // funds to is indeed correctly constructed. 
If this method fails to return the same values for the vendor as it + // did the buyer, the vendor will reject the order. + // + // 3) The moderator calls this function upon receiving a dispute so that he can validate the payment address for the + // order and make sure neither party is trying to maliciously lie about the details of the dispute to get the moderator + // to release the funds. + // + // Note that according to the order flow, this method is called by the buyer *before* the order is sent to the vendor, + // and before the vendor validates the order. Only after the buyer hears back from the vendor does the buyer send + // funds (either from an external wallet or via the `Spend` method) to the address specified in this method's return. + // + // `threshold` is the number of keys required to release the funds from the address. If `threshold` is two and len(keys) + // is three, this is a two of three multisig. If `timeoutKey` is not nil, then the script should allow the funds to + // be released with a signature from the `timeoutKey` after the `timeout` duration has passed. + // For example: + // OP_IF 2 3 OP_ELSE OP_CHECKSEQUENCEVERIFY OP_CHECKSIG OP_ENDIF + // + // If `timeoutKey` is nil then the a normal multisig without a timeout should be created. + GenerateMultisigScript(keys []hd.ExtendedKey, threshold int, timeout time.Duration, timeoutKey *hd.ExtendedKey) (addr btc.Address, redeemScript []byte, err error) + + // CreateMultisigSignature should build a transaction using the given inputs and outputs and sign it with the + // provided key. A list of signatures (one for each input) should be returned. + // + // This method is called by openbazaar-go by each party whenever they decide to release the funds from escrow. + // This method should not actually move any funds or make any transactions, only create necessary signatures to + // do so. The caller will then take the signature and share it with the other parties. 
Once all parties have shared + // their signatures, the person who wants to release the funds collects them and uses them as an input to the + // `Multisign` method. + CreateMultisigSignature(ins []TransactionInput, outs []TransactionOutput, key *hd.ExtendedKey, redeemScript []byte, feePerByte big.Int) ([]Signature, error) + + // Multisign collects all of the signatures generated by the `CreateMultisigSignature` function and builds a final + // transaction that can then be broadcast to the blockchain. The []byte return is the raw transaction. It should be + // broadcasted if `broadcast` is true. If the signatures combine and produce an invalid transaction then an error + // should be returned. + // + // This method is called by openbazaar-go by whichever party to the escrow is trying to release the funds only after + // all needed parties have signed using `CreateMultisigSignature` and have shared their signatures with each other. + Multisign(ins []TransactionInput, outs []TransactionOutput, sigs1 []Signature, sigs2 []Signature, redeemScript []byte, feePerByte big.Int, broadcast bool) ([]byte, error) +} + type walletMustManager interface { // Start is called when the openbazaar-go daemon starts up. At this point in time // the wallet implementation should start syncing and/or updating balances, but @@ -140,10 +205,10 @@ type walletMustManager interface { Transactions() ([]Txn, error) // GetTransaction return info on a specific transaction given the txid. - GetTransaction(txid chainhash.Hash) (Txn, error) + GetTransaction(txid string) (Txn, error) // ChainTip returns the best block hash and height of the blockchain. - ChainTip() (uint32, chainhash.Hash) + ChainTip() (uint32, string) // ReSyncBlockchain is called in response to a user action to rescan transactions. API based // wallets should do another scan of their addresses to find anything missing. 
Full node, or SPV @@ -151,10 +216,8 @@ type walletMustManager interface { ReSyncBlockchain(fromTime time.Time) // GetConfirmations returns the number of confirmations and the height for a transaction. - GetConfirmations(txid chainhash.Hash) (confirms, atHeight uint32, err error) -} + GetConfirmations(txid string) (confirms, atHeight uint32, err error) -type walletMustKeysmither interface { // ChildKey generate a child key using the given chaincode. Each openbazaar-go node // keeps a master key (an hd secp256k1 key) that it uses in multisig transactions. // Rather than use the key directly (which would result in an on chain privacy leak), @@ -170,54 +233,6 @@ type walletMustKeysmither interface { // transaction that the other party(s) signed does indeed pay to an address that we // control. HasKey(addr btc.Address) bool - - // GenerateMultisigScript should deterministically create a redeem script and address from the information provided. - // This method should be strictly limited to taking the input data, combining it to produce the redeem script and - // address and that's it. There is no need to interact with the network or make any transactions when this is called. - // - // Openbazaar-go will call this method in the following situations: - // 1) When the buyer places an order he passes in the relevant keys for each party to get back the address where - // the funds should be sent and the redeem script. The redeem script is saved in order (and openbazaar-go database). - // - // 2) The vendor calls this method when he receives and order so as to validate that the address they buyer is sending - // funds to is indeed correctly constructed. If this method fails to return the same values for the vendor as it - // did the buyer, the vendor will reject the order. 
- // - // 3) The moderator calls this function upon receiving a dispute so that he can validate the payment address for the - // order and make sure neither party is trying to maliciously lie about the details of the dispute to get the moderator - // to release the funds. - // - // Note that according to the order flow, this method is called by the buyer *before* the order is sent to the vendor, - // and before the vendor validates the order. Only after the buyer hears back from the vendor does the buyer send - // funds (either from an external wallet or via the `Spend` method) to the address specified in this method's return. - // - // `threshold` is the number of keys required to release the funds from the address. If `threshold` is two and len(keys) - // is three, this is a two of three multisig. If `timeoutKey` is not nil, then the script should allow the funds to - // be released with a signature from the `timeoutKey` after the `timeout` duration has passed. - // For example: - // OP_IF 2 3 OP_ELSE OP_CHECKSEQUENCEVERIFY OP_CHECKSIG OP_ENDIF - // - // If `timeoutKey` is nil then the a normal multisig without a timeout should be created. - GenerateMultisigScript(keys []hd.ExtendedKey, threshold int, timeout time.Duration, timeoutKey *hd.ExtendedKey) (addr btc.Address, redeemScript []byte, err error) - - // CreateMultisigSignature should build a transaction using the given inputs and outputs and sign it with the - // provided key. A list of signatures (one for each input) should be returned. - // - // This method is called by openbazaar-go by each party whenever they decide to release the funds from escrow. - // This method should not actually move any funds or make any transactions, only create necessary signatures to - // do so. The caller will then take the signature and share it with the other parties. 
Once all parties have shared - // their signatures, the person who wants to release the funds collects them and uses them as an input to the - // `Multisign` method. - CreateMultisigSignature(ins []TransactionInput, outs []TransactionOutput, key *hd.ExtendedKey, redeemScript []byte, feePerByte big.Int) ([]Signature, error) - - // Multisign collects all of the signatures generated by the `CreateMultisigSignature` function and builds a final - // transaction that can then be broadcast to the blockchain. The []byte return is the raw transaction. It should be - // broadcasted if `broadcast` is true. If the signatures combine and produce an invalid transaction then an error - // should be returned. - // - // This method is called by openbazaar-go by whichever party to the escrow is trying to release the funds only after - // all needed parties have signed using `CreateMultisigSignature` and have shared their signatures with each other. - Multisign(ins []TransactionInput, outs []TransactionOutput, sigs1 []Signature, sigs2 []Signature, redeemScript []byte, feePerByte big.Int, broadcast bool) ([]byte, error) } type walletMustBanker interface { @@ -238,7 +253,7 @@ type walletMustBanker interface { // be swept to the provided payment address. For most coins this entails subtracting the // transaction fee from the total amount being sent rather than adding it on as is normally // the case when spendAll is false. - Spend(amount big.Int, addr btc.Address, feeLevel FeeLevel, referenceID string, spendAll bool) (*chainhash.Hash, error) + Spend(amount big.Int, addr btc.Address, feeLevel FeeLevel, referenceID string, spendAll bool) (string, error) // EstimateFee should return the estimate fee that will be required to make a transaction // spending from the given inputs to the given outputs. FeePerByte is denominated in @@ -253,37 +268,20 @@ type walletMustBanker interface { // // All amounts should be in the coin's base unit (for example: satoshis). 
EstimateSpendFee(amount big.Int, feeLevel FeeLevel) (big.Int, error) - - // SweepAddress should sweep all the funds from the provided inputs into the provided `address` using the given - // `key`. If `address` is nil, the funds should be swept into an internal address own by this wallet. - // If the `redeemScript` is not nil, this should be treated as a multisig (p2sh) address and signed accordingly. - // - // This method is called by openbazaar-go in the following scenarios: - // 1) The buyer placed a direct order to a vendor who was offline. The buyer sent funds into a 1 of 2 multisig. - // Upon returning online the vendor accepts the order and calls SweepAddress to move the funds into his wallet. - // - // 2) Same as above but the buyer wishes to cancel the order before the vendor comes online. He calls SweepAddress - // to return the funds from the 1 of 2 multisig back into has wallet. - // - // 3) Same as above but rather than accepting the order, the vendor rejects it. When the buyer receives the reject - // message he calls SweepAddress to move the funds back into his wallet. - // - // 4) The timeout has expired on a 2 of 3 multisig. The vendor calls SweepAddress to claim the funds. - SweepAddress(ins []TransactionInput, address *btc.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel FeeLevel) (*chainhash.Hash, error) } -type walletCanBumpFee interface { +type WalletCanBumpFee interface { // BumpFee should attempt to bump the fee on a given unconfirmed transaction (if possible) to // try to get it confirmed and return the txid of the new transaction (if one exists). // Since this method is only called in response to user action, it is acceptable to // return an error if this functionality is not available in this wallet or on the network. 
- BumpFee(txid chainhash.Hash) (*chainhash.Hash, error) + BumpFee(txid string) (string, error) } type FeeLevel int const ( - PRIORITY FeeLevel = 0 + PRIOIRTY FeeLevel = 0 NORMAL = 1 ECONOMIC = 2 FEE_BUMP = 3 diff --git a/vendor/github.com/daaku/go.zipexe/go.mod b/vendor/github.com/daaku/go.zipexe/go.mod new file mode 100644 index 0000000000..ac905c75ca --- /dev/null +++ b/vendor/github.com/daaku/go.zipexe/go.mod @@ -0,0 +1,3 @@ +module github.com/daaku/go.zipexe + +go 1.11 diff --git a/vendor/github.com/daaku/go.zipexe/license b/vendor/github.com/daaku/go.zipexe/license new file mode 100644 index 0000000000..6a2f15c1d3 --- /dev/null +++ b/vendor/github.com/daaku/go.zipexe/license @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright © 2012-2015 Carlos Castillo + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the “Software”), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/daaku/go.zipexe/readme.md b/vendor/github.com/daaku/go.zipexe/readme.md new file mode 100644 index 0000000000..3ec0e8e56e --- /dev/null +++ b/vendor/github.com/daaku/go.zipexe/readme.md @@ -0,0 +1,5 @@ +go.zipexe +========= + +This module was taken as-is from https://github.com/cookieo9/resources-go. +Documentation: https://godoc.org/github.com/daaku/go.zipexe diff --git a/vendor/github.com/daaku/go.zipexe/zipexe.go b/vendor/github.com/daaku/go.zipexe/zipexe.go new file mode 100644 index 0000000000..6004606237 --- /dev/null +++ b/vendor/github.com/daaku/go.zipexe/zipexe.go @@ -0,0 +1,142 @@ +// Package zipexe attempts to open an executable binary file as a zip file. +package zipexe + +import ( + "archive/zip" + "debug/elf" + "debug/macho" + "debug/pe" + "errors" + "io" + "os" +) + +// Opens a zip file by path. +func Open(path string) (*zip.Reader, error) { + _, rd, err := OpenCloser(path) + return rd, err +} + +// OpenCloser is like Open but returns an additional Closer to avoid leaking open files. +func OpenCloser(path string) (io.Closer, *zip.Reader, error) { + file, err := os.Open(path) + if err != nil { + return nil, nil, err + } + finfo, err := file.Stat() + if err != nil { + return nil, nil, err + } + zr, err := NewReader(file, finfo.Size()) + if err != nil { + return nil, nil, err + } + return file, zr, nil +} + +// Open a zip file, specially handling various binaries that may have been +// augmented with zip data. +func NewReader(rda io.ReaderAt, size int64) (*zip.Reader, error) { + handlers := []func(io.ReaderAt, int64) (*zip.Reader, error){ + zip.NewReader, + zipExeReaderMacho, + zipExeReaderElf, + zipExeReaderPe, + } + + for _, handler := range handlers { + zfile, err := handler(rda, size) + if err == nil { + return zfile, nil + } + } + return nil, errors.New("Couldn't Open As Executable") +} + +// zipExeReaderMacho treats the file as a Mach-O binary +// (Mac OS X / Darwin executable) and attempts to find a zip archive. 
+func zipExeReaderMacho(rda io.ReaderAt, size int64) (*zip.Reader, error) { + file, err := macho.NewFile(rda) + if err != nil { + return nil, err + } + + var max int64 + for _, load := range file.Loads { + seg, ok := load.(*macho.Segment) + if ok { + // Check if the segment contains a zip file + if zfile, err := zip.NewReader(seg, int64(seg.Filesz)); err == nil { + return zfile, nil + } + + // Otherwise move end of file pointer + end := int64(seg.Offset + seg.Filesz) + if end > max { + max = end + } + } + } + + // No zip file within binary, try appended to end + section := io.NewSectionReader(rda, max, size-max) + return zip.NewReader(section, section.Size()) +} + +// zipExeReaderPe treats the file as a Portable Executable binary +// (Windows executable) and attempts to find a zip archive. +func zipExeReaderPe(rda io.ReaderAt, size int64) (*zip.Reader, error) { + file, err := pe.NewFile(rda) + if err != nil { + return nil, err + } + + var max int64 + for _, sec := range file.Sections { + // Check if this section has a zip file + if zfile, err := zip.NewReader(sec, int64(sec.Size)); err == nil { + return zfile, nil + } + + // Otherwise move end of file pointer + end := int64(sec.Offset + sec.Size) + if end > max { + max = end + } + } + + // No zip file within binary, try appended to end + section := io.NewSectionReader(rda, max, size-max) + return zip.NewReader(section, section.Size()) +} + +// zipExeReaderElf treats the file as an ELF binary +// (linux/BSD/etc... executable) and attempts to find a zip archive. 
+func zipExeReaderElf(rda io.ReaderAt, size int64) (*zip.Reader, error) { + file, err := elf.NewFile(rda) + if err != nil { + return nil, err + } + + var max int64 + for _, sect := range file.Sections { + if sect.Type == elf.SHT_NOBITS { + continue + } + + // Check if this section has a zip file + if zfile, err := zip.NewReader(sect, int64(sect.Size)); err == nil { + return zfile, nil + } + + // Otherwise move end of file pointer + end := int64(sect.Offset + sect.Size) + if end > max { + max = end + } + } + + // No zip file within binary, try appended to end + section := io.NewSectionReader(rda, max, size-max) + return zip.NewReader(section, section.Size()) +} diff --git a/vendor/github.com/filecoin-project/go-address/CONTRIBUTING.md b/vendor/github.com/filecoin-project/go-address/CONTRIBUTING.md new file mode 100644 index 0000000000..5118774981 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-address/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Contributing to this repo + +First, thank you for your interest in contributing to this project! Before you pick up your first issue and start +changing code, please: + +1. Review all documentation for the module you're interested in. +1. Look through the [issues for this repo](https://github.com/filecoin-project/go-address/issues) for relevant discussions. +1. If you have questions about an issue, post a comment in the issue. +1. If you want to submit changes that aren't covered by an issue, file a new one with your proposal, outlining what problem you found/feature you want to implement, and how you intend to implement a solution. + +For best results, before submitting a PR, make sure: +1. It has met all acceptance criteria for the issue. +1. It addresses only the one issue and does not make other, irrelevant changes. +1. Your code conforms to our coding style guide. +1. You have adequate test coverage (this should be indicated by CI results anyway). +1. 
If you like, check out [current PRs](https://github.com/filecoin-project/go-address/pulls) to see how others do it. + +Special Note: +If editing README.md, please conform to the [standard readme specification](https://github.com/RichardLitt/standard-readme/blob/master/spec.md). + +Before a PR can be merged to `master`, it must: +1. Pass continuous integration. +1. Be approved by at least two maintainers + +### Testing + +- All new code should be accompanied by unit tests. Prefer focused unit tests to integration tests for thorough validation of behaviour. Existing code is not necessarily a good model, here. +- Integration tests should test integration, not comprehensive functionality +- Tests should be placed in a separate package named `$PACKAGE_test`. For example, a test of the `chain` package should live in a package named `chain_test`. In limited situations, exceptions may be made for some "white box" tests placed in the same package as the code it tests. + +### Conventions and Style + +#### Imports +We use the following import ordering. +``` +import ( + [stdlib packages, alpha-sorted] + + [external packages] + + [go-address packages] +) +``` + +Where a package name does not match its directory name, an explicit alias is expected (`goimports` will add this for you). + +Example: + +```go +package address_test + +import ( + "bytes" + "encoding/base32" + "fmt" + "math" + "math/rand" + "strconv" + "testing" + "time" + + ffi "github.com/filecoin-project/filecoin-ffi" + "github.com/multiformats/go-varint" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/filecoin-project/go-crypto" + + "github.com/filecoin-project/go-address" +) +``` + +#### Comments +Comments are a communication to other developers (including your future self) to help them understand and maintain code. Good comments describe the _intent_ of the code, without repeating the procedures directly. 
+ +- A `TODO:` comment describes a change that is desired but could not be immediately implemented. It must include a reference to a GitHub issue outlining whatever prevents the thing being done now (which could just be a matter of priority). +- A `NOTE:` comment indicates an aside, some background info, or ideas for future improvement, rather than the intent of the current code. It's often fine to document such ideas alongside the code rather than an issue (at the loss of a space for discussion). +- `FIXME`, `HACK`, `XXX` and similar tags indicating that some code is to be avoided in favour of `TODO`, `NOTE` or some straight prose. \ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-address/COPYRIGHT b/vendor/github.com/filecoin-project/go-address/COPYRIGHT new file mode 100644 index 0000000000..771e6f7cd7 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-address/COPYRIGHT @@ -0,0 +1,3 @@ +Copyright 2019. Protocol Labs, Inc. + +This library is dual-licensed under Apache 2.0 and MIT terms. diff --git a/vendor/github.com/filecoin-project/go-address/LICENSE-APACHE b/vendor/github.com/filecoin-project/go-address/LICENSE-APACHE new file mode 100644 index 0000000000..546514363d --- /dev/null +++ b/vendor/github.com/filecoin-project/go-address/LICENSE-APACHE @@ -0,0 +1,13 @@ +Copyright 2019. Protocol Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/github.com/filecoin-project/go-address/LICENSE-MIT b/vendor/github.com/filecoin-project/go-address/LICENSE-MIT new file mode 100644 index 0000000000..ea532a8305 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-address/LICENSE-MIT @@ -0,0 +1,19 @@ +Copyright 2019. Protocol Labs, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/filecoin-project/go-address/Makefile b/vendor/github.com/filecoin-project/go-address/Makefile new file mode 100644 index 0000000000..6e8b72d9b5 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-address/Makefile @@ -0,0 +1,7 @@ +all: build +.PHONY: all + + +build: + go build +.PHONY: build diff --git a/vendor/github.com/filecoin-project/go-address/README.md b/vendor/github.com/filecoin-project/go-address/README.md new file mode 100644 index 0000000000..5a7400d18d --- /dev/null +++ b/vendor/github.com/filecoin-project/go-address/README.md @@ -0,0 +1,43 @@ +# go-address +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![CircleCI](https://circleci.com/gh/filecoin-project/go-address.svg?style=svg)](https://circleci.com/gh/filecoin-project/go-address) +[![codecov](https://codecov.io/gh/filecoin-project/go-address/branch/master/graph/badge.svg)](https://codecov.io/gh/filecoin-project/go-address) + +The filecoin address type, used for identifying actors on the filecoin network, in various formats. 
+ +## Install + +Install this library with `go mod` + +## Usage + +Addresses support various types of encoding formats and have constructors +for each format + +```golang +// address from ID +idAddress := NewIDAddress(id) +// address from a secp pub key +secp256k1Address := NewSecp256k1Address(pubkey) +// address from data for actor protocol +actorAddress := NewActorAddress(data) +// address from the BLS pubkey +blsAddress := NewBLSAddress(pubkey) +``` + +Serialization + +```golang +var outBuf io.writer +err := address.MarshalCBOR(outbuf) +var inBuf io.reader +err := address.UnmarshalCBOR(inbuf) +``` + +## Project-level documentation +The filecoin-project has a [community repo](https://github.com/filecoin-project/community) that documents in more detail our policies and guidelines, such as discussion forums and chat rooms and [Code of Conduct](https://github.com/filecoin-project/community/blob/master/CODE_OF_CONDUCT.md). + +## License +This repository is dual-licensed under Apache 2.0 and MIT terms. + +Copyright 2019. Protocol Labs, Inc. diff --git a/vendor/github.com/filecoin-project/go-address/address.go b/vendor/github.com/filecoin-project/go-address/address.go new file mode 100644 index 0000000000..29cd402b0f --- /dev/null +++ b/vendor/github.com/filecoin-project/go-address/address.go @@ -0,0 +1,427 @@ +package address + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "math" + "strconv" + + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/minio/blake2b-simd" + "github.com/multiformats/go-varint" + "github.com/polydawn/refmt/obj/atlas" + "golang.org/x/xerrors" + + cbg "github.com/whyrusleeping/cbor-gen" +) + +func init() { + cbor.RegisterCborType(addressAtlasEntry) +} + +var addressAtlasEntry = atlas.BuildEntry(Address{}).Transform(). + TransformMarshal(atlas.MakeMarshalTransformFunc( + func(a Address) (string, error) { + return string(a.Bytes()), nil + })). 
+ TransformUnmarshal(atlas.MakeUnmarshalTransformFunc( + func(x string) (Address, error) { + return NewFromBytes([]byte(x)) + })). + Complete() + +// CurrentNetwork specifies which network the address belongs to +var CurrentNetwork = Testnet + +// Address is the go type that represents an address in the filecoin network. +type Address struct{ str string } + +// Undef is the type that represents an undefined address. +var Undef = Address{} + +// Network represents which network an address belongs to. +type Network = byte + +const ( + // Mainnet is the main network. + Mainnet Network = iota + // Testnet is the test network. + Testnet +) + +// MainnetPrefix is the main network prefix. +const MainnetPrefix = "f" + +// TestnetPrefix is the main network prefix. +const TestnetPrefix = "t" + +// Protocol represents which protocol an address uses. +type Protocol = byte + +const ( + // ID represents the address ID protocol. + ID Protocol = iota + // SECP256K1 represents the address SECP256K1 protocol. + SECP256K1 + // Actor represents the address Actor protocol. + Actor + // BLS represents the address BLS protocol. + BLS + + Unknown = Protocol(255) +) + +// Protocol returns the protocol used by the address. +func (a Address) Protocol() Protocol { + if len(a.str) == 0 { + return Unknown + } + return a.str[0] +} + +// Payload returns the payload of the address. +func (a Address) Payload() []byte { + if len(a.str) == 0 { + return nil + } + return []byte(a.str[1:]) +} + +// Bytes returns the address as bytes. +func (a Address) Bytes() []byte { + return []byte(a.str) +} + +// String returns an address encoded as a string. +func (a Address) String() string { + str, err := encode(CurrentNetwork, a) + if err != nil { + panic(err) // I don't know if this one is okay + } + return str +} + +// Empty returns true if the address is empty, false otherwise. +func (a Address) Empty() bool { + return a == Undef +} + +// Unmarshal unmarshals the cbor bytes into the address. 
+func (a Address) Unmarshal(b []byte) error { + return cbor.DecodeInto(b, &a) +} + +// Marshal marshals the address to cbor. +func (a Address) Marshal() ([]byte, error) { + return cbor.DumpObject(a) +} + +// UnmarshalJSON implements the json unmarshal interface. +func (a *Address) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + + addr, err := decode(s) + if err != nil { + return err + } + *a = addr + return nil +} + +// MarshalJSON implements the json marshal interface. +func (a Address) MarshalJSON() ([]byte, error) { + return []byte(`"` + a.String() + `"`), nil +} + +func (a *Address) Scan(value interface{}) error { + switch value := value.(type) { + case string: + a1, err := decode(value) + if err != nil { + return err + } + + *a = a1 + + return nil + default: + return xerrors.New("non-string types unsupported") + } +} + +// NewIDAddress returns an address using the ID protocol. +func NewIDAddress(id uint64) (Address, error) { + if id > math.MaxInt64 { + return Undef, xerrors.New("IDs must be less than 2^63") + } + return newAddress(ID, varint.ToUvarint(id)) +} + +// NewSecp256k1Address returns an address using the SECP256K1 protocol. +func NewSecp256k1Address(pubkey []byte) (Address, error) { + return newAddress(SECP256K1, addressHash(pubkey)) +} + +// NewActorAddress returns an address using the Actor protocol. +func NewActorAddress(data []byte) (Address, error) { + return newAddress(Actor, addressHash(data)) +} + +// NewBLSAddress returns an address using the BLS protocol. +func NewBLSAddress(pubkey []byte) (Address, error) { + return newAddress(BLS, pubkey) +} + +// NewFromString returns the address represented by the string `addr`. +func NewFromString(addr string) (Address, error) { + return decode(addr) +} + +// NewFromBytes return the address represented by the bytes `addr`. 
+func NewFromBytes(addr []byte) (Address, error) { + if len(addr) == 0 { + return Undef, nil + } + if len(addr) == 1 { + return Undef, ErrInvalidLength + } + return newAddress(addr[0], addr[1:]) +} + +// Checksum returns the checksum of `ingest`. +func Checksum(ingest []byte) []byte { + return hash(ingest, checksumHashConfig) +} + +// ValidateChecksum returns true if the checksum of `ingest` is equal to `expected`> +func ValidateChecksum(ingest, expect []byte) bool { + digest := Checksum(ingest) + return bytes.Equal(digest, expect) +} + +func addressHash(ingest []byte) []byte { + return hash(ingest, payloadHashConfig) +} + +func newAddress(protocol Protocol, payload []byte) (Address, error) { + switch protocol { + case ID: + _, n, err := varint.FromUvarint(payload) + if err != nil { + return Undef, xerrors.Errorf("could not decode: %v: %w", err, ErrInvalidPayload) + } + if n != len(payload) { + return Undef, xerrors.Errorf("different varint length (v:%d != p:%d): %w", + n, len(payload), ErrInvalidPayload) + } + case SECP256K1, Actor: + if len(payload) != PayloadHashLength { + return Undef, ErrInvalidPayload + } + case BLS: + if len(payload) != BlsPublicKeyBytes { + return Undef, ErrInvalidPayload + } + default: + return Undef, ErrUnknownProtocol + } + explen := 1 + len(payload) + buf := make([]byte, explen) + + buf[0] = protocol + copy(buf[1:], payload) + + return Address{string(buf)}, nil +} + +func encode(network Network, addr Address) (string, error) { + if addr == Undef { + return UndefAddressString, nil + } + var ntwk string + switch network { + case Mainnet: + ntwk = MainnetPrefix + case Testnet: + ntwk = TestnetPrefix + default: + return UndefAddressString, ErrUnknownNetwork + } + + var strAddr string + switch addr.Protocol() { + case SECP256K1, Actor, BLS: + cksm := Checksum(append([]byte{addr.Protocol()}, addr.Payload()...)) + strAddr = ntwk + fmt.Sprintf("%d", addr.Protocol()) + AddressEncoding.WithPadding(-1).EncodeToString(append(addr.Payload(), 
cksm[:]...)) + case ID: + i, n, err := varint.FromUvarint(addr.Payload()) + if err != nil { + return UndefAddressString, xerrors.Errorf("could not decode varint: %w", err) + } + if n != len(addr.Payload()) { + return UndefAddressString, xerrors.Errorf("payload contains additional bytes") + } + strAddr = fmt.Sprintf("%s%d%d", ntwk, addr.Protocol(), i) + default: + return UndefAddressString, ErrUnknownProtocol + } + return strAddr, nil +} + +func decode(a string) (Address, error) { + if len(a) == 0 { + return Undef, nil + } + if a == UndefAddressString { + return Undef, nil + } + if len(a) > MaxAddressStringLength || len(a) < 3 { + return Undef, ErrInvalidLength + } + + if string(a[0]) != MainnetPrefix && string(a[0]) != TestnetPrefix { + return Undef, ErrUnknownNetwork + } + + var protocol Protocol + switch a[1] { + case '0': + protocol = ID + case '1': + protocol = SECP256K1 + case '2': + protocol = Actor + case '3': + protocol = BLS + default: + return Undef, ErrUnknownProtocol + } + + raw := a[2:] + if protocol == ID { + // 20 is length of math.MaxUint64 as a string + if len(raw) > 20 { + return Undef, ErrInvalidLength + } + id, err := strconv.ParseUint(raw, 10, 64) + if err != nil { + return Undef, ErrInvalidPayload + } + return newAddress(protocol, varint.ToUvarint(id)) + } + + payloadcksm, err := AddressEncoding.WithPadding(-1).DecodeString(raw) + if err != nil { + return Undef, err + } + payload := payloadcksm[:len(payloadcksm)-ChecksumHashLength] + cksm := payloadcksm[len(payloadcksm)-ChecksumHashLength:] + + if protocol == SECP256K1 || protocol == Actor { + if len(payload) != 20 { + return Undef, ErrInvalidPayload + } + } + + if !ValidateChecksum(append([]byte{protocol}, payload...), cksm) { + return Undef, ErrInvalidChecksum + } + + return newAddress(protocol, payload) +} + +func hash(ingest []byte, cfg *blake2b.Config) []byte { + hasher, err := blake2b.New(cfg) + if err != nil { + // If this happens sth is very wrong. 
+ panic(fmt.Sprintf("invalid address hash configuration: %v", err)) // ok + } + if _, err := hasher.Write(ingest); err != nil { + // blake2bs Write implementation never returns an error in its current + // setup. So if this happens sth went very wrong. + panic(fmt.Sprintf("blake2b is unable to process hashes: %v", err)) // ok + } + return hasher.Sum(nil) +} + +func (a Address) MarshalBinary() ([]byte, error) { + return a.Bytes(), nil +} + +func (a *Address) UnmarshalBinary(b []byte) error { + newAddr, err := NewFromBytes(b) + if err != nil { + return err + } + *a = newAddr + return nil +} + +func (a *Address) MarshalCBOR(w io.Writer) error { + if a == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + if *a == Undef { + return fmt.Errorf("cannot marshal undefined address") + } + + if err := cbg.WriteMajorTypeHeader(w, cbg.MajByteString, uint64(len(a.str))); err != nil { + return err + } + + if _, err := io.WriteString(w, a.str); err != nil { + return err + } + + return nil +} + +func (a *Address) UnmarshalCBOR(r io.Reader) error { + br := cbg.GetPeeker(r) + + maj, extra, err := cbg.CborReadHeader(br) + if err != nil { + return err + } + + if maj != cbg.MajByteString { + return fmt.Errorf("cbor type for address unmarshal was not byte string") + } + + if extra > 64 { + return fmt.Errorf("too many bytes to unmarshal for an address") + } + + buf := make([]byte, int(extra)) + if _, err := io.ReadFull(br, buf); err != nil { + return err + } + + addr, err := NewFromBytes(buf) + if err != nil { + return err + } + if addr == Undef { + return fmt.Errorf("cbor input should not contain empty addresses") + } + + *a = addr + + return nil +} + +func IDFromAddress(addr Address) (uint64, error) { + if addr.Protocol() != ID { + return 0, xerrors.Errorf("cannot get id from non id address") + } + + i, _, err := varint.FromUvarint(addr.Payload()) + return i, err +} diff --git a/vendor/github.com/filecoin-project/go-address/constants.go 
b/vendor/github.com/filecoin-project/go-address/constants.go new file mode 100644 index 0000000000..e3266df740 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-address/constants.go @@ -0,0 +1,71 @@ +package address + +import ( + "encoding/base32" + "errors" + + "github.com/minio/blake2b-simd" +) + +func init() { + + var err error + + TestAddress, err = NewActorAddress([]byte("satoshi")) + if err != nil { + panic(err) + } + + TestAddress2, err = NewActorAddress([]byte("nakamoto")) + if err != nil { + panic(err) + } +} + +var ( + // TestAddress is an account with some initial funds in it. + TestAddress Address + // TestAddress2 is an account with some initial funds in it. + TestAddress2 Address +) + +var ( + // ErrUnknownNetwork is returned when encountering an unknown network in an address. + ErrUnknownNetwork = errors.New("unknown address network") + + // ErrUnknownProtocol is returned when encountering an unknown protocol in an address. + ErrUnknownProtocol = errors.New("unknown address protocol") + // ErrInvalidPayload is returned when encountering an invalid address payload. + ErrInvalidPayload = errors.New("invalid address payload") + // ErrInvalidLength is returned when encountering an address of invalid length. + ErrInvalidLength = errors.New("invalid address length") + // ErrInvalidChecksum is returned when encountering an invalid address checksum. + ErrInvalidChecksum = errors.New("invalid address checksum") +) + +// UndefAddressString is the string used to represent an empty address when encoded to a string. +var UndefAddressString = "" + +// PayloadHashLength defines the hash length taken over addresses using the Actor and SECP256K1 protocols. +const PayloadHashLength = 20 + +// ChecksumHashLength defines the hash length used for calculating address checksums. 
+const ChecksumHashLength = 4 + +// MaxAddressStringLength is the max length of an address encoded as a string +// it include the network prefx, protocol, and bls publickey +const MaxAddressStringLength = 2 + 84 + +// BlsPublicKeyBytes is the length of a BLS public key +const BlsPublicKeyBytes = 48 + +// BlsPrivateKeyBytes is the length of a BLS private key +const BlsPrivateKeyBytes = 32 + +var payloadHashConfig = &blake2b.Config{Size: PayloadHashLength} +var checksumHashConfig = &blake2b.Config{Size: ChecksumHashLength} + +const encodeStd = "abcdefghijklmnopqrstuvwxyz234567" + +// AddressEncoding defines the base32 config used for address encoding and decoding. +var AddressEncoding = base32.NewEncoding(encodeStd) diff --git a/vendor/github.com/filecoin-project/go-address/go.mod b/vendor/github.com/filecoin-project/go-address/go.mod new file mode 100644 index 0000000000..5ed051ced9 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-address/go.mod @@ -0,0 +1,14 @@ +module github.com/filecoin-project/go-address + +go 1.13 + +require ( + github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 + github.com/ipfs/go-ipld-cbor v0.0.4 + github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 + github.com/multiformats/go-varint v0.0.5 + github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 + github.com/stretchr/testify v1.4.0 + github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 +) diff --git a/vendor/github.com/filecoin-project/go-address/go.sum b/vendor/github.com/filecoin-project/go-address/go.sum new file mode 100644 index 0000000000..f22ff83028 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-address/go.sum @@ -0,0 +1,100 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= +github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs= +github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipld-cbor v0.0.4 h1:Aw3KPOKXjvrm6VjwJvFf1F1ekR/BH3jdof3Bk7OTiSA= +github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-format v0.0.1 h1:HCu4eB/Gh+KD/Q0M8u888RFkorTWNIL3da4oc5dwc80= +github.com/ipfs/go-ipld-format v0.0.1/go.mod 
h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= +github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= +github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod 
h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 h1:CskT+S6Ay54OwxBGB0R3Rsx4Muto6UnEYTyKJbyRIAI= +github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod 
h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa h1:E+gaaifzi2xF65PbDmuKI3PhLWY6G5opMLniFq8vmXA= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c h1:BMg3YUwLEUIYBJoYZVhA4ZDTciXRj6r7ffOCshWrsoE= +github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 
h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d h1:Z0Ahzd7HltpJtjAHHxX8QFP3j1yYgiuvjbjRzDj/KH0= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/filecoin-project/go-address/testing.go b/vendor/github.com/filecoin-project/go-address/testing.go new file 
mode 100644 index 0000000000..9bc41376dc --- /dev/null +++ b/vendor/github.com/filecoin-project/go-address/testing.go @@ -0,0 +1,20 @@ +package address + +import ( + "fmt" +) + +// NewForTestGetter returns a closure that returns an address unique to that invocation. +// The address is unique wrt the closure returned, not globally. +func NewForTestGetter() func() Address { + i := 0 + return func() Address { + s := fmt.Sprintf("address%d", i) + i++ + newAddr, err := NewActorAddress([]byte(s)) + if err != nil { + panic(err) + } + return newAddr + } +} diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v2/LICENSE-APACHE b/vendor/github.com/filecoin-project/go-amt-ipld/v2/LICENSE-APACHE new file mode 100644 index 0000000000..14478a3b60 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-amt-ipld/v2/LICENSE-APACHE @@ -0,0 +1,5 @@ +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v2/LICENSE-MIT b/vendor/github.com/filecoin-project/go-amt-ipld/v2/LICENSE-MIT new file mode 100644 index 0000000000..72dc60d84b --- /dev/null +++ b/vendor/github.com/filecoin-project/go-amt-ipld/v2/LICENSE-MIT @@ -0,0 +1,19 @@ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v2/README.md b/vendor/github.com/filecoin-project/go-amt-ipld/v2/README.md new file mode 100644 index 0000000000..8472b00909 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-amt-ipld/v2/README.md @@ -0,0 +1,8 @@ +# go-amt-ipld + +> Array Mapped Trie (Persistent Vector) implementation using go-ipld + + +## License + +Dual MIT and Apache 2 diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v2/amt.go b/vendor/github.com/filecoin-project/go-amt-ipld/v2/amt.go new file mode 100644 index 0000000000..9cea1cb25e --- /dev/null +++ b/vendor/github.com/filecoin-project/go-amt-ipld/v2/amt.go @@ -0,0 +1,556 @@ +package amt + +import ( + "bytes" + "context" + "fmt" + "math/bits" + + cid "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log" + cbg "github.com/whyrusleeping/cbor-gen" +) + +var log = logging.Logger("amt") + +const ( + // Width must be a power of 2. We set this to 8. + maxIndexBits = 63 + widthBits = 3 + width = 1 << widthBits // 8 + bitfieldSize = 1 // ((width - 1) >> 3) + 1 + maxHeight = maxIndexBits/widthBits - 1 // 20 (because the root is at height 0). +) + +// MaxIndex is the maximum index for elements in the AMT. This is currently 1^63 +// (max int) because the width is 8. 
That means every "level" consumes 3 bits +// from the index, and 63/3 is a nice even 21 +const MaxIndex = uint64(1< maxHeight { + return nil, fmt.Errorf("failed to load AMT: height out of bounds: %d > %d", r.Height, maxHeight) + } + + r.store = bs + + return &r, nil +} + +func (r *Root) Set(ctx context.Context, i uint64, val interface{}) error { + if i > MaxIndex { + return fmt.Errorf("index %d is out of range for the amt", i) + } + + var b []byte + if m, ok := val.(cbg.CBORMarshaler); ok { + buf := new(bytes.Buffer) + if err := m.MarshalCBOR(buf); err != nil { + return err + } + b = buf.Bytes() + } else { + var err error + b, err = cbor.DumpObject(val) + if err != nil { + return err + } + } + + for i >= nodesForHeight(int(r.Height)+1) { + if !r.Node.empty() { + if err := r.Node.Flush(ctx, r.store, int(r.Height)); err != nil { + return err + } + + c, err := r.store.Put(ctx, &r.Node) + if err != nil { + return err + } + + r.Node = Node{ + Bmap: [...]byte{0x01}, + Links: []cid.Cid{c}, + } + } + r.Height++ + } + + addVal, err := r.Node.set(ctx, r.store, int(r.Height), i, &cbg.Deferred{Raw: b}) + if err != nil { + return err + } + + if addVal { + r.Count++ + } + + return nil +} + +func FromArray(ctx context.Context, bs cbor.IpldStore, vals []cbg.CBORMarshaler) (cid.Cid, error) { + r := NewAMT(bs) + if err := r.BatchSet(ctx, vals); err != nil { + return cid.Undef, err + } + + return r.Flush(ctx) +} + +func (r *Root) BatchSet(ctx context.Context, vals []cbg.CBORMarshaler) error { + // TODO: there are more optimized ways of doing this method + for i, v := range vals { + if err := r.Set(ctx, uint64(i), v); err != nil { + return err + } + } + return nil +} + +func (r *Root) Get(ctx context.Context, i uint64, out interface{}) error { + if i > MaxIndex { + return fmt.Errorf("index %d is out of range for the amt", i) + } + + if i >= nodesForHeight(int(r.Height+1)) { + return &ErrNotFound{Index: i} + } + return r.Node.get(ctx, r.store, int(r.Height), i, out) +} + +func (n 
*Node) get(ctx context.Context, bs cbor.IpldStore, height int, i uint64, out interface{}) error { + subi := i / nodesForHeight(height) + set, _ := n.getBit(subi) + if !set { + return &ErrNotFound{i} + } + if height == 0 { + n.expandValues() + + d := n.expVals[i] + + if um, ok := out.(cbg.CBORUnmarshaler); ok { + return um.UnmarshalCBOR(bytes.NewReader(d.Raw)) + } else { + return cbor.DecodeInto(d.Raw, out) + } + } + + subn, err := n.loadNode(ctx, bs, subi, false) + if err != nil { + return err + } + + return subn.get(ctx, bs, height-1, i%nodesForHeight(height), out) +} + +func (r *Root) BatchDelete(ctx context.Context, indices []uint64) error { + // TODO: theres a faster way of doing this, but this works for now + for _, i := range indices { + if err := r.Delete(ctx, i); err != nil { + return err + } + } + + return nil +} + +func (r *Root) Delete(ctx context.Context, i uint64) error { + if i > MaxIndex { + return fmt.Errorf("index %d is out of range for the amt", i) + } + //fmt.Printf("i: %d, h: %d, nfh: %d\n", i, r.Height, nodesForHeight(int(r.Height))) + if i >= nodesForHeight(int(r.Height+1)) { + return &ErrNotFound{i} + } + + if err := r.Node.delete(ctx, r.store, int(r.Height), i); err != nil { + return err + } + r.Count-- + + for r.Node.Bmap[0] == 1 && r.Height > 0 { + sub, err := r.Node.loadNode(ctx, r.store, 0, false) + if err != nil { + return err + } + + r.Node = *sub + r.Height-- + } + + return nil +} + +func (n *Node) delete(ctx context.Context, bs cbor.IpldStore, height int, i uint64) error { + subi := i / nodesForHeight(height) + set, _ := n.getBit(subi) + if !set { + return &ErrNotFound{i} + } + if height == 0 { + n.expandValues() + + n.expVals[i] = nil + n.clearBit(i) + + return nil + } + + subn, err := n.loadNode(ctx, bs, subi, false) + if err != nil { + return err + } + + if err := subn.delete(ctx, bs, height-1, i%nodesForHeight(height)); err != nil { + return err + } + + if subn.empty() { + n.clearBit(subi) + n.cache[subi] = nil + n.expLinks[subi] 
= cid.Undef + } + + return nil +} + +// Subtract removes all elements of 'or' from 'r' +func (r *Root) Subtract(ctx context.Context, or *Root) error { + // TODO: as with other methods, there should be an optimized way of doing this + return or.ForEach(ctx, func(i uint64, _ *cbg.Deferred) error { + return r.Delete(ctx, i) + }) +} + +func (r *Root) ForEach(ctx context.Context, cb func(uint64, *cbg.Deferred) error) error { + return r.Node.forEachAt(ctx, r.store, int(r.Height), 0, 0, cb) +} + +func (r *Root) ForEachAt(ctx context.Context, start uint64, cb func(uint64, *cbg.Deferred) error) error { + return r.Node.forEachAt(ctx, r.store, int(r.Height), start, 0, cb) +} + +func (n *Node) forEachAt(ctx context.Context, bs cbor.IpldStore, height int, start, offset uint64, cb func(uint64, *cbg.Deferred) error) error { + if height == 0 { + n.expandValues() + + for i, v := range n.expVals { + if v != nil { + ix := offset + uint64(i) + if ix < start { + continue + } + + if err := cb(offset+uint64(i), v); err != nil { + return err + } + } + } + + return nil + } + + if n.cache == nil { + n.expandLinks() + } + + subCount := nodesForHeight(height) + for i, v := range n.expLinks { + var sub Node + if n.cache[i] != nil { + sub = *n.cache[i] + } else if v != cid.Undef { + if err := bs.Get(ctx, v, &sub); err != nil { + return err + } + } else { + continue + } + + offs := offset + (uint64(i) * subCount) + nextOffs := offs + subCount + if start >= nextOffs { + continue + } + + if err := sub.forEachAt(ctx, bs, height-1, start, offs, cb); err != nil { + return err + } + } + return nil + +} + +func (r *Root) FirstSetIndex(ctx context.Context) (uint64, error) { + return r.Node.firstSetIndex(ctx, r.store, int(r.Height)) +} + +var errNoVals = fmt.Errorf("no values") + +func (n *Node) firstSetIndex(ctx context.Context, bs cbor.IpldStore, height int) (uint64, error) { + if height == 0 { + n.expandValues() + for i, v := range n.expVals { + if v != nil { + return uint64(i), nil + } + } + // Would 
be really weird if we ever actually hit this + return 0, errNoVals + } + + if n.cache == nil { + n.expandLinks() + } + + for i := 0; i < width; i++ { + ok, _ := n.getBit(uint64(i)) + if ok { + subn, err := n.loadNode(ctx, bs, uint64(i), false) + if err != nil { + return 0, err + } + + ix, err := subn.firstSetIndex(ctx, bs, height-1) + if err != nil { + return 0, err + } + + subCount := nodesForHeight(height) + return ix + (uint64(i) * subCount), nil + } + } + + return 0, errNoVals +} + +func (n *Node) expandValues() { + if len(n.expVals) == 0 { + n.expVals = make([]*cbg.Deferred, width) + for x := uint64(0); x < width; x++ { + set, ix := n.getBit(x) + if set { + n.expVals[x] = n.Values[ix] + } + } + } +} + +func (n *Node) set(ctx context.Context, bs cbor.IpldStore, height int, i uint64, val *cbg.Deferred) (bool, error) { + //nfh := nodesForHeight(height) + //fmt.Printf("[set] h: %d, i: %d, subi: %d\n", height, i, i/nfh) + if height == 0 { + n.expandValues() + alreadySet, _ := n.getBit(i) + n.expVals[i] = val + n.setBit(i) + + return !alreadySet, nil + } + + nfh := nodesForHeight(height) + + subn, err := n.loadNode(ctx, bs, i/nfh, true) + if err != nil { + return false, err + } + + return subn.set(ctx, bs, height-1, i%nfh, val) +} + +func (n *Node) getBit(i uint64) (bool, int) { + if i > 7 { + panic("cant deal with wider arrays yet") + } + + if len(n.Bmap) == 0 { + return false, 0 + } + + if n.Bmap[0]&byte(1< 7 { + panic("cant deal with wider arrays yet") + } + + if len(n.Bmap) == 0 { + n.Bmap = [...]byte{0} + } + + n.Bmap[0] = n.Bmap[0] | byte(1< 7 { + panic("cant deal with wider arrays yet") + } + + if len(n.Bmap) == 0 { + panic("invariant violated: called clear bit on empty node") + } + + mask := byte(0xff - (1 << i)) + + n.Bmap[0] = n.Bmap[0] & mask +} + +func (n *Node) expandLinks() { + n.cache = make([]*Node, width) + n.expLinks = make([]cid.Cid, width) + for x := uint64(0); x < width; x++ { + set, ix := n.getBit(x) + if set { + n.expLinks[x] = n.Links[ix] + } 
+ } +} + +func (n *Node) loadNode(ctx context.Context, bs cbor.IpldStore, i uint64, create bool) (*Node, error) { + if n.cache == nil { + n.expandLinks() + } else { + if n := n.cache[i]; n != nil { + return n, nil + } + } + + set, _ := n.getBit(i) + + var subn *Node + if set { + var sn Node + if err := bs.Get(ctx, n.expLinks[i], &sn); err != nil { + return nil, err + } + + subn = &sn + } else { + if create { + subn = &Node{} + n.setBit(i) + } else { + return nil, fmt.Errorf("no node found at (sub)index %d", i) + } + } + n.cache[i] = subn + + return subn, nil +} + +func nodesForHeight(height int) uint64 { + heightLogTwo := uint64(widthBits * height) + if heightLogTwo >= 64 { + // Should never happen. Max height is checked at all entry points. + panic("height overflow") + } + return 1 << heightLogTwo +} + +func (r *Root) Flush(ctx context.Context) (cid.Cid, error) { + if err := r.Node.Flush(ctx, r.store, int(r.Height)); err != nil { + return cid.Undef, err + } + + return r.store.Put(ctx, r) +} + +func (n *Node) empty() bool { + return len(n.Bmap) == 0 || n.Bmap[0] == 0 +} + +func (n *Node) Flush(ctx context.Context, bs cbor.IpldStore, depth int) error { + if depth == 0 { + if len(n.expVals) == 0 { + return nil + } + n.Values = nil + for i := uint64(0); i < width; i++ { + v := n.expVals[i] + if v != nil { + n.Values = append(n.Values, v) + n.setBit(i) + } + } + return nil + } + + if len(n.expLinks) == 0 { + // nothing to do! 
+ return nil + } + + n.Bmap = [...]byte{0} + n.Links = nil + + for i := uint64(0); i < width; i++ { + subn := n.cache[i] + if subn != nil { + if err := subn.Flush(ctx, bs, depth-1); err != nil { + return err + } + + c, err := bs.Put(ctx, subn) + if err != nil { + return err + } + n.expLinks[i] = c + } + + l := n.expLinks[i] + if l != cid.Undef { + n.Links = append(n.Links, l) + n.setBit(i) + } + } + + return nil +} + +type ErrNotFound struct { + Index uint64 +} + +func (e ErrNotFound) Error() string { + return fmt.Sprintf("Index %d not found in AMT", e.Index) +} + +func (e ErrNotFound) NotFound() bool { + return true +} diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v2/cbor_gen.go b/vendor/github.com/filecoin-project/go-amt-ipld/v2/cbor_gen.go new file mode 100644 index 0000000000..f316beedbe --- /dev/null +++ b/vendor/github.com/filecoin-project/go-amt-ipld/v2/cbor_gen.go @@ -0,0 +1,261 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package amt + +import ( + "fmt" + "io" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +var lengthBufRoot = []byte{131} + +func (t *Root) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufRoot); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Height (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Height)); err != nil { + return err + } + + // t.Count (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Count)); err != nil { + return err + } + + // t.Node (amt.Node) (struct) + if err := t.Node.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *Root) UnmarshalCBOR(r io.Reader) error { + *t = Root{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := 
cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Height (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Height = uint64(extra) + + } + // t.Count (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Count = uint64(extra) + + } + // t.Node (amt.Node) (struct) + + { + + if err := t.Node.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Node: %w", err) + } + + } + return nil +} + +var lengthBufNode = []byte{131} + +func (t *Node) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufNode); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Bmap ([1]uint8) (array) + if len(t.Bmap) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Bmap was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Bmap))); err != nil { + return err + } + + if _, err := w.Write(t.Bmap[:]); err != nil { + return err + } + + // t.Links ([]cid.Cid) (slice) + if len(t.Links) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Links was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Links))); err != nil { + return err + } + for _, v := range t.Links { + if err := cbg.WriteCidBuf(scratch, w, v); err != nil { + return xerrors.Errorf("failed writing cid field t.Links: %w", err) + } + } + + // t.Values ([]*typegen.Deferred) (slice) + if 
len(t.Values) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Values was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Values))); err != nil { + return err + } + for _, v := range t.Values { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *Node) UnmarshalCBOR(r io.Reader) error { + *t = Node{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Bmap ([1]uint8) (array) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Bmap: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra != 1 { + return fmt.Errorf("expected array to have 1 elements") + } + + t.Bmap = [1]uint8{} + + if _, err := io.ReadFull(br, t.Bmap[:]); err != nil { + return err + } + // t.Links ([]cid.Cid) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Links: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Links = make([]cid.Cid, extra) + } + + for i := 0; i < int(extra); i++ { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("reading cid field t.Links failed: %w", err) + } + t.Links[i] = c + } + + // t.Values ([]*typegen.Deferred) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Values: array too large (%d)", extra) + } + + if maj != 
cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Values = make([]*cbg.Deferred, extra) + } + + for i := 0; i < int(extra); i++ { + + var v cbg.Deferred + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Values[i] = &v + } + + return nil +} diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v2/go.mod b/vendor/github.com/filecoin-project/go-amt-ipld/v2/go.mod new file mode 100644 index 0000000000..2b36bc7959 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-amt-ipld/v2/go.mod @@ -0,0 +1,13 @@ +module github.com/filecoin-project/go-amt-ipld/v2 + +go 1.12 + +require ( + github.com/ipfs/go-block-format v0.0.2 + github.com/ipfs/go-cid v0.0.6 + github.com/ipfs/go-ipld-cbor v0.0.4 + github.com/ipfs/go-log v1.0.4 + github.com/stretchr/testify v1.6.1 + github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377 + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 +) diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v2/go.sum b/vendor/github.com/filecoin-project/go-amt-ipld/v2/go.sum new file mode 100644 index 0000000000..ebb6eabab7 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-amt-ipld/v2/go.sum @@ -0,0 +1,143 @@ +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/google/go-cmp v0.4.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.3 h1:UIAh32wymBpStoe83YCzwVQQ5Oy/H0FdxvUS6DJDzms= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs= +github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipld-cbor v0.0.4 h1:Aw3KPOKXjvrm6VjwJvFf1F1ekR/BH3jdof3Bk7OTiSA= +github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-format v0.0.1 h1:HCu4eB/Gh+KD/Q0M8u888RFkorTWNIL3da4oc5dwc80= +github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/ipfs/go-log v1.0.4 h1:6nLQdX4W8P9yZZFH7mO+X/PzjN8Laozm/lMJ6esdgzY= +github.com/ipfs/go-log v1.0.4/go.mod 
h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= +github.com/ipfs/go-log/v2 v2.0.5 h1:fL4YI+1g5V/b1Yxr1qAiXTMg1H8z9vx/VmJxBuQMHvU= +github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= +github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78= +github.com/mr-tron/base58 v1.1.2/go.mod 
h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.10 h1:lMoNbh2Ssd9PUF74Nz008KGzGPlfeV6wH3rit5IIGCM= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pkg/errors v0.8.1 
h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992 h1:bzMe+2coZJYHnhGgVlcQKuRy4FSny4ds8dLQjw5P1XE= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa h1:E+gaaifzi2xF65PbDmuKI3PhLWY6G5opMLniFq8vmXA= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 
h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377 h1:LHFlP/ktDvOnCap7PsT87cs7Gwd0p+qv6Qm5g2ZPR+I= +github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint 
v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d h1:Z0Ahzd7HltpJtjAHHxX8QFP3j1yYgiuvjbjRzDj/KH0= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/vendor/github.com/filecoin-project/go-bitfield/LICENSE-APACHE b/vendor/github.com/filecoin-project/go-bitfield/LICENSE-APACHE new file mode 100644 index 0000000000..14478a3b60 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-bitfield/LICENSE-APACHE @@ -0,0 +1,5 @@ +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. diff --git a/vendor/github.com/filecoin-project/go-bitfield/LICENSE-MIT b/vendor/github.com/filecoin-project/go-bitfield/LICENSE-MIT new file mode 100644 index 0000000000..72dc60d84b --- /dev/null +++ b/vendor/github.com/filecoin-project/go-bitfield/LICENSE-MIT @@ -0,0 +1,19 @@ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/filecoin-project/go-bitfield/README.md b/vendor/github.com/filecoin-project/go-bitfield/README.md new file mode 100644 index 0000000000..68c481eff5 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-bitfield/README.md @@ -0,0 +1,16 @@ +# go-bitfield + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![CircleCI](https://circleci.com/gh/filecoin-project/go-bitfield.svg?style=svg)](https://circleci.com/gh/filecoin-project/go-bitfield) +[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) + +> Advanced RLE+ implementation + +Features iterator based primitives that scale with number of runs instead of number of bits. + +## License + +The Filecoin Project is dual-licensed under Apache 2.0 and MIT terms: + +- Apache License, Version 2.0, ([LICENSE-APACHE](https://github.com/filecoin-project/go-bitfield/blob/master/LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) +- MIT license ([LICENSE-MIT](https://github.com/filecoin-project/go-bitfield/blob/master/LICENSE-MIT) or http://opensource.org/licenses/MIT) diff --git a/vendor/github.com/filecoin-project/go-bitfield/bitfield.go b/vendor/github.com/filecoin-project/go-bitfield/bitfield.go new file mode 100644 index 0000000000..591889c4e2 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-bitfield/bitfield.go @@ -0,0 +1,798 @@ +package bitfield + +import ( + "errors" + "fmt" + "io" + + rlepluslazy "github.com/filecoin-project/go-bitfield/rle" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +var ( + ErrBitFieldTooMany = errors.New("to many items in RLE") + ErrNoBitsSet = errors.New("bitfield has no set bits") +) + +// MaxEncodedSize is the maximum encoded size of a bitfield. When expanded into +// a slice of runs, a bitfield of this size should not exceed 2MiB of memory. 
+// +// This bitfield can fit at least 3072 sparse elements. +const MaxEncodedSize = 32 << 10 + +type BitField struct { + rle rlepluslazy.RLE + + set map[uint64]struct{} + unset map[uint64]struct{} +} + +// New constructs a new BitField. +func New() BitField { + bf, err := NewFromBytes([]byte{}) + if err != nil { + panic(fmt.Sprintf("creating empty rle: %+v", err)) + } + return bf +} + +// NewFromBytes deserializes the encoded bitfield. +func NewFromBytes(rle []byte) (BitField, error) { + bf := BitField{} + rlep, err := rlepluslazy.FromBuf(rle) + if err != nil { + return BitField{}, xerrors.Errorf("could not decode rle+: %w", err) + } + bf.rle = rlep + bf.set = make(map[uint64]struct{}) + bf.unset = make(map[uint64]struct{}) + return bf, nil + +} + +func newWithRle(rle rlepluslazy.RLE) BitField { + return BitField{ + set: make(map[uint64]struct{}), + unset: make(map[uint64]struct{}), + rle: rle, + } +} + +// NewFromSet constructs a bitfield from the given set. +func NewFromSet(setBits []uint64) BitField { + res := BitField{ + set: make(map[uint64]struct{}, len(setBits)), + unset: make(map[uint64]struct{}), + } + for _, b := range setBits { + res.set[b] = struct{}{} + } + return res +} + +// NewFromIter constructs a BitField from the RunIterator. +func NewFromIter(r rlepluslazy.RunIterator) (BitField, error) { + buf, err := rlepluslazy.EncodeRuns(r, nil) + if err != nil { + return BitField{}, err + } + + rle, err := rlepluslazy.FromBuf(buf) + if err != nil { + return BitField{}, err + } + + return newWithRle(rle), nil +} + +// MergeBitFields returns the union of the two BitFields. +// +// For example, given two BitFields: +// +// 0 1 1 0 1 +// 1 1 0 1 0 +// +// MergeBitFields would return +// +// 1 1 1 1 1 +// +// This operation's runtime is O(number of runs). 
+func MergeBitFields(a, b BitField) (BitField, error) { + ra, err := a.RunIterator() + if err != nil { + return BitField{}, err + } + + rb, err := b.RunIterator() + if err != nil { + return BitField{}, err + } + + merge, err := rlepluslazy.Or(ra, rb) + if err != nil { + return BitField{}, err + } + + mergebytes, err := rlepluslazy.EncodeRuns(merge, nil) + if err != nil { + return BitField{}, err + } + + rle, err := rlepluslazy.FromBuf(mergebytes) + if err != nil { + return BitField{}, err + } + + return newWithRle(rle), nil +} + +// MultiMerge returns the unions of all the passed BitFields. +// +// Calling MultiMerge is identical to calling MergeBitFields repeatedly, just +// more efficient when merging more than two BitFields. +// +// This operation's runtime is O(number of runs * number of bitfields). +func MultiMerge(bfs ...BitField) (BitField, error) { + if len(bfs) == 0 { + return NewFromSet(nil), nil + } + + iters := make([]rlepluslazy.RunIterator, 0, len(bfs)) + for _, bf := range bfs { + iter, err := bf.RunIterator() + if err != nil { + return BitField{}, err + } + iters = append(iters, iter) + } + + iter, err := rlepluslazy.Union(iters...) + if err != nil { + return BitField{}, err + } + return NewFromIter(iter) +} + +// CutBitField cuts bitfield B from bitfield A. For every bit in B cut from A, +// subsequent entries in A are shifted down by one. +// +// For example: +// +// a: 0 1 0 1 1 1 +// b: 0 1 1 0 0 0 +// +// c: 0 1 1 1 // cut +// c: 0 1 1 1 // remove holes +func CutBitField(a, b BitField) (BitField, error) { + aiter, err := a.RunIterator() + if err != nil { + return BitField{}, err + } + + biter, err := b.RunIterator() + if err != nil { + return BitField{}, err + } + + var ( + run, cutRun rlepluslazy.Run + output []rlepluslazy.Run + ) + for { + if !run.Valid() { + if !aiter.HasNext() { + // All done. 
+ break + } + + run, err = aiter.NextRun() + if err != nil { + return BitField{}, err + } + } + + if !cutRun.Valid() && biter.HasNext() { + cutRun, err = biter.NextRun() + if err != nil { + return BitField{}, err + } + } + + var newRun rlepluslazy.Run + if !cutRun.Valid() { + newRun = run // keep remaining runs + run.Len = 0 + } else if cutRun.Len >= run.Len { + if !cutRun.Val { + newRun = run + } + cutRun.Len -= run.Len + run.Len = 0 + } else { + if !cutRun.Val { + newRun = rlepluslazy.Run{ + Val: run.Val, + Len: cutRun.Len, + } + } + run.Len -= cutRun.Len + cutRun.Len = 0 + } + + if newRun.Valid() { + if len(output) > 0 && output[len(output)-1].Val == newRun.Val { + // Join adjacent runs of 1s. We may cut in the middle of + // a run. + output[len(output)-1].Len += newRun.Len + } else { + output = append(output, newRun) + } + } + } + + buf, err := rlepluslazy.EncodeRuns(&rlepluslazy.RunSliceIterator{Runs: output}, nil) + if err != nil { + return BitField{}, err + } + + rle, err := rlepluslazy.FromBuf(buf) + if err != nil { + return BitField{}, err + } + + return BitField{rle: rle}, nil +} + +func (bf BitField) RunIterator() (rlepluslazy.RunIterator, error) { + iter, err := bf.rle.RunIterator() + if err != nil { + return nil, err + } + if len(bf.set) > 0 { + slc := make([]uint64, 0, len(bf.set)) + for b := range bf.set { + slc = append(slc, b) + } + set, err := rlepluslazy.RunsFromSlice(slc) + if err != nil { + return nil, err + } + newIter, err := rlepluslazy.Or(iter, set) + if err != nil { + return nil, err + } + iter = newIter + } + if len(bf.unset) > 0 { + slc := make([]uint64, 0, len(bf.unset)) + for b := range bf.unset { + slc = append(slc, b) + } + + unset, err := rlepluslazy.RunsFromSlice(slc) + if err != nil { + return nil, err + } + newIter, err := rlepluslazy.Subtract(iter, unset) + if err != nil { + return nil, err + } + iter = newIter + } + return iter, nil +} + +// Set sets the given bit in the BitField +// +// This operation's runtime is O(1) 
up-front. However, it adds an O(bits +// explicitly set) cost to all other operations. +func (bf BitField) Set(bit uint64) { + delete(bf.unset, bit) + bf.set[bit] = struct{}{} +} + +// Unset unsets given bit in the BitField +// +// This operation's runtime is O(1). However, it adds an O(bits +// explicitly unset) cost to all other operations. +func (bf BitField) Unset(bit uint64) { + delete(bf.set, bit) + bf.unset[bit] = struct{}{} +} + +// Count counts the non-zero bits in the bitfield. +// +// For example, given: +// +// 1 0 1 1 +// +// Count() will return 3. +// +// This operation's runtime is O(number of runs). +func (bf BitField) Count() (uint64, error) { + s, err := bf.RunIterator() + if err != nil { + return 0, err + } + return rlepluslazy.Count(s) +} + +// All returns a slice of set bits in sorted order. +// +// For example, given: +// +// 1 0 0 1 +// +// All will return: +// +// []uint64{0, 3} +// +// This operation's runtime is O(number of bits). +func (bf BitField) All(max uint64) ([]uint64, error) { + c, err := bf.Count() + if err != nil { + return nil, xerrors.Errorf("count errror: %w", err) + } + if c > max { + return nil, xerrors.Errorf("expected %d, got %d: %w", max, c, ErrBitFieldTooMany) + } + + runs, err := bf.RunIterator() + if err != nil { + return nil, err + } + + res, err := rlepluslazy.SliceFromRuns(runs) + if err != nil { + return nil, err + } + + return res, nil +} + +// AllMap returns a map of all set bits. +// +// For example, given: +// +// 1 0 0 1 +// +// All will return: +// +// map[uint64]bool{0: true, 3: true} +// +// This operation's runtime is O(number of bits). 
+func (bf BitField) AllMap(max uint64) (map[uint64]bool, error) { + c, err := bf.Count() + if err != nil { + return nil, xerrors.Errorf("count errror: %w", err) + } + if c > max { + return nil, xerrors.Errorf("expected %d, got %d: %w", max, c, ErrBitFieldTooMany) + } + + runs, err := bf.RunIterator() + if err != nil { + return nil, err + } + + res, err := rlepluslazy.SliceFromRuns(runs) + if err != nil { + return nil, err + } + + out := make(map[uint64]bool, len(res)) + for _, i := range res { + out[i] = true + } + return out, nil +} + +func (bf BitField) MarshalCBOR(w io.Writer) error { + var rle []byte + if len(bf.set) == 0 && len(bf.unset) == 0 { + // If unmodified, avoid re-encoding. + rle = bf.rle.Bytes() + } else { + + s, err := bf.RunIterator() + if err != nil { + return err + } + + rle, err = rlepluslazy.EncodeRuns(s, []byte{}) + if err != nil { + return err + } + } + + if len(rle) > MaxEncodedSize { + return xerrors.Errorf("encoded bitfield was too large (%d)", len(rle)) + } + + if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(rle)))); err != nil { + return err + } + if _, err := w.Write(rle); err != nil { + return xerrors.Errorf("writing rle: %w", err) + } + return nil +} + +func (bf *BitField) UnmarshalCBOR(r io.Reader) error { + br := cbg.GetPeeker(r) + + maj, extra, err := cbg.CborReadHeader(br) + if err != nil { + return err + } + if extra > MaxEncodedSize { + return fmt.Errorf("array too large") + } + + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + buf := make([]byte, extra) + if _, err := io.ReadFull(br, buf); err != nil { + return err + } + + rle, err := rlepluslazy.FromBuf(buf) + if err != nil { + return xerrors.Errorf("could not decode rle+: %w", err) + } + bf.rle = rle + bf.set = make(map[uint64]struct{}) + bf.unset = make(map[uint64]struct{}) + + return nil +} + +func (bf BitField) MarshalJSON() ([]byte, error) { + + c, err := bf.Copy() + if err != nil { + return nil, err + } + + 
return c.rle.MarshalJSON() +} + +func (bf *BitField) UnmarshalJSON(b []byte) error { + + err := bf.rle.UnmarshalJSON(b) + if err != nil { + return err + } + bf.set = make(map[uint64]struct{}) + bf.unset = make(map[uint64]struct{}) + return nil +} + +// ForEach iterates over each set bit. +// +// This operation's runtime is O(bits set). +func (bf BitField) ForEach(f func(uint64) error) error { + iter, err := bf.RunIterator() + if err != nil { + return err + } + + var i uint64 + for iter.HasNext() { + r, err := iter.NextRun() + if err != nil { + return err + } + + if r.Val { + for j := uint64(0); j < r.Len; j++ { + if err := f(i); err != nil { + return err + } + i++ + } + } else { + i += r.Len + } + } + return nil +} + +// IsSet returns true if the given bit is set. +// +// This operation's runtime is O(number of runs). +func (bf BitField) IsSet(x uint64) (bool, error) { + if _, ok := bf.set[x]; ok { + return true, nil + } + + if _, ok := bf.unset[x]; ok { + return false, nil + } + + iter, err := bf.rle.RunIterator() + if err != nil { + return false, err + } + + return rlepluslazy.IsSet(iter, x) +} + +// First returns the index of the first set bit. This function returns +// ErrNoBitsSet when no bits have been set. +// +// This operation's runtime is O(1). +func (bf BitField) First() (uint64, error) { + iter, err := bf.RunIterator() + if err != nil { + return 0, err + } + + var i uint64 + for iter.HasNext() { + r, err := iter.NextRun() + if err != nil { + return 0, err + } + + if r.Val { + return i, nil + } else { + i += r.Len + } + } + return 0, ErrNoBitsSet +} + +// Last returns the index of the last set bit. This function returns +// ErrNoBitsSet when no bits have been set. +// +// This operation's runtime is O(n). 
+func (bf BitField) Last() (uint64, error) { + iter, err := bf.RunIterator() + if err != nil { + return 0, err + } + + var ( + at, maxplusone uint64 + ) + for iter.HasNext() { + run, err := iter.NextRun() + if err != nil { + return 0, err + } + + at += run.Len + + if run.Val { + maxplusone = at + } + } + if maxplusone == 0 { + return 0, ErrNoBitsSet + } + return maxplusone - 1, nil +} + +// IsEmpty returns true if the bitset is empty. +// +// This operation's runtime is O(1). +func (bf BitField) IsEmpty() (bool, error) { + _, err := bf.First() + switch err { + case ErrNoBitsSet: + return true, nil + case nil: + return false, nil + default: + return false, err + } +} + +// Slice treats the BitField as an ordered set of set bits, then slices this set. +// +// That is, it skips start set bits, then returns the next count set bits. +// +// For example, given: +// +// 1 0 1 1 0 1 1 +// +// bf.Slice(2, 2) would return: +// +// 0 0 0 1 0 1 0 +// +// This operation's runtime is O(number of runs). 
+func (bf BitField) Slice(start, count uint64) (BitField, error) { + iter, err := bf.RunIterator() + if err != nil { + return BitField{}, err + } + + valsUntilStart := start + + var sliceRuns []rlepluslazy.Run + var i, outcount uint64 + for iter.HasNext() && valsUntilStart > 0 { + r, err := iter.NextRun() + if err != nil { + return BitField{}, err + } + + if r.Val { + if r.Len <= valsUntilStart { + valsUntilStart -= r.Len + i += r.Len + } else { + i += valsUntilStart + + rem := r.Len - valsUntilStart + if rem > count { + rem = count + } + + sliceRuns = append(sliceRuns, + rlepluslazy.Run{Val: false, Len: i}, + rlepluslazy.Run{Val: true, Len: rem}, + ) + outcount += rem + valsUntilStart = 0 + } + } else { + i += r.Len + } + } + + for iter.HasNext() && outcount < count { + r, err := iter.NextRun() + if err != nil { + return BitField{}, err + } + + if r.Val { + if r.Len <= count-outcount { + sliceRuns = append(sliceRuns, r) + outcount += r.Len + } else { + sliceRuns = append(sliceRuns, rlepluslazy.Run{Val: true, Len: count - outcount}) + outcount = count + } + } else { + if len(sliceRuns) == 0 { + r.Len += i + } + sliceRuns = append(sliceRuns, r) + } + } + if outcount < count { + return BitField{}, fmt.Errorf("not enough bits set in field to satisfy slice count") + } + + buf, err := rlepluslazy.EncodeRuns(&rlepluslazy.RunSliceIterator{Runs: sliceRuns}, nil) + if err != nil { + return BitField{}, err + } + + rle, err := rlepluslazy.FromBuf(buf) + if err != nil { + return BitField{}, err + } + + return BitField{rle: rle}, nil +} + +// IntersectBitField returns the intersection of the two BitFields. +// +// For example, given two BitFields: +// +// 0 1 1 0 1 +// 1 1 0 1 0 +// +// IntersectBitField would return +// +// 0 1 0 0 0 +// +// This operation's runtime is O(number of runs). 
+func IntersectBitField(a, b BitField) (BitField, error) { + ar, err := a.RunIterator() + if err != nil { + return BitField{}, err + } + + br, err := b.RunIterator() + if err != nil { + return BitField{}, err + } + + andIter, err := rlepluslazy.And(ar, br) + if err != nil { + return BitField{}, err + } + + buf, err := rlepluslazy.EncodeRuns(andIter, nil) + if err != nil { + return BitField{}, err + } + + rle, err := rlepluslazy.FromBuf(buf) + if err != nil { + return BitField{}, err + } + + return newWithRle(rle), nil +} + +// SubtractBitField returns the difference between the two BitFields. That is, +// it returns a bitfield of all bits set in a but not set in b. +// +// For example, given two BitFields: +// +// 0 1 1 0 1 // a +// 1 1 0 1 0 // b +// +// SubtractBitFields would return +// +// 0 0 1 0 1 +// +// This operation's runtime is O(number of runs). +func SubtractBitField(a, b BitField) (BitField, error) { + ar, err := a.RunIterator() + if err != nil { + return BitField{}, err + } + + br, err := b.RunIterator() + if err != nil { + return BitField{}, err + } + + andIter, err := rlepluslazy.Subtract(ar, br) + if err != nil { + return BitField{}, err + } + + buf, err := rlepluslazy.EncodeRuns(andIter, nil) + if err != nil { + return BitField{}, err + } + + rle, err := rlepluslazy.FromBuf(buf) + if err != nil { + return BitField{}, err + } + + return newWithRle(rle), nil +} + +// Copy flushes the bitfield and returns a copy that can be mutated +// without changing the original values +func (bf BitField) Copy() (BitField, error) { + r, err := bf.RunIterator() + if err != nil { + return BitField{}, err + } + + buf, err := rlepluslazy.EncodeRuns(r, nil) + if err != nil { + return BitField{}, err + } + + rle, err := rlepluslazy.FromBuf(buf) + if err != nil { + return BitField{}, err + } + + return newWithRle(rle), nil +} + +// BitIterator iterates over the bits in the bitmap +func (bf BitField) BitIterator() (rlepluslazy.BitIterator, error) { + r, err := 
bf.RunIterator() + if err != nil { + return nil, err + } + return rlepluslazy.BitsFromRuns(r) +} diff --git a/vendor/github.com/filecoin-project/go-bitfield/go.mod b/vendor/github.com/filecoin-project/go-bitfield/go.mod new file mode 100644 index 0000000000..40fd22aa19 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-bitfield/go.mod @@ -0,0 +1,10 @@ +module github.com/filecoin-project/go-bitfield + +go 1.13 + +require ( + github.com/ipfs/go-cid v0.0.5 // indirect + github.com/stretchr/testify v1.4.0 + github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 +) diff --git a/vendor/github.com/filecoin-project/go-bitfield/go.sum b/vendor/github.com/filecoin-project/go-bitfield/go.sum new file mode 100644 index 0000000000..60b96d700b --- /dev/null +++ b/vendor/github.com/filecoin-project/go-bitfield/go.sum @@ -0,0 +1,58 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/ipfs/go-cid v0.0.3 h1:UIAh32wymBpStoe83YCzwVQQ5Oy/H0FdxvUS6DJDzms= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= +github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= 
+github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e h1:JY8o/ebUUrCYetWmjRCNghxC59cOEaili83rxPRQCLw= +github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d h1:Z0Ahzd7HltpJtjAHHxX8QFP3j1yYgiuvjbjRzDj/KH0= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/bits.go b/vendor/github.com/filecoin-project/go-bitfield/rle/bits.go new file mode 100644 index 0000000000..cef81fdb45 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-bitfield/rle/bits.go @@ -0,0 +1,195 @@ +package rlepluslazy + +import ( + "errors" + "sort" +) + +var ErrEndOfIterator = errors.New("end of iterator") + +type it2b struct { + source RunIterator + curIdx uint64 + + run Run +} + +func (it *it2b) HasNext() bool { + return it.run.Valid() +} + +func (it *it2b) Next() (uint64, error) { + it.run.Len-- + res := it.curIdx + it.curIdx++ + return res, it.prep() +} + +func (it *it2b) Nth(n uint64) (uint64, error) { + skip := n + 1 + for it.run.Len < skip { + if !it.HasNext() { + return 0, ErrEndOfIterator + } + skip -= it.run.Len + it.curIdx += it.run.Len + it.run.Len = 0 + if err := it.prep(); err != nil { + return 0, err + } + } + it.run.Len -= skip + it.curIdx += skip + res := it.curIdx - 1 + return res, it.prep() +} + +func (it *it2b) prep() error { + for !it.run.Valid() && it.source.HasNext() { + var err error + it.run, err = it.source.NextRun() + if err != nil { + return err + } + + if !it.run.Val { + it.curIdx += it.run.Len + it.run.Len = 0 + } + } + return nil +} + +func BitsFromRuns(source RunIterator) (BitIterator, error) { + it := &it2b{source: source} + if err 
:= it.prep(); err != nil { + return nil, err + } + return it, nil +} + +type sliceIt struct { + s []uint64 +} + +func (it sliceIt) HasNext() bool { + return len(it.s) != 0 +} + +func (it *sliceIt) Next() (uint64, error) { + if len(it.s) == 0 { + return 0, ErrEndOfIterator + } + res := it.s[0] + it.s = it.s[1:] + return res, nil +} + +func (it *sliceIt) Nth(n uint64) (uint64, error) { + if uint64(len(it.s)) <= n { + it.s = nil + return 0, ErrEndOfIterator + } + res := it.s[n] + it.s = it.s[n+1:] + return res, nil +} + +func BitsFromSlice(slice []uint64) BitIterator { + sort.Slice(slice, func(i, j int) bool { return slice[i] < slice[j] }) + return &sliceIt{slice} +} + +type it2r struct { + source BitIterator + + runIdx uint64 + run [2]Run +} + +func (it *it2r) HasNext() bool { + return it.run[0].Valid() +} + +func (it *it2r) NextRun() (Run, error) { + res := it.run[0] + it.runIdx = it.runIdx + res.Len + it.run[0], it.run[1] = it.run[1], Run{} + return res, it.prep() +} + +func (it *it2r) prep() error { + if !it.HasNext() { + return nil + } + if !it.run[0].Val { + it.run[1].Val = true + it.run[1].Len = 1 + return nil + } + + for it.source.HasNext() && !it.run[1].Valid() { + nB, err := it.source.Next() + if err != nil { + return err + } + + //fmt.Printf("runIdx: %d, run[0].Len: %d, nB: %d\n", it.runIdx, it.run[0].Len, nB) + if it.runIdx+it.run[0].Len == nB { + it.run[0].Len++ + } else { + it.run[1].Len = nB - it.runIdx - it.run[0].Len + it.run[1].Val = false + } + } + return nil +} + +func (it *it2r) init() error { + if it.source.HasNext() { + nB, err := it.source.Next() + if err != nil { + return err + } + it.run[0].Len = nB + it.run[0].Val = false + it.run[1].Len = 1 + it.run[1].Val = true + } + + if !it.run[0].Valid() { + it.run[0], it.run[1] = it.run[1], Run{} + return it.prep() + } + return nil +} + +func SliceFromRuns(source RunIterator) ([]uint64, error) { + rit, err := BitsFromRuns(source) + if err != nil { + return nil, err + } + + res := make([]uint64, 0) + 
for rit.HasNext() { + bit, err := rit.Next() + if err != nil { + return nil, err + } + res = append(res, bit) + } + return res, nil +} + +func RunsFromBits(source BitIterator) (RunIterator, error) { + it := &it2r{source: source} + + if err := it.init(); err != nil { + return nil, err + } + return it, nil +} + +func RunsFromSlice(slice []uint64) (RunIterator, error) { + return RunsFromBits(BitsFromSlice(slice)) +} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/bitvec.go b/vendor/github.com/filecoin-project/go-bitfield/rle/bitvec.go new file mode 100644 index 0000000000..df6f77ad39 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-bitfield/rle/bitvec.go @@ -0,0 +1,146 @@ +package rlepluslazy + +type rbitvec struct { + index int + + bits uint16 + bitCap byte + + vec []byte +} + +func readBitvec(vec []byte) *rbitvec { + bv := &rbitvec{ + vec: vec, + index: 1, + bitCap: 8, + } + if len(vec) > 0 { + bv.bits = uint16(bv.vec[0]) + } + return bv +} + +// bitMasks is a mask for selecting N first bits out of a byte +var bitMasks = [9]byte{ + 0x0, + 0x1, + 0x3, + 0x7, + 0xF, + 0x1F, + 0x3F, + 0x7F, + 0xFF, +} + +func (bv *rbitvec) GetByte() byte { + // Advancing byte by byte is simpler than advancing an odd number of + // bits because we _always_ load the next byte. + res := byte(bv.bits) + bv.bits >>= 8 + + if bv.index < len(bv.vec) { // if vector allows + // add bits onto the end of temporary storage + bv.bits |= uint16(bv.vec[bv.index]) << (bv.bitCap - 8) + } + + bv.index += 1 + return res +} + +func (bv *rbitvec) GetBit() bool { + // The specialized GetBit is easier for the compiler to optimize, for some reason. + + res := (bv.bits&0x1 != 0) + bv.bits >>= 1 + bv.bitCap -= 1 + + if bv.index < len(bv.vec) { // if vector allows + // add bits onto the end of temporary storage + bv.bits |= uint16(bv.vec[bv.index]) << bv.bitCap + } + + // When we advance one by one, this branch is very predictable (and + // faster than fancy math). 
+ if bv.bitCap < 8 { + bv.index += 1 + bv.bitCap += 8 + } + return res +} + +func (bv *rbitvec) Get(count byte) byte { + res := byte(bv.bits) & bitMasks[count] // select count bits + bv.bits >>= count // remove those bits from storage + bv.bitCap -= count // decrease nuber of stored bits + + if bv.index < len(bv.vec) { // if vector allows + // add bits onto the end of temporary storage + bv.bits |= uint16(bv.vec[bv.index]) << bv.bitCap + } + + // Here be dragons + // This is equivalent to + // if bv.bitCap < 8 { + // bv.index++ + // bv.bitCap = bv.bitCap + 8 + // } + // but implemented without branches because the branch here is unpredictable + // Why this is without branches and reading has branch? + // Because branch above is predictable, in 99.99% of cases it will be true + + // if bitCap < 8 it underflows, then high bits get set to 1s + // we shift by 7 so the highest bit is in place of the lowest + inc := (bv.bitCap - 8) >> 7 // inc == 1 iff bitcap<8 (+10% perf) + bv.index += int(inc) // increase index if we need more bits + bv.bitCap += inc * 8 // increase bitCap by 8 + + return res +} + +func writeBitvec(buf []byte) *wbitvec { + // reslice to 0 length for consistent input but to keep capacity + return &wbitvec{buf: buf[:0]} +} + +type wbitvec struct { + buf []byte // buffer we will be saving to + + bits uint16 // temporary storage for bits + bitCap byte // number of bits stored in temporary storage +} + +// Returns the resulting bitvector, with any trailing zero bytes removed. +func (bv *wbitvec) Out() []byte { + if bv.bitCap != 0 { + // if there are some bits in temporary storage we need to save them + bv.buf = append(bv.buf, byte(bv.bits)) + } + if bv.bitCap > 8 { + // if we store some needed bits in second byte, save them also + bv.buf = append(bv.buf, byte(bv.bits>>8)) + } + bv.bitCap = 0 + bv.bits = 0 + + // Minimally encode. 
+ for len(bv.buf) > 0 && bv.buf[len(bv.buf)-1] == 0 { + bv.buf = bv.buf[:len(bv.buf)-1] + } + + return bv.buf +} + +func (bv *wbitvec) Put(val byte, count byte) { + // put val into its place in bv.bits + bv.bits = bv.bits | uint16(val)<= 8 { + bv.buf = append(bv.buf, byte(bv.bits)) + bv.bitCap -= 8 + bv.bits >>= 8 + } +} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/interface.go b/vendor/github.com/filecoin-project/go-bitfield/rle/interface.go new file mode 100644 index 0000000000..f76de1d189 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-bitfield/rle/interface.go @@ -0,0 +1,25 @@ +package rlepluslazy + +type Run struct { + Val bool + Len uint64 +} + +func (r Run) Valid() bool { + return r.Len != 0 +} + +type RunIterator interface { + NextRun() (Run, error) + HasNext() bool +} + +type RunIterable interface { + RunIterator() (RunIterator, error) +} + +type BitIterator interface { + Next() (uint64, error) + Nth(n uint64) (uint64, error) + HasNext() bool +} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/joinclose.go b/vendor/github.com/filecoin-project/go-bitfield/rle/joinclose.go new file mode 100644 index 0000000000..1fd85bb362 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-bitfield/rle/joinclose.go @@ -0,0 +1,65 @@ +package rlepluslazy + +func JoinClose(it RunIterator, closeness uint64) (RunIterator, error) { + jc := &jcIt{ + it: &peekIter{it: it}, + closeness: closeness, + } + if err := jc.prep(); err != nil { + return nil, err + } + return jc, nil +} + +type jcIt struct { + it *peekIter + run Run + + closeness uint64 +} + +func (jc *jcIt) prep() error { + if !jc.it.HasNext() { + jc.run = Run{} + return nil + } + + var err error + jc.run, err = jc.it.NextRun() + if err != nil { + return err + } + + if jc.run.Val { + for { + if jc.it.HasNext() { + run, err := jc.it.NextRun() + if err != nil { + return err + } + if run.Len <= jc.closeness || run.Val { + jc.run.Len += run.Len + continue + } else { + 
jc.it.put(run, err) + break + } + } + break + } + } + return nil +} + +func (jc *jcIt) HasNext() bool { + return jc.run.Valid() +} + +func (jc *jcIt) NextRun() (Run, error) { + out := jc.run + if err := jc.prep(); err != nil { + return Run{}, err + } + return out, nil + +} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/merge.go b/vendor/github.com/filecoin-project/go-bitfield/rle/merge.go new file mode 100644 index 0000000000..9c18f4bfcc --- /dev/null +++ b/vendor/github.com/filecoin-project/go-bitfield/rle/merge.go @@ -0,0 +1,31 @@ +package rlepluslazy + +// Union returns the union of the passed iterators. Internally, this calls Or on +// the passed iterators, combining them with a binary tree of Ors. +func Union(iters ...RunIterator) (RunIterator, error) { + if len(iters) == 0 { + return RunsFromSlice(nil) + } + + for len(iters) > 1 { + var next []RunIterator + + for i := 0; i < len(iters); i += 2 { + if i+1 >= len(iters) { + next = append(next, iters[i]) + continue + } + + orit, err := Or(iters[i], iters[i+1]) + if err != nil { + return nil, err + } + + next = append(next, orit) + } + + iters = next + } + + return iters[0], nil +} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus.go b/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus.go new file mode 100644 index 0000000000..fe19c17971 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus.go @@ -0,0 +1,150 @@ +package rlepluslazy + +import ( + "encoding/json" + "errors" + "fmt" + "math" + + "golang.org/x/xerrors" +) + +const Version = 0 + +var ( + ErrWrongVersion = errors.New("invalid RLE+ version") + ErrDecode = fmt.Errorf("invalid encoding for RLE+ version %d", Version) +) + +type RLE struct { + buf []byte + validated bool +} + +func FromBuf(buf []byte) (RLE, error) { + rle := RLE{buf: buf} + + if len(buf) > 0 && buf[0]&3 != Version { + return RLE{}, xerrors.Errorf("could not create RLE+ for a buffer: %w", ErrWrongVersion) + } + + return 
rle, nil +} + +// Bytes returns the encoded RLE. +// +// Do not modify. +func (rle *RLE) Bytes() []byte { + return rle.buf +} + +// Validate is a separate function to show up on profile for repeated decode evaluation +func (rle *RLE) Validate() error { + if !rle.validated { + source, err := DecodeRLE(rle.buf) + if err != nil { + return xerrors.Errorf("decoding RLE: %w", err) + } + var length uint64 + + for source.HasNext() { + r, err := source.NextRun() + if err != nil { + return xerrors.Errorf("reading run: %w", err) + } + if math.MaxUint64-r.Len < length { + return xerrors.New("RLE+ overflows") + } + length += r.Len + } + rle.validated = true + } + return nil +} + +func (rle *RLE) RunIterator() (RunIterator, error) { + err := rle.Validate() + if err != nil { + return nil, xerrors.Errorf("validation failed: %w", err) + } + + source, err := DecodeRLE(rle.buf) + if err != nil { + return nil, xerrors.Errorf("decoding RLE: %w", err) + } + + return source, nil +} + +func (rle *RLE) Count() (uint64, error) { + it, err := rle.RunIterator() + if err != nil { + return 0, err + } + return Count(it) +} + +// Encoded as an array of run-lengths, always starting with zeroes (absent values) +// E.g.: The set {0, 1, 2, 8, 9} is the bitfield 1110000011, and would be marshalled as [0, 3, 5, 2] +func (rle *RLE) MarshalJSON() ([]byte, error) { + r, err := rle.RunIterator() + if err != nil { + return nil, err + } + + var ret []uint64 + if r.HasNext() { + first, err := r.NextRun() + if err != nil { + return nil, err + } + if first.Val { + ret = append(ret, 0) + } + ret = append(ret, first.Len) + + for r.HasNext() { + next, err := r.NextRun() + if err != nil { + return nil, err + } + + ret = append(ret, next.Len) + } + } else { + ret = []uint64{0} + } + + return json.Marshal(ret) +} + +func (rle *RLE) UnmarshalJSON(b []byte) error { + var buf []uint64 + + if err := json.Unmarshal(b, &buf); err != nil { + return err + } + + runs := []Run{} + val := false + for i, v := range buf { + if v 
== 0 { + if i != 0 { + return xerrors.New("Cannot have a zero-length run except at start") + } + } else { + runs = append(runs, Run{ + Val: val, + Len: v, + }) + } + val = !val + } + enc, err := EncodeRuns(&RunSliceIterator{Runs: runs}, []byte{}) + if err != nil { + return xerrors.Errorf("encoding runs: %w", err) + } + rle.buf = enc + + return nil +} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_reader.go b/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_reader.go new file mode 100644 index 0000000000..fe824e6ceb --- /dev/null +++ b/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_reader.go @@ -0,0 +1,78 @@ +package rlepluslazy + +import ( + "golang.org/x/xerrors" +) + +func DecodeRLE(buf []byte) (RunIterator, error) { + if len(buf) > 0 && buf[len(buf)-1] == 0 { + // trailing zeros bytes not allowed. + return nil, xerrors.Errorf("not minimally encoded: %w", ErrDecode) + } + + bv := readBitvec(buf) + + ver := bv.Get(2) // Read version + if ver != Version { + return nil, ErrWrongVersion + } + + it := &rleIterator{bv: bv} + + // next run is previous in relation to prep + // so we invert the value + it.nextRun.Val = bv.Get(1) != 1 + if err := it.prep(); err != nil { + return nil, err + } + return it, nil +} + +type rleIterator struct { + bv *rbitvec + + nextRun Run +} + +func (it *rleIterator) HasNext() bool { + return it.nextRun.Valid() +} + +func (it *rleIterator) NextRun() (Run, error) { + ret := it.nextRun + return ret, it.prep() +} + +func (it *rleIterator) prep() error { + if it.bv.GetBit() { + it.nextRun.Len = 1 + } else if it.bv.GetBit() { + it.nextRun.Len = uint64(it.bv.Get(4)) + } else { + // Modified from the go standard library. Copyright the Go Authors and + // released under the BSD License. 
+ var x uint64 + var s uint + for i := 0; ; i++ { + if i == 10 { + return xerrors.Errorf("run too long: %w", ErrDecode) + } + b := it.bv.GetByte() + if b < 0x80 { + if i > 9 || i == 9 && b > 1 { + return xerrors.Errorf("run too long: %w", ErrDecode) + } else if b == 0 && s > 0 { + return xerrors.Errorf("invalid run: %w", ErrDecode) + } + x |= uint64(b) << s + break + } + x |= uint64(b&0x7f) << s + s += 7 + } + it.nextRun.Len = x + } + + it.nextRun.Val = !it.nextRun.Val + return nil +} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_writer.go b/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_writer.go new file mode 100644 index 0000000000..4e1b7661aa --- /dev/null +++ b/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_writer.go @@ -0,0 +1,63 @@ +package rlepluslazy + +import ( + "encoding/binary" + "errors" +) + +var ErrSameValRuns = errors.New("2 consecutive runs with the same value") + +func EncodeRuns(rit RunIterator, buf []byte) ([]byte, error) { + rit = newNormIter(rit) + + bv := writeBitvec(buf) + bv.Put(0, 2) + + first := true + varBuf := make([]byte, binary.MaxVarintLen64) + prev := false + + for rit.HasNext() { + run, err := rit.NextRun() + if err != nil { + return nil, err + } + + if first { + if run.Val { + bv.Put(1, 1) + } else { + bv.Put(0, 1) + } + prev = run.Val + first = false + } else { + if prev == run.Val { + return nil, ErrSameValRuns + } + prev = run.Val + } + + switch { + case run.Len == 1: + bv.Put(1, 1) + case run.Len < 16: + bv.Put(2, 2) + bv.Put(byte(run.Len), 4) + case run.Len >= 16: + bv.Put(0, 2) + numBytes := binary.PutUvarint(varBuf, run.Len) + for i := 0; i < numBytes; i++ { + bv.Put(varBuf[i], 8) + } + } + + } + + if first { + bv.Put(0, 1) + } + + return bv.Out(), nil + +} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/runs.go b/vendor/github.com/filecoin-project/go-bitfield/rle/runs.go new file mode 100644 index 0000000000..69dc36e540 --- /dev/null +++ 
b/vendor/github.com/filecoin-project/go-bitfield/rle/runs.go @@ -0,0 +1,377 @@ +package rlepluslazy + +import ( + "fmt" + "math" + + "golang.org/x/xerrors" +) + +func Or(a, b RunIterator) (RunIterator, error) { + it := addIt{a: a, b: b} + return &it, it.prep() +} + +type addIt struct { + a RunIterator + b RunIterator + + next Run + + arun Run + brun Run +} + +func (it *addIt) prep() error { + var err error + + fetch := func() error { + if !it.arun.Valid() && it.a.HasNext() { + it.arun, err = it.a.NextRun() + if err != nil { + return err + } + } + + if !it.brun.Valid() && it.b.HasNext() { + it.brun, err = it.b.NextRun() + if err != nil { + return err + } + } + return nil + } + + if err := fetch(); err != nil { + return err + } + + // one is not valid + if !it.arun.Valid() { + it.next = it.brun + it.brun.Len = 0 + return nil + } + + if !it.brun.Valid() { + it.next = it.arun + it.arun.Len = 0 + return nil + } + + if !it.arun.Val && !it.brun.Val { + min := it.arun.Len + if it.brun.Len < min { + min = it.brun.Len + } + it.next = Run{Val: it.arun.Val, Len: min} + it.arun.Len -= it.next.Len + it.brun.Len -= it.next.Len + + if err := fetch(); err != nil { + return err + } + trailingRun := func(r1, r2 Run) bool { + return !r1.Valid() && r2.Val == it.next.Val + } + if trailingRun(it.arun, it.brun) || trailingRun(it.brun, it.arun) { + it.next.Len += it.arun.Len + it.next.Len += it.brun.Len + it.arun.Len = 0 + it.brun.Len = 0 + } + + return nil + } + + it.next = Run{Val: true} + // different vals, 'true' wins + for (it.arun.Val && it.arun.Valid()) || (it.brun.Val && it.brun.Valid()) { + min := it.arun.Len + if it.brun.Len < min && it.brun.Valid() || !it.arun.Valid() { + min = it.brun.Len + } + it.next.Len += min + if it.arun.Valid() { + it.arun.Len -= min + } + if it.brun.Valid() { + it.brun.Len -= min + } + if err := fetch(); err != nil { + return err + } + } + + return nil +} + +func (it *addIt) HasNext() bool { + return it.next.Valid() +} + +func (it *addIt) NextRun() (Run, 
error) { + next := it.next + return next, it.prep() +} + +func Count(ri RunIterator) (uint64, error) { + var length uint64 + var count uint64 + + for ri.HasNext() { + r, err := ri.NextRun() + if err != nil { + return 0, err + } + + if math.MaxUint64-r.Len < length { + return 0, xerrors.New("RLE+ overflows") + } + length += r.Len + + if r.Val { + count += r.Len + } + } + return count, nil +} + +func IsSet(ri RunIterator, x uint64) (bool, error) { + var i uint64 + for ri.HasNext() { + r, err := ri.NextRun() + if err != nil { + return false, err + } + + if i+r.Len > x { + return r.Val, nil + } + + i += r.Len + } + return false, nil +} + +func min(a, b uint64) uint64 { + if a < b { + return a + } + return b +} + +type andIter struct { + a, b RunIterator + ar, br Run +} + +func (ai *andIter) HasNext() bool { + return (ai.ar.Valid() || ai.a.HasNext()) && (ai.br.Valid() || ai.b.HasNext()) +} + +func (ai *andIter) NextRun() (run Run, err error) { + for { + // Ensure we have two valid runs. + if !ai.ar.Valid() { + if !ai.a.HasNext() { + break + } + ai.ar, err = ai.a.NextRun() + if err != nil { + return Run{}, err + } + } + + if !ai.br.Valid() { + if !ai.b.HasNext() { + break + } + ai.br, err = ai.b.NextRun() + if err != nil { + return Run{}, err + } + } + + // && + newVal := ai.ar.Val && ai.br.Val + + // Check to see if we have an ongoing run and if we've changed + // value. 
+ if run.Len > 0 && run.Val != newVal { + return run, nil + } + + newLen := min(ai.ar.Len, ai.br.Len) + + run.Val = newVal + run.Len += newLen + ai.ar.Len -= newLen + ai.br.Len -= newLen + } + + if run.Valid() { + return run, nil + } + + return Run{}, fmt.Errorf("end of runs") +} + +func And(a, b RunIterator) (RunIterator, error) { + return &andIter{a: a, b: b}, nil +} + +type RunSliceIterator struct { + Runs []Run + i int +} + +func (ri *RunSliceIterator) HasNext() bool { + return ri.i < len(ri.Runs) +} + +func (ri *RunSliceIterator) NextRun() (Run, error) { + if ri.i >= len(ri.Runs) { + return Run{}, fmt.Errorf("end of runs") + } + + out := ri.Runs[ri.i] + ri.i++ + return out, nil +} + +type notIter struct { + it RunIterator +} + +func (ni *notIter) HasNext() bool { + return true +} + +func (ni *notIter) NextRun() (Run, error) { + if !ni.it.HasNext() { + return Run{ + Val: true, + Len: 40_000_000_000_000, // close enough to infinity + }, nil + } + + nr, err := ni.it.NextRun() + if err != nil { + return Run{}, err + } + + nr.Val = !nr.Val + return nr, nil +} + +func Subtract(a, b RunIterator) (RunIterator, error) { + return And(a, ¬Iter{it: b}) +} + +type nextRun struct { + set bool + run Run + err error +} + +type peekIter struct { + it RunIterator + stash nextRun +} + +func (it *peekIter) HasNext() bool { + if it.stash.set { + return true + } + return it.it.HasNext() +} + +func (it *peekIter) NextRun() (Run, error) { + if it.stash.set { + run := it.stash.run + err := it.stash.err + it.stash = nextRun{} + return run, err + } + + return it.it.NextRun() +} + +func (it *peekIter) put(run Run, err error) { + it.stash = nextRun{ + set: true, + run: run, + err: err, + } +} + +// normIter trims the last run of 0s +type normIter struct { + it *peekIter +} + +func newNormIter(it RunIterator) *normIter { + if nit, ok := it.(*normIter); ok { + return nit + } + return &normIter{ + it: &peekIter{ + it: it, + }, + } +} + +func (it *normIter) HasNext() bool { + if 
!it.it.HasNext() { + return false + } + + // check if this is the last run + cur, err := it.it.NextRun() + if err != nil { + it.it.put(cur, err) + return true + } + + notLast := it.it.HasNext() + it.it.put(cur, err) + if notLast { + return true + } + + return cur.Val +} + +func (it *normIter) NextRun() (Run, error) { + return it.it.NextRun() +} + +// Returns iterator with all bits up to the last bit set: +// in: 11100000111010001110000 +// out: 1111111111111111111 +func Fill(iter RunIterator) (RunIterator, error) { + var at, length uint64 + for iter.HasNext() { + r, err := iter.NextRun() + if err != nil { + return nil, err + } + + at += r.Len + + if r.Val { + length = at + } + } + + var runs []Run + if length > 0 { + runs = append(runs, Run{ + Val: true, + Len: length, + }) + } + + return &RunSliceIterator{Runs: runs}, nil +} diff --git a/vendor/github.com/filecoin-project/go-cbor-util/.circleci/config.yml b/vendor/github.com/filecoin-project/go-cbor-util/.circleci/config.yml new file mode 100644 index 0000000000..521d7fb390 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-cbor-util/.circleci/config.yml @@ -0,0 +1,161 @@ +version: 2.1 +orbs: + go: gotest/tools@0.0.9 + +executors: + golang: + docker: + - image: circleci/golang:1.13 + resource_class: small + +commands: + install-deps: + steps: + - go/install-ssh + - go/install: {package: git} + prepare: + parameters: + linux: + default: true + description: is a linux build environment? + type: boolean + steps: + - checkout + - when: + condition: << parameters.linux >> + steps: + - run: sudo apt-get update + - run: sudo apt-get install ocl-icd-opencl-dev + build-all: + + +jobs: + mod-tidy-check: + executor: golang + steps: + - install-deps + - prepare + - go/mod-download + - go/mod-tidy-check + + test: &test + description: | + Run tests with gotestsum. + parameters: + executor: + type: executor + default: golang + go-test-flags: + type: string + default: "-timeout 5m" + description: Flags passed to go test. 
+ packages: + type: string + default: "./..." + description: Import paths of packages to be tested. + test-suite-name: + type: string + default: unit + description: Test suite name to report to CircleCI. + gotestsum-format: + type: string + default: short + description: gotestsum format. https://github.com/gotestyourself/gotestsum#format + coverage: + type: string + default: -coverprofile=coverage.txt + description: Coverage flag. Set to the empty string to disable. + codecov-upload: + type: boolean + default: false + description: | + Upload coverage report to https://codecov.io/. Requires the codecov API token to be + set as an environment variable for private projects. + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - go/mod-download + - restore_cache: + name: restore go mod cache + key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} + - go/install-gotestsum: + gobin: $HOME/.local/bin + - run: + name: go test + environment: + GOTESTSUM_JUNITFILE: /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml + GOTESTSUM_FORMAT: << parameters.gotestsum-format >> + command: | + mkdir -p /tmp/test-reports/<< parameters.test-suite-name >> + gotestsum -- \ + << parameters.coverage >> \ + << parameters.go-test-flags >> \ + << parameters.packages >> + no_output_timeout: 30m + - store_test_results: + path: /tmp/test-reports + - when: + condition: << parameters.codecov-upload >> + steps: + - go/install: {package: bash} + - go/install: {package: curl} + - run: + shell: /bin/bash -eo pipefail + command: | + bash <(curl -s https://codecov.io/bash) + - save_cache: + name: save go mod cache + key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} + paths: + - "~/go/pkg" + - "~/go/src/github.com" + - "~/go/src/golang.org" + + lint: &lint + description: | + Run golangci-lint. 
+ parameters: + executor: + type: executor + default: golang + golangci-lint-version: + type: string + default: 1.21.0 + concurrency: + type: string + default: '2' + description: | + Concurrency used to run linters. Defaults to 2 because NumCPU is not + aware of container CPU limits. + args: + type: string + default: '' + description: | + Arguments to pass to golangci-lint + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - go/install-golangci-lint: + gobin: $HOME/.local/bin + version: << parameters.golangci-lint-version >> + - run: + name: Lint + command: | + $HOME/.local/bin/golangci-lint run -v \ + --concurrency << parameters.concurrency >> << parameters.args >> + lint-changes: + <<: *lint + + lint-all: + <<: *lint + +workflows: + version: 2.1 + ci: + jobs: + - lint-changes: + args: "--new-from-rev origin/master" + - test + - mod-tidy-check diff --git a/vendor/github.com/filecoin-project/go-cbor-util/CONTRIBUTING.md b/vendor/github.com/filecoin-project/go-cbor-util/CONTRIBUTING.md new file mode 100644 index 0000000000..cd329d7929 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-cbor-util/CONTRIBUTING.md @@ -0,0 +1,52 @@ +# Contributing to this repo + +First, thank you for your interest in contributing to this project! Before you pick up your first issue and start +changing code, please: + +1. Review all documentation for the module you're interested in. +1. Look through the [issues for this repo](https://github.com/filecoin-project/go-cbor-util/issues) for relevant discussions. +1. If you have questions about an issue, post a comment in the issue. +1. If you want to submit changes that aren't covered by an issue, file a new one with your proposal, outlining what problem you found/feature you want to implement, and how you intend to implement a solution. + +For best results, before submitting a PR, make sure: +1. It has met all acceptance criteria for the issue. +1. 
It addresses only the one issue and does not make other, irrelevant changes. +1. Your code conforms to our coding style guide. +1. You have adequate test coverage (this should be indicated by CI results anyway). +1. If you like, check out [current PRs](https://github.com/filecoin-project/go-cbor-util/pulls) to see how others do it. + +Special Note: +If editing README.md, please conform to the [standard readme specification](https://github.com/RichardLitt/standard-readme/blob/master/spec.md). + +Before a PR can be merged to `master`, it must: +1. Pass continuous integration. +1. Be approved by at least two maintainers + +### Testing + +- All new code should be accompanied by unit tests. Prefer focused unit tests to integration tests for thorough validation of behaviour. Existing code is not necessarily a good model, here. +- Integration tests should test integration, not comprehensive functionality +- Tests should be placed in a separate package named `$PACKAGE_test`. For example, a test of the `chain` package should live in a package named `chain_test`. In limited situations, exceptions may be made for some "white box" tests placed in the same package as the code it tests. + +### Conventions and Style + +#### Imports +We use the following import ordering. +``` +import ( + [stdlib packages, alpha-sorted] + + [external packages] + + [go-cbor-util packages] +) +``` + +Where a package name does not match its directory name, an explicit alias is expected (`goimports` will add this for you). + +#### Comments +Comments are a communication to other developers (including your future self) to help them understand and maintain code. Good comments describe the _intent_ of the code, without repeating the procedures directly. + +- A `TODO:` comment describes a change that is desired but could not be immediately implemented. It must include a reference to a GitHub issue outlining whatever prevents the thing being done now (which could just be a matter of priority). 
+- A `NOTE:` comment indicates an aside, some background info, or ideas for future improvement, rather than the intent of the current code. It's often fine to document such ideas alongside the code rather than an issue (at the loss of a space for discussion). +- `FIXME`, `HACK`, `XXX` and similar tags indicating that some code is to be avoided in favour of `TODO`, `NOTE` or some straight prose. \ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-cbor-util/COPYRIGHT b/vendor/github.com/filecoin-project/go-cbor-util/COPYRIGHT new file mode 100644 index 0000000000..771e6f7cd7 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-cbor-util/COPYRIGHT @@ -0,0 +1,3 @@ +Copyright 2019. Protocol Labs, Inc. + +This library is dual-licensed under Apache 2.0 and MIT terms. diff --git a/vendor/github.com/filecoin-project/go-cbor-util/LICENSE-APACHE b/vendor/github.com/filecoin-project/go-cbor-util/LICENSE-APACHE new file mode 100644 index 0000000000..546514363d --- /dev/null +++ b/vendor/github.com/filecoin-project/go-cbor-util/LICENSE-APACHE @@ -0,0 +1,13 @@ +Copyright 2019. Protocol Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/filecoin-project/go-cbor-util/LICENSE-MIT b/vendor/github.com/filecoin-project/go-cbor-util/LICENSE-MIT new file mode 100644 index 0000000000..ea532a8305 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-cbor-util/LICENSE-MIT @@ -0,0 +1,19 @@ +Copyright 2019. Protocol Labs, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/filecoin-project/go-cbor-util/README.md b/vendor/github.com/filecoin-project/go-cbor-util/README.md new file mode 100644 index 0000000000..b0fc8d5373 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-cbor-util/README.md @@ -0,0 +1,56 @@ +# go-cbor-util +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![CircleCI](https://circleci.com/gh/filecoin-project/go-cbor-util.svg?style=svg)](https://circleci.com/gh/filecoin-project/go-cbor-util) +[![codecov](https://codecov.io/gh/filecoin-project/go-cbor-util/branch/master/graph/badge.svg)](https://codecov.io/gh/filecoin-project/go-cbor-util) + +CBOR utilities for reading and writing objects to CBOR representation, optimizing for fast path serialization/deserialization generated by cbor-gen + +## Install + +Install this library with `go mod` + +## Usage + +Write an object to a stream in cbor + +```golang +import ( + cborutil "github.com/filecoin-project/go-cbor-util" + cbg "github.com/whyrusleeping/cbor-gen" +) + +var w io.Writer +// some object type with cbg fastpath marshalling +var out cbg.CBORMarshaler +err := cborutil.WriteCborRPC(w, obj) + +var slow interface{} +// will work but will be slower if slow does not support fast path marshalling +err := cborutil.WriteCborRPC(w, slow) +``` + +Read an object form a stream in cbor + +```golang +import ( + cborutil "github.com/filecoin-project/go-cbor-util" + cbg "github.com/whyrusleeping/cbor-gen" +) + +var r io.Reader +// some object type with cbg fastpath marshalling +var out cbg.CBORUnmarshaler +err := cborutil.ReadCborRPC(r, obj) + +var slow interface{} +// will work but will be slower if slow does not support fast path unmarshalling +err := cborutil.ReadCborRPC(r, slow) +``` + +## Project-level documentation +The filecoin-project has a [community repo](https://github.com/filecoin-project/community) that documents in more detail our policies and guidelines, such as discussion 
forums and chat rooms and [Code of Conduct](https://github.com/filecoin-project/community/blob/master/CODE_OF_CONDUCT.md). + +## License +This repository is dual-licensed under Apache 2.0 and MIT terms. + +Copyright 2019. Protocol Labs, Inc. diff --git a/vendor/github.com/filecoin-project/go-cbor-util/go.mod b/vendor/github.com/filecoin-project/go-cbor-util/go.mod new file mode 100644 index 0000000000..9ef44cfdc0 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-cbor-util/go.mod @@ -0,0 +1,10 @@ +module github.com/filecoin-project/go-cbor-util + +go 1.13 + +require ( + github.com/ipfs/go-ipld-cbor v0.0.3 + github.com/ipfs/go-ipld-format v0.0.2 + github.com/ipfs/go-log v1.0.0 + github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0 +) diff --git a/vendor/github.com/filecoin-project/go-cbor-util/go.sum b/vendor/github.com/filecoin-project/go-cbor-util/go.sum new file mode 100644 index 0000000000..be803b6f0c --- /dev/null +++ b/vendor/github.com/filecoin-project/go-cbor-util/go.sum @@ -0,0 +1,77 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= 
+github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.3 h1:UIAh32wymBpStoe83YCzwVQQ5Oy/H0FdxvUS6DJDzms= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipld-cbor v0.0.3 h1:ENsxvybwkmke7Z/QJOmeJfoguj6GH3Y0YOaGrfy9Q0I= +github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2G9Zs= +github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= +github.com/ipfs/go-log v1.0.0 h1:BW3LQIiZzpNyolt84yvKNCd3FU+AK4VDw1hnHR+1aiI= +github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA= +github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 
h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992 h1:bzMe+2coZJYHnhGgVlcQKuRy4FSny4ds8dLQjw5P1XE= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa 
h1:E+gaaifzi2xF65PbDmuKI3PhLWY6G5opMLniFq8vmXA= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0 h1:efb/4CnrubzNGqQOeHErxyQ6rIsJb7GcgeSDF7fqWeI= +github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d h1:Z0Ahzd7HltpJtjAHHxX8QFP3j1yYgiuvjbjRzDj/KH0= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 
h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/filecoin-project/go-cbor-util/rpc.go b/vendor/github.com/filecoin-project/go-cbor-util/rpc.go new file mode 100644 index 0000000000..d2c4e214eb --- /dev/null +++ b/vendor/github.com/filecoin-project/go-cbor-util/rpc.go @@ -0,0 +1,88 @@ +package cborutil + +import ( + "bytes" + "encoding/hex" + "io" + "math" + + cbor "github.com/ipfs/go-ipld-cbor" + ipld "github.com/ipfs/go-ipld-format" + logging "github.com/ipfs/go-log" + cbg "github.com/whyrusleeping/cbor-gen" +) + +var log = logging.Logger("cborrrpc") + +// Debug will produce more debugging messages +const Debug = false + +func init() { + if Debug { + log.Warn("CBOR-RPC Debugging enabled") + } +} + +// WriteCborRPC with encode an object to cbor, opting for fast path if possible +// and then write it into the given io.Writer +func WriteCborRPC(w io.Writer, obj interface{}) error { + if m, ok := obj.(cbg.CBORMarshaler); ok { + // TODO: impl debug + return m.MarshalCBOR(w) + } + data, err := cbor.DumpObject(obj) + if err != nil { + return err + } + + if Debug { + log.Infof("> %s", hex.EncodeToString(data)) + } + + _, err = w.Write(data) + return err +} + +// ReadCborRPC will read an object from the given io.Reader +// opting for fast path if possible +func ReadCborRPC(r io.Reader, out interface{}) error { + if um, ok := out.(cbg.CBORUnmarshaler); ok { + return um.UnmarshalCBOR(r) + } + return cbor.DecodeReader(r, out) +} + +// Dump returns the cbor bytes representation of 
an object +func Dump(obj interface{}) ([]byte, error) { + var out bytes.Buffer + if err := WriteCborRPC(&out, obj); err != nil { + return nil, err + } + return out.Bytes(), nil +} + +// AsIpld converts an object to an ipld.Node interface +// TODO: this is a bit ugly, and this package is not exactly the best place +func AsIpld(obj interface{}) (ipld.Node, error) { + if m, ok := obj.(cbg.CBORMarshaler); ok { + b, err := Dump(m) + if err != nil { + return nil, err + } + return cbor.Decode(b, math.MaxUint64, -1) + } + return cbor.WrapObject(obj, math.MaxUint64, -1) +} + +// Equals is true if two objects have the same cbor representation +func Equals(a cbg.CBORMarshaler, b cbg.CBORMarshaler) (bool, error) { + ab, err := Dump(a) + if err != nil { + return false, err + } + bb, err := Dump(b) + if err != nil { + return false, err + } + return bytes.Equal(ab, bb), nil +} diff --git a/vendor/github.com/filecoin-project/go-crypto/.circleci/config.yml b/vendor/github.com/filecoin-project/go-crypto/.circleci/config.yml new file mode 100644 index 0000000000..521d7fb390 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-crypto/.circleci/config.yml @@ -0,0 +1,161 @@ +version: 2.1 +orbs: + go: gotest/tools@0.0.9 + +executors: + golang: + docker: + - image: circleci/golang:1.13 + resource_class: small + +commands: + install-deps: + steps: + - go/install-ssh + - go/install: {package: git} + prepare: + parameters: + linux: + default: true + description: is a linux build environment? + type: boolean + steps: + - checkout + - when: + condition: << parameters.linux >> + steps: + - run: sudo apt-get update + - run: sudo apt-get install ocl-icd-opencl-dev + build-all: + + +jobs: + mod-tidy-check: + executor: golang + steps: + - install-deps + - prepare + - go/mod-download + - go/mod-tidy-check + + test: &test + description: | + Run tests with gotestsum. 
+ parameters: + executor: + type: executor + default: golang + go-test-flags: + type: string + default: "-timeout 5m" + description: Flags passed to go test. + packages: + type: string + default: "./..." + description: Import paths of packages to be tested. + test-suite-name: + type: string + default: unit + description: Test suite name to report to CircleCI. + gotestsum-format: + type: string + default: short + description: gotestsum format. https://github.com/gotestyourself/gotestsum#format + coverage: + type: string + default: -coverprofile=coverage.txt + description: Coverage flag. Set to the empty string to disable. + codecov-upload: + type: boolean + default: false + description: | + Upload coverage report to https://codecov.io/. Requires the codecov API token to be + set as an environment variable for private projects. + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - go/mod-download + - restore_cache: + name: restore go mod cache + key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} + - go/install-gotestsum: + gobin: $HOME/.local/bin + - run: + name: go test + environment: + GOTESTSUM_JUNITFILE: /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml + GOTESTSUM_FORMAT: << parameters.gotestsum-format >> + command: | + mkdir -p /tmp/test-reports/<< parameters.test-suite-name >> + gotestsum -- \ + << parameters.coverage >> \ + << parameters.go-test-flags >> \ + << parameters.packages >> + no_output_timeout: 30m + - store_test_results: + path: /tmp/test-reports + - when: + condition: << parameters.codecov-upload >> + steps: + - go/install: {package: bash} + - go/install: {package: curl} + - run: + shell: /bin/bash -eo pipefail + command: | + bash <(curl -s https://codecov.io/bash) + - save_cache: + name: save go mod cache + key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} + paths: + - "~/go/pkg" + - "~/go/src/github.com" + - "~/go/src/golang.org" + + lint: &lint + description: | 
+ Run golangci-lint. + parameters: + executor: + type: executor + default: golang + golangci-lint-version: + type: string + default: 1.21.0 + concurrency: + type: string + default: '2' + description: | + Concurrency used to run linters. Defaults to 2 because NumCPU is not + aware of container CPU limits. + args: + type: string + default: '' + description: | + Arguments to pass to golangci-lint + executor: << parameters.executor >> + steps: + - install-deps + - prepare + - go/install-golangci-lint: + gobin: $HOME/.local/bin + version: << parameters.golangci-lint-version >> + - run: + name: Lint + command: | + $HOME/.local/bin/golangci-lint run -v \ + --concurrency << parameters.concurrency >> << parameters.args >> + lint-changes: + <<: *lint + + lint-all: + <<: *lint + +workflows: + version: 2.1 + ci: + jobs: + - lint-changes: + args: "--new-from-rev origin/master" + - test + - mod-tidy-check diff --git a/vendor/github.com/filecoin-project/go-crypto/COPYRIGHT b/vendor/github.com/filecoin-project/go-crypto/COPYRIGHT new file mode 100644 index 0000000000..771e6f7cd7 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-crypto/COPYRIGHT @@ -0,0 +1,3 @@ +Copyright 2019. Protocol Labs, Inc. + +This library is dual-licensed under Apache 2.0 and MIT terms. diff --git a/vendor/github.com/filecoin-project/go-crypto/LICENSE-APACHE b/vendor/github.com/filecoin-project/go-crypto/LICENSE-APACHE new file mode 100644 index 0000000000..546514363d --- /dev/null +++ b/vendor/github.com/filecoin-project/go-crypto/LICENSE-APACHE @@ -0,0 +1,13 @@ +Copyright 2019. Protocol Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/filecoin-project/go-crypto/LICENSE-MIT b/vendor/github.com/filecoin-project/go-crypto/LICENSE-MIT new file mode 100644 index 0000000000..ea532a8305 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-crypto/LICENSE-MIT @@ -0,0 +1,19 @@ +Copyright 2019. Protocol Labs, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/filecoin-project/go-crypto/README.md b/vendor/github.com/filecoin-project/go-crypto/README.md new file mode 100644 index 0000000000..8050daead9 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-crypto/README.md @@ -0,0 +1,8 @@ +# go-crypto + +Crypto utility functions used in filecoin + +## License +This repository is dual-licensed under Apache 2.0 and MIT terms. + +Copyright 2019. Protocol Labs, Inc. diff --git a/vendor/github.com/filecoin-project/go-crypto/crypto.go b/vendor/github.com/filecoin-project/go-crypto/crypto.go new file mode 100644 index 0000000000..7ef5d6718a --- /dev/null +++ b/vendor/github.com/filecoin-project/go-crypto/crypto.go @@ -0,0 +1,70 @@ +package crypto + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "io" + + secp256k1 "github.com/ethereum/go-ethereum/crypto/secp256k1" +) + +// PrivateKeyBytes is the size of a serialized private key. +const PrivateKeyBytes = 32 + +// PublicKeyBytes is the size of a serialized public key. +const PublicKeyBytes = 65 + +// PublicKey returns the public key for this private key. +func PublicKey(sk []byte) []byte { + x, y := secp256k1.S256().ScalarBaseMult(sk) + return elliptic.Marshal(secp256k1.S256(), x, y) +} + +// Sign signs the given message, which must be 32 bytes long. +func Sign(sk, msg []byte) ([]byte, error) { + return secp256k1.Sign(msg, sk) +} + +// Equals compares two private key for equality and returns true if they are the same. +func Equals(sk, other []byte) bool { + return bytes.Equal(sk, other) +} + +// Verify checks the given signature and returns true if it is valid. +func Verify(pk, msg, signature []byte) bool { + if len(signature) == 65 { + // Drop the V (1byte) in [R | S | V] style signatures. + // The V (1byte) is the recovery bit and is not apart of the signature verification. 
+ return secp256k1.VerifySignature(pk[:], msg, signature[:len(signature)-1]) + } + + return secp256k1.VerifySignature(pk[:], msg, signature) +} + +// GenerateKeyFromSeed generates a new key from the given reader. +func GenerateKeyFromSeed(seed io.Reader) ([]byte, error) { + key, err := ecdsa.GenerateKey(secp256k1.S256(), seed) + if err != nil { + return nil, err + } + + privkey := make([]byte, PrivateKeyBytes) + blob := key.D.Bytes() + + // the length is guaranteed to be fixed, given the serialization rules for secp256k1 curve points. + copy(privkey[PrivateKeyBytes-len(blob):], blob) + + return privkey, nil +} + +// GenerateKey creates a new key using secure randomness from crypto/rand. +func GenerateKey() ([]byte, error) { + return GenerateKeyFromSeed(rand.Reader) +} + +// EcRecover recovers the public key from a message, signature pair. +func EcRecover(msg, signature []byte) ([]byte, error) { + return secp256k1.RecoverPubkey(msg, signature) +} diff --git a/vendor/github.com/filecoin-project/go-crypto/crypto_test.go b/vendor/github.com/filecoin-project/go-crypto/crypto_test.go new file mode 100644 index 0000000000..6e20493ed0 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-crypto/crypto_test.go @@ -0,0 +1,61 @@ +package crypto_test + +import ( + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/filecoin-project/go-crypto" +) + +func TestGenerateKey(t *testing.T) { + rand.Seed(time.Now().UnixNano()) + + sk, err := crypto.GenerateKey() + assert.NoError(t, err) + + assert.Equal(t, len(sk), 32) + + msg := make([]byte, 32) + for i := 0; i < len(msg); i++ { + msg[i] = byte(i) + } + + digest, err := crypto.Sign(sk, msg) + assert.NoError(t, err) + assert.Equal(t, len(digest), 65) + pk := crypto.PublicKey(sk) + + // valid signature + assert.True(t, crypto.Verify(pk, msg, digest)) + + // invalid signature - different message (too short) + assert.False(t, crypto.Verify(pk, msg[3:], digest)) + + // invalid signature - 
different message + msg2 := make([]byte, 32) + copy(msg2, msg) + rand.Shuffle(len(msg2), func(i, j int) { msg2[i], msg2[j] = msg2[j], msg2[i] }) + assert.False(t, crypto.Verify(pk, msg2, digest)) + + // invalid signature - different digest + digest2 := make([]byte, 65) + copy(digest2, digest) + rand.Shuffle(len(digest2), func(i, j int) { digest2[i], digest2[j] = digest2[j], digest2[i] }) + assert.False(t, crypto.Verify(pk, msg, digest2)) + + // invalid signature - digest too short + assert.False(t, crypto.Verify(pk, msg, digest[3:])) + assert.False(t, crypto.Verify(pk, msg, digest[:29])) + + // invalid signature - digest too long + digest3 := make([]byte, 70) + copy(digest3, digest) + assert.False(t, crypto.Verify(pk, msg, digest3)) + + recovered, err := crypto.EcRecover(msg, digest) + assert.NoError(t, err) + assert.Equal(t, recovered, crypto.PublicKey(sk)) +} diff --git a/vendor/github.com/filecoin-project/go-crypto/go.mod b/vendor/github.com/filecoin-project/go-crypto/go.mod new file mode 100644 index 0000000000..0a580200b5 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-crypto/go.mod @@ -0,0 +1,12 @@ +module github.com/filecoin-project/go-crypto + +go 1.13 + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 + github.com/kr/pretty v0.1.0 // indirect + github.com/stretchr/testify v1.4.0 + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect + gopkg.in/yaml.v2 v2.2.4 // indirect +) diff --git a/vendor/github.com/filecoin-project/go-crypto/go.sum b/vendor/github.com/filecoin-project/go-crypto/go.sum new file mode 100644 index 0000000000..bb23197e92 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-crypto/go.sum @@ -0,0 +1,21 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= +github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/filecoin-project/go-data-transfer/CONTRIBUTING.md b/vendor/github.com/filecoin-project/go-data-transfer/CONTRIBUTING.md new file mode 100644 index 0000000000..eb4746b16c --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/CONTRIBUTING.md @@ -0,0 +1,73 @@ +# Contributing to 
this repo + +First, thank you for your interest in contributing to this project! Before you pick up your first issue and start +changing code, please: + +1. Review all documentation for the module you're interested in. +1. Look through the [issues for this repo](https://github.com/filecoin-project/go-data-transfer/issues) for relevant discussions. +1. If you have questions about an issue, post a comment in the issue. +1. If you want to submit changes that aren't covered by an issue, file a new one with your proposal, outlining what problem you found/feature you want to implement, and how you intend to implement a solution. + +For best results, before submitting a PR, make sure: +1. It has met all acceptance criteria for the issue. +1. It addresses only the one issue and does not make other, irrelevant changes. +1. Your code conforms to our coding style guide. +1. You have adequate test coverage (this should be indicated by CI results anyway). +1. If you like, check out [current PRs](https://github.com/filecoin-project/go-data-transfer/pulls) to see how others do it. + +Special Note: +If editing README.md, please conform to the [standard readme specification](https://github.com/RichardLitt/standard-readme/blob/master/spec.md). + +Before a PR can be merged to `master`, it must: +1. Pass continuous integration. +1. Be approved by at least two maintainers + +### Testing + +- All new code should be accompanied by unit tests. Prefer focused unit tests to integration tests for thorough validation of behaviour. Existing code is not necessarily a good model, here. +- Integration tests should test integration, not comprehensive functionality +- Tests should be placed in a separate package named `$PACKAGE_test`. For example, a test of the `chain` package should live in a package named `chain_test`. In limited situations, exceptions may be made for some "white box" tests placed in the same package as the code it tests. 
+ +### Conventions and Style + +#### Imports +We use the following import ordering. +``` +import ( + [stdlib packages, alpha-sorted] + + [external packages] + + [other-filecoin-project packages] + + [go-data-transfer packages] +) +``` + +Where a package name does not match its directory name, an explicit alias is expected (`goimports` will add this for you). + +Example: + +```go +package message + +import ( + "io" + + "github.com/ipfs/go-cid" + cborgen "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-statemachine" + + datatransfer "github.com/filecoin-project/go-data-transfer" +) +``` + +You can run `script/fiximports` to put all your code in the desired format + +#### Comments +Comments are a communication to other developers (including your future self) to help them understand and maintain code. Good comments describe the _intent_ of the code, without repeating the procedures directly. + +- A `TODO:` comment describes a change that is desired but could not be immediately implemented. It must include a reference to a GitHub issue outlining whatever prevents the thing being done now (which could just be a matter of priority). +- A `NOTE:` comment indicates an aside, some background info, or ideas for future improvement, rather than the intent of the current code. It's often fine to document such ideas alongside the code rather than an issue (at the loss of a space for discussion). +- `FIXME`, `HACK`, `XXX` and similar tags indicating that some code is to be avoided in favour of `TODO`, `NOTE` or some straight prose. \ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-data-transfer/COPYRIGHT b/vendor/github.com/filecoin-project/go-data-transfer/COPYRIGHT new file mode 100644 index 0000000000..771e6f7cd7 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/COPYRIGHT @@ -0,0 +1,3 @@ +Copyright 2019. Protocol Labs, Inc. + +This library is dual-licensed under Apache 2.0 and MIT terms. 
diff --git a/vendor/github.com/filecoin-project/go-data-transfer/LICENSE-APACHE b/vendor/github.com/filecoin-project/go-data-transfer/LICENSE-APACHE new file mode 100644 index 0000000000..546514363d --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/LICENSE-APACHE @@ -0,0 +1,13 @@ +Copyright 2019. Protocol Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/filecoin-project/go-data-transfer/LICENSE-MIT b/vendor/github.com/filecoin-project/go-data-transfer/LICENSE-MIT new file mode 100644 index 0000000000..ea532a8305 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/LICENSE-MIT @@ -0,0 +1,19 @@ +Copyright 2019. Protocol Labs, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/filecoin-project/go-data-transfer/Makefile b/vendor/github.com/filecoin-project/go-data-transfer/Makefile new file mode 100644 index 0000000000..6ad9e7d874 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/Makefile @@ -0,0 +1,32 @@ +all: build +.PHONY: all + +GOVERSION:=$(shell go version | cut -d' ' -f 3 | cut -d. -f 2) +ifeq ($(shell expr $(GOVERSION) \< 13), 1) +$(warning Your Golang version is go 1.$(GOVERSION)) +$(error Update Golang to version $(shell grep '^go' go.mod)) +endif + +build: + go build ./... + +test: + go test ./... + +type-gen: build + go generate ./... + +imports: + scripts/fiximports + +cbor-gen: + go generate ./... + +tidy: + go mod tidy + +lint: + git fetch + golangci-lint run -v --concurrency 2 --new-from-rev origin/master + +prepare-pr: cbor-gen tidy imports lint \ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-data-transfer/README.md b/vendor/github.com/filecoin-project/go-data-transfer/README.md new file mode 100644 index 0000000000..9783676141 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/README.md @@ -0,0 +1,183 @@ +# go-data-transfer +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![CircleCI](https://circleci.com/gh/filecoin-project/go-data-transfer.svg?style=svg)](https://circleci.com/gh/filecoin-project/go-data-transfer) +[![codecov](https://codecov.io/gh/filecoin-project/go-data-transfer/branch/master/graph/badge.svg)](https://codecov.io/gh/filecoin-project/go-data-transfer) + +A go module to perform data transfers over [ipfs/go-graphsync](https://github.com/ipfs/go-graphsync) + +## Description +This module 
encapsulates protocols for exchanging piece data between storage clients and miners, both when consummating a storage deal and when retrieving the piece later. + +## Table of Contents +* [Background](https://github.com/filecoin-project/go-data-transfer/tree/master#background) +* [Usage](https://github.com/filecoin-project/go-data-transfer/tree/master#usage) + * [Initialize a data transfer module](https://github.com/filecoin-project/go-data-transfer/tree/master#initialize-a-data-transfer-module) + * [Register a validator](https://github.com/filecoin-project/go-data-transfer/tree/master#register-a-validator) + * [Open a Push or Pull Request](https://github.com/filecoin-project/go-data-transfer/tree/master#open-a-push-or-pull-request) + * [Subscribe to Events](https://github.com/filecoin-project/go-data-transfer/tree/master#subscribe-to-events) +* [Contribute](https://github.com/filecoin-project/go-data-transfer/tree/master#contribute) + +## Background + +Please see the [design documentation](https://github.com/filecoin-project/go-data-transfer/tree/master/docs/DESIGNDOC) +for this module for a high-level overview and an explanation of the terms and concepts. + +## Usage + +**Requires go 1.13** + +Install the module in your package or app with `go get "github.com/filecoin-project/go-data-transfer/datatransfer"` + + +### Initialize a data transfer module +1. Set up imports. You need, minimally, the following imports: + ```go + package mypackage + + import ( + gsimpl "github.com/ipfs/go-graphsync/impl" + datatransfer "github.com/filecoin-project/go-data-transfer/impl" + gstransport "github.com/filecoin-project/go-data-transfer/transport/graphsync" + "github.com/libp2p/go-libp2p-core/host" + ) + + ``` +1. Provide or create a [libp2p host.Host](https://github.com/libp2p/go-libp2p-examples/tree/master/libp2p-host) +1. You will need a transport protocol. The current default transport is graphsync. 
[go-graphsync GraphExchange](https://github.com/ipfs/go-graphsync#initializing-a-graphsync-exchange) +1. Create a data transfer by building a transport interface and then initializing a new data transfer instance + ```go + func NewGraphsyncDataTransfer(h host.Host, gs graphsync.GraphExchange) { + tp := gstransport.NewTransport(h.ID(), gs) + dt := impl.NewDataTransfer(h, tp) + } + ``` + +1. If needed, build out your voucher struct and its validator. + + A push or pull request must include a voucher. The voucher's type must have been registered with + the node receiving the request before it's sent, otherwise the request will be rejected. + + [datatransfer.Voucher](https://github.com/filecoin-project/go-data-transfer/blob/21dd66ba370176224114b13030ee68cb785fadb2/datatransfer/types.go#L17) + and [datatransfer.Validator](https://github.com/filecoin-project/go-data-transfer/blob/21dd66ba370176224114b13030ee68cb785fadb2/datatransfer/types.go#L153) + are the interfaces used for validation of graphsync datatransfer messages. Voucher types plus a Validator for them must be registered + with the peer to whom requests will be sent. 
+ +#### Example Toy Voucher and Validator +```go +type myVoucher struct { + data string +} + +func (v *myVoucher) ToBytes() ([]byte, error) { + return []byte(v.data), nil +} + +func (v *myVoucher) FromBytes(data []byte) error { + v.data = string(data) + return nil +} + +func (v *myVoucher) Type() string { + return "FakeDTType" +} + +type myValidator struct { + ctx context.Context + ValidationsReceived chan receivedValidation +} + +func (vl *myValidator) ValidatePush( + sender peer.ID, + voucher datatransfer.Voucher, + baseCid cid.Cid, + selector ipld.Node) error { + + v := voucher.(*myVoucher) + if v.data == "" || v.data != "validpush" { + return errors.New("invalid") + } + + return nil +} + +func (vl *myValidator) ValidatePull( + receiver peer.ID, + voucher datatransfer.Voucher, + baseCid cid.Cid, + selector ipld.Node) error { + + v := voucher.(*myVoucher) + if v.data == "" || v.data != "validpull" { + return errors.New("invalid") + } + + return nil +} + +``` + + +Please see +[go-data-transfer/blob/master/types.go](https://github.com/filecoin-project/go-data-transfer/blob/master/types.go) +for more detail. + + +### Register a validator +Before sending push or pull requests, you must register a `datatransfer.Voucher` +by its `reflect.Type` and `dataTransfer.RequestValidator` for vouchers that +must be sent with the request. Using the trivial examples above: +```go + func NewGraphsyncDatatransfer(h host.Host, gs graphsync.GraphExchange) { + tp := gstransport.NewTransport(h.ID(), gs) + dt := impl.NewDataTransfer(h, tp) + + vouch := &myVoucher{} + mv := &myValidator{} + dt.RegisterVoucherType(reflect.TypeOf(vouch), mv) + } +``` + +For more detail, please see the [unit tests](https://github.com/filecoin-project/go-data-transfer/blob/master/impl/impl_test.go). + +### Open a Push or Pull Request +For a push or pull request, provide a context, a `datatransfer.Voucher`, a host recipient `peer.ID`, a baseCID `cid.CID` and a selector `ipld.Node`. 
These +calls return a `datatransfer.ChannelID` and any error: +```go + channelID, err := dtm.OpenPullDataChannel(ctx, recipient, voucher, baseCid, selector) + // OR + channelID, err := dtm.OpenPushDataChannel(ctx, recipient, voucher, baseCid, selector) + +``` + +### Subscribe to Events + +The module allows the consumer to be notified when a graphsync Request is sent or a datatransfer push or pull request response is received: + +```go + func ToySubscriberFunc (event Event, channelState ChannelState) { + if event.Code == datatransfer.Error { + // log error, flail about helplessly + return + } + // + if channelState.Recipient() == our.PeerID && channelState.Received() > 0 { + // log some stuff, update some state somewhere, send data to a channel, etc. + } + } + + dtm := SetupDataTransferManager(ctx, h, gs, baseCid, snode) + unsubFunc := dtm.SubscribeToEvents(ToySubscriberFunc) + + // . . . later, when you don't need to know about events any more: + unsubFunc() +``` + +## Contributing +PRs are welcome! Please first read the design docs and look over the current code. PRs against +master require approval of at least two maintainers. For the rest, please see our +[CONTRIBUTING](https://github.com/filecoin-project/go-data-transfer/CONTRIBUTING.md) guide. + +## License +This repository is dual-licensed under Apache 2.0 and MIT terms. + +Copyright 2019. Protocol Labs, Inc. 
\ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-data-transfer/encoding/encoding.go b/vendor/github.com/filecoin-project/go-data-transfer/encoding/encoding.go new file mode 100644 index 0000000000..d2cb84d6e7 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/encoding/encoding.go @@ -0,0 +1,122 @@ +package encoding + +import ( + "bytes" + "reflect" + + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + cborgen "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +// Encodable is an object that can be written to CBOR and decoded back +type Encodable interface{} + +// Encode encodes an encodable to CBOR, using the best available path for +// writing to CBOR +func Encode(value Encodable) ([]byte, error) { + if cbgEncodable, ok := value.(cborgen.CBORMarshaler); ok { + buf := new(bytes.Buffer) + err := cbgEncodable.MarshalCBOR(buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil + } + if ipldEncodable, ok := value.(ipld.Node); ok { + buf := new(bytes.Buffer) + err := dagcbor.Encoder(ipldEncodable, buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil + } + return cbor.DumpObject(value) +} + +// Decoder is CBOR decoder for a given encodable type +type Decoder interface { + DecodeFromCbor([]byte) (Encodable, error) +} + +// NewDecoder creates a new Decoder that will decode into new instances of the given +// object type. 
It will use the decoding that is optimal for that type +// It returns an error if it's not possible to set up a decoder for this type +func NewDecoder(decodeType Encodable) (Decoder, error) { + // check if type is ipld.Node, if so, just use style + if ipldDecodable, ok := decodeType.(ipld.Node); ok { + return &ipldDecoder{ipldDecodable.Prototype()}, nil + } + // check if type is a pointer, as we need that to make new copies + // for cborgen types & regular IPLD types + decodeReflectType := reflect.TypeOf(decodeType) + if decodeReflectType.Kind() != reflect.Ptr { + return nil, xerrors.New("type must be a pointer") + } + // check if type is a cbor-gen type + if _, ok := decodeType.(cborgen.CBORUnmarshaler); ok { + return &cbgDecoder{decodeReflectType}, nil + } + // type is neither ipld-prime nor cbor-gen, so we need to see if it + // can roundtrip with old-school ipld-format + encoded, err := cbor.DumpObject(decodeType) + if err != nil { + return nil, xerrors.New("Object type did not encode") + } + newDecodable := reflect.New(decodeReflectType.Elem()).Interface() + if err := cbor.DecodeInto(encoded, newDecodable); err != nil { + return nil, xerrors.New("Object type did not decode") + } + return &defaultDecoder{decodeReflectType}, nil +} + +type ipldDecoder struct { + style ipld.NodePrototype +} + +func (decoder *ipldDecoder) DecodeFromCbor(encoded []byte) (Encodable, error) { + builder := decoder.style.NewBuilder() + buf := bytes.NewReader(encoded) + err := dagcbor.Decoder(builder, buf) + if err != nil { + return nil, err + } + return builder.Build(), nil + } + return decoded, nil +} + +type cbgDecoder struct { + cbgType reflect.Type +} + +func (decoder *cbgDecoder) DecodeFromCbor(encoded []byte) (Encodable, error) { + decodedValue := reflect.New(decoder.cbgType.Elem()) + decoded, ok := decodedValue.Interface().(cborgen.CBORUnmarshaler) + if !ok || reflect.ValueOf(decoded).IsNil() { + return nil, xerrors.New("problem instantiating decoded value") + } + buf := bytes.NewReader(encoded) + err := 
decoded.UnmarshalCBOR(buf) + if err != nil { + return nil, err + } + return decoded, nil +} + +type defaultDecoder struct { + ptrType reflect.Type +} + +func (decoder *defaultDecoder) DecodeFromCbor(encoded []byte) (Encodable, error) { + decodedValue := reflect.New(decoder.ptrType.Elem()) + decoded, ok := decodedValue.Interface().(Encodable) + if !ok || reflect.ValueOf(decoded).IsNil() { + return nil, xerrors.New("problem instantiating decoded value") + } + err := cbor.DecodeInto(encoded, decoded) + if err != nil { + return nil, err + } + return decoded, nil +} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/go.mod b/vendor/github.com/filecoin-project/go-data-transfer/go.mod new file mode 100644 index 0000000000..f2340f32a4 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/go.mod @@ -0,0 +1,36 @@ +module github.com/filecoin-project/go-data-transfer + +go 1.13 + +require ( + github.com/filecoin-project/go-ds-versioning v0.1.0 + github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe + github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b + github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 + github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e + github.com/hashicorp/go-multierror v1.1.0 + github.com/ipfs/go-block-format v0.0.2 + github.com/ipfs/go-blockservice v0.1.3 + github.com/ipfs/go-cid v0.0.7 + github.com/ipfs/go-datastore v0.4.5 + github.com/ipfs/go-graphsync v0.3.0 + github.com/ipfs/go-ipfs-blockstore v1.0.1 + github.com/ipfs/go-ipfs-blocksutil v0.0.1 + github.com/ipfs/go-ipfs-chunker v0.0.5 + github.com/ipfs/go-ipfs-exchange-offline v0.0.1 + github.com/ipfs/go-ipfs-files v0.0.8 + github.com/ipfs/go-ipld-cbor v0.0.4 + github.com/ipfs/go-ipld-format v0.2.0 + github.com/ipfs/go-log/v2 v2.0.3 + github.com/ipfs/go-merkledag v0.3.2 + github.com/ipfs/go-unixfs v0.2.4 + github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f + 
github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c + github.com/jpillora/backoff v1.0.0 + github.com/libp2p/go-libp2p v0.6.0 + github.com/libp2p/go-libp2p-core v0.5.0 + github.com/stretchr/testify v1.5.1 + github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163 + go.uber.org/atomic v1.6.0 + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 +) diff --git a/vendor/github.com/filecoin-project/go-data-transfer/go.sum b/vendor/github.com/filecoin-project/go-data-transfer/go.sum new file mode 100644 index 0000000000..ceb12dbac2 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/go.sum @@ -0,0 +1,876 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage 
v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= +github.com/Stebalien/go-bitfield v0.0.1 h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo= +github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= +github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3 h1:A/EVblehb75cUgXA5njHPn0kLAsykn6mJGz7rnmW5W0= +github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btclog 
v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= 
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= +github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018 h1:6xT9KW8zLC5IlbaIF5Q7JNieBoACT7iW0YTxQHR0in0= +github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= +github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= 
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= +github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= +github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= +github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= +github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe h1:dF8u+LEWeIcTcfUcCf3WFVlc81Fr2JKg8zPzIbBDKDw= +github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= +github.com/filecoin-project/go-statestore v0.1.0 h1:t56reH59843TwXHkMcwyuayStBIiWBRilQjQ+5IiwdQ= +github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= +github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= +github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 
h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod 
h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= +github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 h1:F9k+7wv5OIk1zcq23QpdiL0hfDuXPjuOmMNaC6fgQ0Q= +github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1/go.mod h1:jvfsLIxk0fY/2BKSQ1xf2406AKA5dwMmKKv0ADcOfN8= +github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4nmd7b5qy5t0GWDTwSn4OyRgfAXSmo6VnryBY= +github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e/go.mod h1:I8h3MITA53gN9OnWGCgaMa0JWVRdXthWw4M3CPM54OY= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl 
v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo= +github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= +github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= +github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= +github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= +github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= +github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= +github.com/ipfs/go-bitswap v0.1.8 h1:38X1mKXkiU6Nzw4TOSWD8eTVY5eX3slQunv3QEWfXKg= +github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= +github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= +github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= +github.com/ipfs/go-blockservice v0.1.3 h1:9XgsPMwwWJSC9uVr2pMDsW2qFTBSkxpGMhmna8mIjPM= +github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= 
+github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.4-0.20191112011718-79e75dffeb10/go.mod h1:/BYOuUoxkE+0f6tGzlzMvycuN+5l35VOR4Bpg2sCmds= +github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= +github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= +github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs= +github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY= +github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.2/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.4 h1:rjvQ9+muFaJ+QZ7dN5B1MSDNQ0JVZKkkES/rMZmA8X8= +github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.5 h1:cwOUcGMLdLPWgu3SlrCckCMznaGADbPqE0r8h768/Dg= +github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= +github.com/ipfs/go-detect-race v0.0.1 
h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= +github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= +github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= +github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= +github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= +github.com/ipfs/go-graphsync v0.3.0 h1:I6Y20kSuCWkUvPoUWo4V3am704/9QjgDVVkf0zIV8+8= +github.com/ipfs/go-graphsync v0.3.0/go.mod h1:gEBvJUNelzMkaRPJTpg/jaKN4AQW/7wDWu0K92D8o10= +github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= +github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= +github.com/ipfs/go-ipfs-blockstore v0.1.4 h1:2SGI6U1B44aODevza8Rde3+dY30Pb+lbcObe1LETxOQ= +github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= +github.com/ipfs/go-ipfs-blockstore v1.0.1 h1:fnuVj4XdZp4yExhd0CnUwAiMNJHiPnfInhiuwz4lW1w= +github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= +github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= +github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= +github.com/ipfs/go-ipfs-chunker v0.0.1 h1:cHUUxKFQ99pozdahi+uSC/3Y6HeRpi9oTeUHbE27SEw= +github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= +github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8= +github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod 
h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= +github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-ds-help v0.0.1 h1:QBg+Ts2zgeemK/dB0saiF/ykzRGgfoFMT90Rzo0OnVU= +github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= +github.com/ipfs/go-ipfs-ds-help v0.1.1 h1:IW/bXGeaAZV2VH0Kuok+Ohva/zHkHmeLFBxC1k7mNPc= +github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= +github.com/ipfs/go-ipfs-ds-help v1.0.0 h1:bEQ8hMGs80h0sR8O4tfDgV6B01aaF9qeTrujrTLYV3g= +github.com/ipfs/go-ipfs-ds-help v1.0.0/go.mod h1:ujAbkeIgkKAWtxxNkoZHWLCyk5JpPoKnGyCcsoF6ueE= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1 h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= +github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew= +github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= +github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= +github.com/ipfs/go-ipfs-files v0.0.8 h1:8o0oFJkJ8UkO/ABl8T6ac6tKF3+NIpj67aAB6ZpusRg= +github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= +github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= +github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= +github.com/ipfs/go-ipfs-pq v0.0.1 h1:zgUotX8dcAB/w/HidJh1zzc1yFq6Vm8J7T2F4itj/RU= +github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= +github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= +github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= 
+github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ= +github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-cbor v0.0.3 h1:ENsxvybwkmke7Z/QJOmeJfoguj6GH3Y0YOaGrfy9Q0I= +github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-cbor v0.0.4 h1:Aw3KPOKXjvrm6VjwJvFf1F1ekR/BH3jdof3Bk7OTiSA= +github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2G9Zs= +github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= +github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= +github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= +github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA= +github.com/ipfs/go-log v1.0.1/go.mod h1:HuWlQttfN6FWNHRhlY5yMk/lW7evQC0HHGOxEwMRR8I= +github.com/ipfs/go-log v1.0.2 h1:s19ZwJxH8rPWzypjcDpqPLIyV7BnbLqvpli3iZoqYK0= +github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= +github.com/ipfs/go-log v1.0.3 h1:Gg7SUYSZ7BrqaKMwM+hRgcAkKv4QLfzP4XPQt5Sx/OI= +github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= +github.com/ipfs/go-log/v2 v2.0.1/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.2 
h1:xguurydRdfKMJjKyxNXNU8lYP0VZH1NUwJRwUorjuEw= +github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.3 h1:Q2gXcBoCALyLN/pUQlz1qgu0x3uFV6FzP9oXhpfyJpc= +github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-merkledag v0.3.1 h1:3UqWINBEr3/N+r6OwgFXAddDP/8zpQX/8J7IGVOCqRQ= +github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= +github.com/ipfs/go-merkledag v0.3.2 h1:MRqj40QkrWkvPswXs4EfSslhZ4RVPRbxwX11js0t1xY= +github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= +github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= +github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= +github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-peertaskqueue v0.1.1 h1:+gPjbI+V3NktXZOqJA1kzbms2pYmhjgQQal0MzZrOAY= +github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-peertaskqueue v0.2.0 h1:2cSr7exUGKYyDeUyQ7P/nHPs9P7Ht/B+ROrpN1EJOjc= +github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= +github.com/ipfs/go-unixfs v0.2.4 h1:6NwppOXefWIyysZ4LR/qUBPvXd5//8J3jiMdvpbw6Lo= +github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= +github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= +github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= +github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f h1:XpOuNQ5GbXxUcSukbQcW9jkE7REpaFGJU2/T00fo9kA= +github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= 
+github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6 h1:6Mq+tZGSEMEoJJ1NbJRhddeelkXZcU8yfH/ZRYUo/Es= +github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= +github.com/jackpal/gateway v1.0.5 h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc= +github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= +github.com/jackpal/go-nat-pmp v1.0.1 h1:i0LektDkO1QlrTm/cSuP+PyBCDnYvjPLGl4LdWEMiaA= +github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= +github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c h1:uUx61FiAa1GI6ZmVd2wf2vULeQZIKG66eybjNXKYCz4= +github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c/go.mod h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= +github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2 h1:vhC1OXXiT9R2pczegwz6moDvuRpggaroAXhPIseh57A= +github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/jbenet/goprocess v0.1.3 h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr10= +github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= +github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags 
v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b h1:wxtKgYHEncAU00muMD06dzLiahtGM1eouRNOzVV7tdQ= +github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d h1:68u9r4wEvL3gYg2jvAOgROwZ3H+Y3hIDk4tbbmIjcYQ= +github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod 
h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/libp2p/go-addr-util v0.0.1 h1:TpTQm9cXVRVSKsYbgQ7GKc3KbbHVTnbostgGaDEP+88= +github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= +github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= +github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= +github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/libp2p/go-conn-security-multistream v0.1.0 h1:aqGmto+ttL/uJgX0JtQI0tD21CIEy5eYd1Hlp0juHY0= +github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= +github.com/libp2p/go-eventbus v0.1.0 h1:mlawomSAjjkk97QnYiEmHsLu7E136+2oCWSHRUvMfzQ= +github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= +github.com/libp2p/go-flow-metrics v0.0.1 h1:0gxuFd2GuK7IIP5pKljLwps6TvcuYgvG7Atqi3INF5s= +github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= +github.com/libp2p/go-flow-metrics v0.0.3 h1:8tAs/hSdNvUiLgtlSy3mxwxWP4I9y/jlkPFT7epKdeM= +github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= +github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM= +github.com/libp2p/go-libp2p v0.1.1/go.mod 
h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= +github.com/libp2p/go-libp2p v0.6.0 h1:EFArryT9N7AVA70LCcOh8zxsW+FeDnxwcpWQx9k7+GM= +github.com/libp2p/go-libp2p v0.6.0/go.mod h1:mfKWI7Soz3ABX+XEBR61lGbg+ewyMtJHVt043oWeqwg= +github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= +github.com/libp2p/go-libp2p-autonat v0.1.1 h1:WLBZcIRsjZlWdAZj9CiBSvU2wQXoUOiS1Zk1tM7DTJI= +github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= +github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= +github.com/libp2p/go-libp2p-blankhost v0.1.4 h1:I96SWjR4rK9irDHcHq3XHN6hawCRTPUADzkJacgZLvk= +github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= +github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= +github.com/libp2p/go-libp2p-circuit v0.1.4 h1:Phzbmrg3BkVzbqd4ZZ149JxCuUWu2wZcXf/Kr6hZJj8= +github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= +github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= +github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= +github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= +github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I= +github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= +github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= +github.com/libp2p/go-libp2p-core v0.2.4 h1:Et6ykkTwI6PU44tr8qUF9k43vP0aduMNniShAbUJJw8= +github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= +github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= +github.com/libp2p/go-libp2p-core 
v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUhgJJLAa6almrII= +github.com/libp2p/go-libp2p-core v0.4.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= +github.com/libp2p/go-libp2p-core v0.5.0 h1:FBQ1fpq2Fo/ClyjojVJ5AKXlKhvNc/B6U0O+7AN1ffE= +github.com/libp2p/go-libp2p-core v0.5.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= +github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ= +github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= +github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= +github.com/libp2p/go-libp2p-discovery v0.2.0 h1:1p3YSOq7VsgaL+xVHPi8XAmtGyas6D2J6rWBEfz/aiY= +github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= +github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8= +github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= +github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= +github.com/libp2p/go-libp2p-mplex v0.2.1 h1:E1xaJBQnbSiTHGI1gaBKmKhu1TUKkErKJnE8iGvirYI= +github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= +github.com/libp2p/go-libp2p-mplex v0.2.2 h1:+Ld7YDAfVERQ0E+qqjE7o6fHwKuM0SqTzYiwN1lVVSA= +github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= +github.com/libp2p/go-libp2p-nat v0.0.4 h1:+KXK324yaY701On8a0aGjTnw8467kW3ExKcqW2wwmyw= +github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= +github.com/libp2p/go-libp2p-nat v0.0.5 h1:/mH8pXFVKleflDL1YwqMg27W9GD8kjEx7NY0P6eGc98= +github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= +github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= +github.com/libp2p/go-libp2p-netutil 
v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= +github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY= +github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= +github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= +github.com/libp2p/go-libp2p-peerstore v0.1.3 h1:wMgajt1uM2tMiqf4M+4qWKVyyFc8SfA+84VV9glZq1M= +github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= +github.com/libp2p/go-libp2p-peerstore v0.2.0 h1:XcgJhI8WyUOCbHyRLNEX5542YNj8hnLSJ2G1InRjDhk= +github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= +github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= +github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= +github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= +github.com/libp2p/go-libp2p-record v0.1.1 h1:ZJK2bHXYUBqObHX+rHLSNrM3M8fmJUlUHrodDPPATmY= +github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= +github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= +github.com/libp2p/go-libp2p-secio v0.2.0 h1:ywzZBsWEEz2KNTn5RtzauEDq5RFEefPsttXYwAWqHng= +github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= +github.com/libp2p/go-libp2p-secio v0.2.1 h1:eNWbJTdyPA7NxhP7J3c5lT97DC5d+u+IldkgCYFTPVA= +github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= +github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= +github.com/libp2p/go-libp2p-swarm v0.2.2 h1:T4hUpgEs2r371PweU3DuH7EOmBIdTBCwWs+FLcgx3bQ= +github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= 
+github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.1.0 h1:WaFRj/t3HdMZGNZqnU2pS7pDRBmMeoDx7/HDNpeyT9U= +github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= +github.com/libp2p/go-libp2p-testing v0.1.1 h1:U03z3HnGI7Ni8Xx6ONVZvUFOAzWYmolWf5W5jAOPNmU= +github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= +github.com/libp2p/go-libp2p-transport-upgrader v0.1.1 h1:PZMS9lhjK9VytzMCW3tWHAXtKXmlURSc3ZdvwEcKCzw= +github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= +github.com/libp2p/go-libp2p-transport-upgrader v0.2.0 h1:5EhPgQhXZNyfL22ERZTUoVp9UVVbNowWNVtELQaKCHk= +github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= +github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= +github.com/libp2p/go-libp2p-yamux v0.2.1 h1:Q3XYNiKCC2vIxrvUJL+Jg1kiyeEaIDNKLjgEjo3VQdI= +github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= +github.com/libp2p/go-libp2p-yamux v0.2.2 h1:eGvbqWqWY9S5lrpe2gA0UCOLCdzCgYSAR3vo/xCsNQg= +github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= +github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= +github.com/libp2p/go-maddr-filter v0.0.5 h1:CW3AgbMO6vUvT4kf87y4N+0P8KUl2aqLYhrGyDUbLSg= +github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= +github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= +github.com/libp2p/go-mplex v0.1.0 
h1:/nBTy5+1yRyY82YaO6HXQRnO5IAGsXTjEJaR3LdTPc0= +github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= +github.com/libp2p/go-mplex v0.1.1 h1:huPH/GGRJzmsHR9IZJJsrSwIM5YE2gL4ssgl1YWb/ps= +github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= +github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.4 h1:agEFehY3zWJFUHK6SEMR7UYmk2z6kC3oeCM7ybLhguA= +github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-nat v0.0.3 h1:l6fKV+p0Xa354EqQOQP+d8CivdLM4kl5GxC1hSc/UeI= +github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= +github.com/libp2p/go-nat v0.0.4 h1:KbizNnq8YIf7+Hn7+VFL/xE0eDrkPru2zIO9NMwL8UQ= +github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= +github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= +github.com/libp2p/go-openssl v0.0.3 h1:wjlG7HvQkt4Fq4cfH33Ivpwp0omaElYEi9z26qaIkIk= +github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.4 h1:d27YZvLoTyMhIN4njrkr8zMDOM4lfpHIp6A+TK9fovg= +github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-reuseport v0.0.1 h1:7PhkfH73VXfPJYKQ6JwS5I/eVcoyYi9IMNGc6FWpFLw= +github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= +github.com/libp2p/go-reuseport-transport v0.0.2 h1:WglMwyXyBu61CMkjCCtnmqNqnjib0GIEjMiHTwR/KN4= +github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= +github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= +github.com/libp2p/go-stream-muxer-multistream v0.2.0 
h1:714bRJ4Zy9mdhyTLJ+ZKiROmAFwUHpeRidG+q7LTQOg= +github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= +github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= +github.com/libp2p/go-tcp-transport v0.1.1 h1:yGlqURmqgNA2fvzjSgZNlHcsd/IulAnKM8Ncu+vlqnw= +github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= +github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= +github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= +github.com/libp2p/go-ws-transport v0.2.0 h1:MJCw2OrPA9+76YNRvdo1wMnSOxb9Bivj6sVFY1Xrj6w= +github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= +github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.2.3 h1:xX8A36vpXb59frIzWFdEgptLMsOANMFq2K7fPRlunYI= +github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.0 h1:FsYzT16Wq2XqUGJsBbOxoz9g+dFklvNi7jN6YFPfl7U= +github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod 
h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= +github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3 
h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.1 h1:rVAztJYMhCQ7vEFr8FvxW3mS+HF2eY/oPbOMeS0ZDnE= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= +github.com/multiformats/go-multiaddr v0.2.1 h1:SgG/cw5vqyB5QQe5FPe2TqggU9WtrA9X4nZw7LlVqOI= +github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= +github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.2.0 h1:YWJoIDwLePniH7OU5hBnDZV6SWuvJqJ0YtN6pLeH9zA= +github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= +github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= +github.com/multiformats/go-multiaddr-fmt v0.1.0 
h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= +github.com/multiformats/go-multiaddr-net v0.1.0 h1:ZepO8Ezwovd+7b5XPPDhQhayk1yt0AJpzQBpq9fejx4= +github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= +github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= +github.com/multiformats/go-multiaddr-net v0.1.2 h1:P7zcBH9FRETdPkDrylcXVjQLQ2t1JQtNItZULWNWgeg= +github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multistream v0.1.0 h1:UpO6jrsjqs46mqAK3n6wKRYFhugss9ArzbyUzU+4wkQ= +github.com/multiformats/go-multistream v0.1.0/go.mod 
h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-multistream v0.1.1 h1:JlAdpIFhBhGRLxe9W6Om0w++Gd6KMWoFPZL/dEnm9nI= +github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= +github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a h1:hjZfReYVLbqFkAtr2us7vdy04YWz3LVAirzP7reh8+M= +github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= 
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= +github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8= +github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= +github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= 
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/src-d/envconfig v1.0.0 h1:/AJi6DtjFhZKNx3OB2qMsq7y4yT5//AeSZIe7rk+PX8= +github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli/v2 v2.0.0 h1:+HU9SCbu8GnEUFtIBfuUNXN39ofWViIEJIp6SURMpCg= +github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830 h1:8kxMKmKzXXL4Ru1nyhvdms/JjWt+3YLpvRb/bAjO/y0= +github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= 
+github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163 h1:TtcUeY2XZSriVWR1pXyfCBWIf/NGC2iUdNw1lofUjUU= +github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= +github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= +github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f h1:M/lL30eFZTKnomXY6huvM6G0+gVquFNf6mxghaWlFUg= +github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= +github.com/whyrusleeping/mafmt v1.2.8 h1:TCghSl5kkwEE0j+sU/gudyhVMRlpBin8fMBBHg59EbA= +github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= +github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod 
h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= +github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= +github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.1 h1:8dP3SGL7MPB94crU3bEPplMPe83FI4EouesJUeFHv50= +go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E= 
+go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU= +go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= 
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361 h1:RIIXAeV6GvDBuADKumTODatUqANFZ+5BPMnzsy4hulY= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3 h1:r3P/5xOq/dK1991B65Oy6E1fRF/2d/fSYZJ/fXGVfJc= +golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= 
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= +gopkg.in/src-d/go-log.v1 v1.0.1 h1:heWvX7J6qbGWbeFS/aRmiy1eYaT+QMV6wNvHDyMjQV4= +gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff 
--git a/vendor/github.com/filecoin-project/go-data-transfer/tools.go b/vendor/github.com/filecoin-project/go-data-transfer/tools.go new file mode 100644 index 0000000000..51b8ceae58 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/tools.go @@ -0,0 +1,7 @@ +// +build tools + +package tools + +import ( + _ "github.com/hannahhoward/cbor-gen-for" +) diff --git a/vendor/github.com/filecoin-project/go-data-transfer/types.go b/vendor/github.com/filecoin-project/go-data-transfer/types.go new file mode 100644 index 0000000000..f2de437d3d --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/types.go @@ -0,0 +1,132 @@ +package datatransfer + +import ( + "fmt" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/libp2p/go-libp2p-core/peer" + + "github.com/filecoin-project/go-data-transfer/encoding" +) + +//go:generate cbor-gen-for ChannelID + +// TypeIdentifier is a unique string identifier for a type of encodable object in a +// registry +type TypeIdentifier string + +// EmptyTypeIdentifier means there is no voucher present +const EmptyTypeIdentifier = TypeIdentifier("") + +// Registerable is a type of object in a registry. It must be encodable and must +// have a single method that uniquely identifies its type +type Registerable interface { + encoding.Encodable + // Type is a unique string identifier for this voucher type + Type() TypeIdentifier +} + +// Voucher is used to validate +// a data transfer request against the underlying storage or retrieval deal +// that precipitated it. 
The only requirement is a voucher can read and write +// from bytes, and has a string identifier type +type Voucher Registerable + +// VoucherResult is used to provide option additional information about a +// voucher being rejected or accepted +type VoucherResult Registerable + +// TransferID is an identifier for a data transfer, shared between +// request/responder and unique to the requester +type TransferID uint64 + +// ChannelID is a unique identifier for a channel, distinct by both the other +// party's peer ID + the transfer ID +type ChannelID struct { + Initiator peer.ID + Responder peer.ID + ID TransferID +} + +func (c ChannelID) String() string { + return fmt.Sprintf("%s-%s-%d", c.Initiator, c.Responder, c.ID) +} + +// OtherParty returns the peer on the other side of the request, depending +// on whether this peer is the initiator or responder +func (c ChannelID) OtherParty(thisPeer peer.ID) peer.ID { + if thisPeer == c.Initiator { + return c.Responder + } + return c.Initiator +} + +// Channel represents all the parameters for a single data transfer +type Channel interface { + // TransferID returns the transfer id for this channel + TransferID() TransferID + + // BaseCID returns the CID that is at the root of this data transfer + BaseCID() cid.Cid + + // Selector returns the IPLD selector for this data transfer (represented as + // an IPLD node) + Selector() ipld.Node + + // Voucher returns the voucher for this data transfer + Voucher() Voucher + + // Sender returns the peer id for the node that is sending data + Sender() peer.ID + + // Recipient returns the peer id for the node that is receiving data + Recipient() peer.ID + + // TotalSize returns the total size for the data being transferred + TotalSize() uint64 + + // IsPull returns whether this is a pull request + IsPull() bool + + // ChannelID returns the ChannelID for this request + ChannelID() ChannelID + + // OtherPeer returns the counter party peer for this channel + OtherPeer() peer.ID + + // 
+// ChannelState is channel parameters plus its current state
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/filecoin-project/go-fil-markets/LICENSE-MIT b/vendor/github.com/filecoin-project/go-fil-markets/LICENSE-MIT new file mode 100644 index 0000000000..ea532a8305 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/LICENSE-MIT @@ -0,0 +1,19 @@ +Copyright 2019. Protocol Labs, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+The `filestore` module is a simple wrapper for os.File. It is used by [pieceio](../pieceio),
+[retrievalmarket](../retrievalmarket), and [storagemarket](../storagemarket).
\ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-fil-markets/filestore/file.go b/vendor/github.com/filecoin-project/go-fil-markets/filestore/file.go new file mode 100644 index 0000000000..119ced0f17 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/filestore/file.go @@ -0,0 +1,39 @@ +package filestore + +import ( + "os" + "path" +) + +type fd struct { + *os.File + filename string + basepath string +} + +func newFile(basepath OsPath, filename Path) (File, error) { + var err error + result := fd{filename: string(filename), basepath: string(basepath)} + full := path.Join(string(basepath), string(filename)) + result.File, err = os.OpenFile(full, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0644) + if err != nil { + return nil, err + } + return &result, nil +} + +func (f fd) Path() Path { + return Path(f.filename) +} + +func (f fd) OsPath() OsPath { + return OsPath(f.Name()) +} + +func (f fd) Size() int64 { + info, err := os.Stat(f.Name()) + if err != nil { + return -1 + } + return info.Size() +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/filestore/filestore.go b/vendor/github.com/filecoin-project/go-fil-markets/filestore/filestore.go new file mode 100644 index 0000000000..4438a8b1da --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/filestore/filestore.go @@ -0,0 +1,75 @@ +package filestore + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" +) + +type fileStore struct { + base string +} + +// NewLocalFileStore creates a filestore mounted on a given local directory path +func NewLocalFileStore(basedirectory OsPath) (FileStore, error) { + base := filepath.Clean(string(basedirectory)) + info, err := os.Stat(string(base)) + if err != nil { + return nil, fmt.Errorf("error getting %s info: %s", base, err.Error()) + } + if !info.IsDir() { + return nil, fmt.Errorf("%s is not a directory", base) + } + return &fileStore{string(base)}, nil +} + +func (fs fileStore) filename(p 
Path) string { + return filepath.Join(fs.base, string(p)) +} + +func (fs fileStore) Open(p Path) (File, error) { + name := fs.filename(p) + if _, err := os.Stat(name); err != nil { + return nil, fmt.Errorf("error trying to open %s: %s", name, err.Error()) + } + return newFile(OsPath(fs.base), p) +} + +func (fs fileStore) Create(p Path) (File, error) { + name := fs.filename(p) + if _, err := os.Stat(name); err == nil { + return nil, fmt.Errorf("file %s already exists", name) + } + return newFile(OsPath(fs.base), p) +} + +func (fs fileStore) Store(p Path, src File) (Path, error) { + dest, err := fs.Create(p) + if err != nil { + return Path(""), err + } + + if _, err = io.Copy(dest, src); err != nil { + dest.Close() + return Path(""), err + } + return p, dest.Close() +} + +func (fs fileStore) Delete(p Path) error { + filename := string(p) + full := path.Join(string(fs.base), string(filename)) + return os.Remove(full) +} + +func (fs fileStore) CreateTemp() (File, error) { + f, err := ioutil.TempFile(fs.base, "fstmp") + if err != nil { + return nil, err + } + filename := filepath.Base(f.Name()) + return &fd{File: f, basepath: fs.base, filename: filename}, nil +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/filestore/types.go b/vendor/github.com/filecoin-project/go-fil-markets/filestore/types.go new file mode 100644 index 0000000000..230237671f --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/filestore/types.go @@ -0,0 +1,36 @@ +package filestore + +import "io" + +// Path represents an abstract path to a file +type Path string + +// OsPath represents a path that can be located on +// the operating system with standard os.File operations +type OsPath string + +// File is a wrapper around an os file +type File interface { + Path() Path + OsPath() OsPath + Size() int64 + + io.Closer + io.Reader + io.Writer + io.Seeker +} + +// FileStore is an abstract filestore, used for storing temporary file data +// when handing off a deal to the 
Storage Mining module. Files are created by +// the storage market module, their path is given to the storage mining module +// when AddPiece is called. The Storage Mining module then reads from them +// from the FileStore, and deletes them once they have been sealed in a sector +type FileStore interface { + Open(p Path) (File, error) + Create(p Path) (File, error) + Store(p Path, f File) (Path, error) + Delete(p Path) error + + CreateTemp() (File, error) +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/piecestore/README.md b/vendor/github.com/filecoin-project/go-fil-markets/piecestore/README.md new file mode 100644 index 0000000000..21c490a857 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/piecestore/README.md @@ -0,0 +1,48 @@ +# piecestore + +The `piecestore` module is a simple encapsulation of two data stores, one for `PieceInfo` and + another for `CIDInfo`. The piecestore's main goal is to help + [storagemarket module](../storagemarket) and [retrievalmarket module](../retrievalmarket) + find where sealed data lives inside of sectors. Storage market writes the + data, and retrieval market reads it. + +Both markets use `CIDInfo` to look up a Piece that contains the payload, and then + use `PieceInfo` to find the sector that contains the piece. + +The storage market has to write this data before it completes the deal in order to later + look up the payload when the data is served. + +## Installation +```bash +go get github.com/filecoin-project/go-fil-markets/piecestore +``` + +### PieceStore +`PieceStore` is primary export of this module. It is a database +of piece info that can be modified and queried. The PieceStore +interface is implemented in [piecestore.go](./piecestore.go). + +It has two stores, one for `PieceInfo` keyed by `pieceCID`, and another for +`CIDInfo`, keyed by `payloadCID`. These keys are of type `cid.CID`; see +[github.com/ipfs/go-cid](https://github.com/ipfs/go-cid). 
+Please see the [tests](piecestore_test.go) for more information about expected behavior.
[]PieceBlockLocation +} + +// CIDInfoUndefined is cid info with no information +var CIDInfoUndefined = CIDInfo{} + +// PieceInfo is metadata about a piece a provider may be storing based +// on its PieceCID -- so that, given a pieceCID during retrieval, the miner +// can determine how to unseal it if needed +type PieceInfo struct { + PieceCID cid.Cid + Deals []DealInfo +} + +// PieceInfoUndefined is piece info with no information +var PieceInfoUndefined = PieceInfo{} + +// PieceStore is a saved database of piece info that can be modified and queried +type PieceStore interface { + Start(ctx context.Context) error + OnReady(ready shared.ReadyFunc) + AddDealForPiece(pieceCID cid.Cid, dealInfo DealInfo) error + AddPieceBlockLocations(pieceCID cid.Cid, blockLocations map[cid.Cid]BlockLocation) error + GetPieceInfo(pieceCID cid.Cid) (PieceInfo, error) + GetCIDInfo(payloadCID cid.Cid) (CIDInfo, error) + ListCidInfoKeys() ([]cid.Cid, error) + ListPieceInfoKeys() ([]cid.Cid, error) +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/piecestore/types_cbor_gen.go b/vendor/github.com/filecoin-project/go-fil-markets/piecestore/types_cbor_gen.go new file mode 100644 index 0000000000..f01eaf0280 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/piecestore/types_cbor_gen.go @@ -0,0 +1,695 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package piecestore + +import ( + "fmt" + "io" + + abi "github.com/filecoin-project/go-state-types/abi" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +func (t *PieceInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{162}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.PieceCID (cid.Cid) (struct) + if len("PieceCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PieceCID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceCID")); err != nil { + return err + } + + if err := cbg.WriteCidBuf(scratch, w, t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + + // t.Deals ([]piecestore.DealInfo) (slice) + if len("Deals") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Deals\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Deals"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Deals")); err != nil { + return err + } + + if len(t.Deals) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Deals was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Deals))); err != nil { + return err + } + for _, v := range t.Deals { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *PieceInfo) UnmarshalCBOR(r io.Reader) error { + *t = PieceInfo{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return 
fmt.Errorf("PieceInfo: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.PieceCID (cid.Cid) (struct) + case "PieceCID": + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = c + + } + // t.Deals ([]piecestore.DealInfo) (slice) + case "Deals": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Deals: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Deals = make([]DealInfo, extra) + } + + for i := 0; i < int(extra); i++ { + + var v DealInfo + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Deals[i] = v + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *DealInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{164}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealID")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.SectorID (abi.SectorNumber) (uint64) + if len("SectorID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SectorID\" was too long") + } + + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("SectorID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("SectorID")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil { + return err + } + + // t.Offset (abi.PaddedPieceSize) (uint64) + if len("Offset") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Offset\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Offset"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Offset")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Offset)); err != nil { + return err + } + + // t.Length (abi.PaddedPieceSize) (uint64) + if len("Length") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Length\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Length"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Length")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Length)); err != nil { + return err + } + + return nil +} + +func (t *DealInfo) UnmarshalCBOR(r io.Reader) error { + *t = DealInfo{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealInfo: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, 
extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.SectorID (abi.SectorNumber) (uint64) + case "SectorID": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorID = abi.SectorNumber(extra) + + } + // t.Offset (abi.PaddedPieceSize) (uint64) + case "Offset": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Offset = abi.PaddedPieceSize(extra) + + } + // t.Length (abi.PaddedPieceSize) (uint64) + case "Length": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Length = abi.PaddedPieceSize(extra) + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *BlockLocation) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{162}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.RelOffset (uint64) (uint64) + if len("RelOffset") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"RelOffset\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("RelOffset"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("RelOffset")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.RelOffset)); err != nil { + return err + } + + // t.BlockSize (uint64) (uint64) + if len("BlockSize") > cbg.MaxLength { + return 
xerrors.Errorf("Value in field \"BlockSize\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("BlockSize"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("BlockSize")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.BlockSize)); err != nil { + return err + } + + return nil +} + +func (t *BlockLocation) UnmarshalCBOR(r io.Reader) error { + *t = BlockLocation{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("BlockLocation: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.RelOffset (uint64) (uint64) + case "RelOffset": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.RelOffset = uint64(extra) + + } + // t.BlockSize (uint64) (uint64) + case "BlockSize": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.BlockSize = uint64(extra) + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *PieceBlockLocation) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{162}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.BlockLocation (piecestore.BlockLocation) (struct) + if 
len("BlockLocation") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"BlockLocation\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("BlockLocation"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("BlockLocation")); err != nil { + return err + } + + if err := t.BlockLocation.MarshalCBOR(w); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + if len("PieceCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PieceCID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceCID")); err != nil { + return err + } + + if err := cbg.WriteCidBuf(scratch, w, t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + + return nil +} + +func (t *PieceBlockLocation) UnmarshalCBOR(r io.Reader) error { + *t = PieceBlockLocation{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("PieceBlockLocation: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.BlockLocation (piecestore.BlockLocation) (struct) + case "BlockLocation": + + { + + if err := t.BlockLocation.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.BlockLocation: %w", err) + } + + } + // t.PieceCID (cid.Cid) (struct) + case "PieceCID": + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + 
t.PieceCID = c + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *CIDInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{162}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.CID (cid.Cid) (struct) + if len("CID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("CID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CID")); err != nil { + return err + } + + if err := cbg.WriteCidBuf(scratch, w, t.CID); err != nil { + return xerrors.Errorf("failed to write cid field t.CID: %w", err) + } + + // t.PieceBlockLocations ([]piecestore.PieceBlockLocation) (slice) + if len("PieceBlockLocations") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceBlockLocations\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PieceBlockLocations"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceBlockLocations")); err != nil { + return err + } + + if len(t.PieceBlockLocations) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.PieceBlockLocations was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.PieceBlockLocations))); err != nil { + return err + } + for _, v := range t.PieceBlockLocations { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *CIDInfo) UnmarshalCBOR(r io.Reader) error { + *t = CIDInfo{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return 
fmt.Errorf("CIDInfo: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.CID (cid.Cid) (struct) + case "CID": + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.CID: %w", err) + } + + t.CID = c + + } + // t.PieceBlockLocations ([]piecestore.PieceBlockLocation) (slice) + case "PieceBlockLocations": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.PieceBlockLocations: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.PieceBlockLocations = make([]PieceBlockLocation, extra) + } + + for i := 0; i < int(extra); i++ { + + var v PieceBlockLocation + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.PieceBlockLocations[i] = v + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/README.md b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/README.md new file mode 100644 index 0000000000..bd7653a45b --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/README.md @@ -0,0 +1,303 @@ +# retrievalmarket +The retrievalmarket module is intended for Filecoin node implementations written in Go. +It implements functionality to allow execution of retrieval market deals on the +Filecoin network. +The node implementation must provide access to chain operations, and persistent +data storage. 
+possibly you, wants to retrieve data referenced by `payloadCID`.
+1. It obtains retrieval deal terms by calling each retrieval miner's `Query` function.
Implementation and timing of these calls is the node's +responsibility and is not a part of `retrievalmarket`. For more information about how +to interact with the +payment channel actor, see the +[github.com/filecoin-project/specs-actors](https://github.com/filecoin-project/specs-actors) repo. + +## Implementation + +### General Steps +1. Decide if your node can be configured as a Retrieval Provider, a Retrieval Client or both. +1. Determine how and where your retrieval calls to RetrievalProvider and RetrievalClient functions + will be made. +1. Implement the required interfaces as described in this section. +1. [Construct a RetrievalClient](#RetrievalClient) in your node's startup, if your + node will be a client. +1. [Construct a RetrievalProvider](#RetrievalProvider) in your node's startup, if your + node will be a provider. +If setting up a RetrievalProvider, call its `Start` function it in the appropriate place, and its + `Stop` function in the appropriate place. +1. Expose desired `retrievalmarket` functionality to whatever internal modules desired, such as + command line interface, JSON RPC, or HTTP API. + +Implement the [`PeerResolver`](#PeerResolver), [`RetrievalProviderNode`](#RetrievalProviderNode), +and [`RetrievalClientNode`](#RetrievalClientNode) +interfaces in [retrievalmarket/types.go](./types.go), described below: + +### PeerResolver +PeerResolver is an interface for looking up providers that may have a piece of identifiable +data. Its functions are: + +#### GetPeers +```go +func GetPeers(payloadCID cid.Cid) ([]RetrievalPeer, error) +``` +Return a slice of RetrievalPeers that store the data referenced by `payloadCID`. + +--- +### RetrievalClientNode + +`RetrievalClientNode` contains the node dependencies for a RetrievalClient. 
Its functions are: + +* [`AllocateLane`](#AllocateLane) +* [`GetChainHead`](#GetChainHead) +* [`GetOrCreatePaymentChannel`](#GetOrCreatePaymentChannel) +* [`CreatePaymentVoucher`](#CreatePaymentVoucher) +* [`WaitForPaymentChannelAddFunds`](#WaitForPaymentChannelAddFunds) +* [`WaitForPaymentChannelCreation`](#WaitForPaymentChannelCreation) + +#### AllocateLane +```go +func AllocateLane(paymentChannel address.Address) (uint64, error) +``` + +Create a lane within `paymentChannel` so that calls to CreatePaymentVoucher will +automatically make vouchers only for the difference in total. Note that payment channel +Actors have a +[lane limit](https://github.com/filecoin-project/specs-actors/blob/0df536f7e461599c818231aa0effcdaccbb74900/actors/builtin/paych/paych_actor.go#L20). + +#### CreatePaymentVoucher +```go +func CreatePaymentVoucher(ctx context.Context, paymentChannel address.Address, + amount abi.TokenAmount, lane uint64, tok shared.TipSetToken + ) (*paych.SignedVoucher, error) +``` +Create a new payment voucher for `paymentChannel` with `amount`, for lane `lane`, given chain +state at `tok`. + +#### GetChainHead +```go +func GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) +``` +Get the current chain head. Return its TipSetToken and its abi.ChainEpoch. + +#### GetOrCreatePaymentChannel +```go +func GetOrCreatePaymentChannel(ctx context.Context, clientAddress, minerAddress address.Address, + amount abi.TokenAmount, tok shared.TipSetToken + ) (address.Address, cid.Cid, error) +``` +If there is a current payment channel for deals between `clientAddress` and `minerAddress`, +add `amount` to the channel, then return the payment channel address and `cid.Undef`. + +If there isn't, construct a new payment channel actor with `amount` funds by posting +the corresponding message on chain, then return `address.Undef` and the posted message `cid.Cid`. 
+For more information about how to construct a payment channel actor, see +[github.com/filecoin-project/specs-actors](https://github.com/filecoin-project/specs-actors) + +#### WaitForPaymentChannelAddFunds +```go +func WaitForPaymentChannelAddFunds(messageCID cid.Cid) error +``` +Wait for message with CID `messageCID` on chain that funds have been sent to a payment channel. + +#### WaitForPaymentChannelCreation +```go +func WaitForPaymentChannelCreation(messageCID cid.Cid) (address.Address, error) +``` +Wait for a message on chain with CID `messageCID` that a payment channel has been created. + +--- +### RetrievalProviderNode +`RetrievalProviderNode` contains the node dependencies for a RetrievalProvider. +Its functions are: + +* [`GetChainHead`](#GetChainHead) +* [`GetMinerWorkerAddress`](#GetMinerWorkerAddress) +* [`UnsealSector`](#UnsealSector) +* [`SavePaymentVoucher`](#SavePaymentVoucher) + +#### GetChainHead +```go +func GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) +``` +Get the current chain head. Return its TipSetToken and its abi.ChainEpoch. + +#### GetMinerWorkerAddress +```go +func GetMinerWorkerAddress(ctx context.Context, addr address.Address, tok shared.TipSetToken, + ) (address.Address, error) +``` +Get the miner worker address for the given miner owner, as of `tok`. + +#### UnsealSector +```go +func UnsealSector(ctx context.Context, sectorID uint64, offset uint64, length uint64, + ) (io.ReadCloser, error) +``` +Unseal `length` data contained in `sectorID`, starting at `offset`. Return an `io.ReadCloser +` for accessing the data. + +#### SavePaymentVoucher +```go +func SavePaymentVoucher(ctx context.Context, paymentChannel address.Address, + voucher *paych.SignedVoucher, proof []byte, expectedAmount abi.TokenAmount, + tok shared.TipSetToken) (abi.TokenAmount, error) +``` + +Save the provided `paych.SignedVoucher` for `paymentChannel`. 
The RetrievalProviderNode +implementation should validate the SignedVoucher using the provided `proof`, ` +expectedAmount`, based on the chain state referenced by `tok`. The value of the +voucher should be equal or greater than the largest previous voucher by + `expectedAmount`. It returns the actual difference. + + +## Construction +### RetrievalClient +```go +package retrievalimpl +func NewClient( + netwk network.RetrievalMarketNetwork, + bs blockstore.Blockstore, + node retrievalmarket.RetrievalClientNode, + resolver retrievalmarket.PeerResolver, + ds datastore.Batching, + storedCounter *storedcounter.StoredCounter, +) (retrievalmarket.RetrievalClient, error) +``` +#### Parameters +* `netwk rmnet.RetrievalMarketNetwork` + `RetrievalMarketNetwork` is an interface for creating and handling deal streams. To create it: + + ```go + package network + + func NewFromLibp2pHost(h host.Host) RetrievalMarketNetwork + ``` + where `h host.Host` is your node's libp2p Host. + See + [github.com/libp2p/go-libp2p-core/host](https://github.com/libp2p/go-libp2p-core/host). + +* `bs blockstore.Blockstore` is an IPFS blockstore for storing and retrieving data for deals. + See + [github.com/ipfs/go-ipfs-blockstore](github.com/ipfs/go-ipfs-blockstore). + +* `node retrievalmarket.RetrievalClientNode` is the `RetrievalClientNode` interface you have + implemented. + +* `resolver retrievalmarket.PeerResolver` is the `PeerResolver` interface you have implemented. +* `ds datastore.Batching` is a datastore for the deal's state machine. It is + typically the node's own datastore that implements the IPFS datastore.Batching interface. + See + [github.com/ipfs/go-datastore](https://github.com/ipfs/go-datastore). + + * `storedCounter *storedcounter.StoredCounter` is a file-based stored counter used to generate new + dealIDs. See + [github.com/filecoin-project/go-storedcounter](https://github.com/filecoin-project/go-storedcounter). 
+ +### RetrievalProvider +```go +package retrievalimpl + +func NewProvider(minerAddress address.Address, + node retrievalmarket.RetrievalProviderNode, + netwk network.RetrievalMarketNetwork, + pieceStore piecestore.PieceStore, + bs blockstore.Blockstore, + ds datastore.Batching, + ) (retrievalmarket.RetrievalProvider, error) +``` + +#### Parameters +* `minerAddress address.Address` is the address of the retrieval miner owner. +* `node retrievalmarket.RetrievalProviderNode` is the `RetrievalProviderNode` API you have implemented. +* `netwk rmnet.RetrievalMarketNetwork` is the same interface for creating and handling deal streams +as for [constructing a RetrievalClient](#RetrievalClient). +* `pieceStore piecestore.PieceStore` is the database of deals and pieces associated with them. +See this repo's [piecestore module](../piecestore). +* `bs blockstore.Blockstore` is the same interface as for +[constructing a RetrievalClient](#RetrievalClient). +* `ds datastore.Batching` is the same batching datastore interface as for +[constructing a RetrievalClient](#RetrievalClient). 
+ +## Technical Documentation + +* [GoDoc](https://godoc.org/github.com/filecoin-project/go-fil-markets/retrievalmarket) contains an architectural overview and robust API documentation + +* Retrieval Client FSM diagram: + +[![Diagram of RetrievalClientFSM](../docs/retrievalclient.mmd.png)](https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/retrievalclient.mmd.svg) + + +* Retrieval Provider FSM diagram: + +[![Diagram of RetrievalClientFSM](../docs/retrievalprovider.mmd.png)](https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/retrievalprovider.mmd.svg) diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/common.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/common.go new file mode 100644 index 0000000000..52057c6609 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/common.go @@ -0,0 +1,22 @@ +package retrievalmarket + +import ( + "bytes" + + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + cbg "github.com/whyrusleeping/cbor-gen" +) + +// DecodeNode validates and computes a decoded ipld.Node selector from the +// provided cbor-encoded selector +func DecodeNode(defnode *cbg.Deferred) (ipld.Node, error) { + reader := bytes.NewReader(defnode.Raw) + nb := basicnode.Prototype.Any.NewBuilder() + err := dagcbor.Decoder(nb, reader) + if err != nil { + return nil, err + } + return nb.Build(), nil +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/doc.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/doc.go new file mode 100644 index 0000000000..06a445ea52 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/doc.go @@ -0,0 +1,108 @@ +/* +Package retrievalmarket implements the Filecoin retrieval protocol. 
+ +An overview of the retrieval protocol can be found in the Filecoin specification: + +https://filecoin-project.github.io/specs/#systems__filecoin_markets__retrieval_market + +The following architectural components provide a brief overview of the design of +the retrieval market module: + +Public Interfaces And Node Dependencies + +While retrieval deals primarily happen off-chain, there are some chain operations +that must be performed by a Filecoin node implementation. The module is intended to separate +the primarily off-chain retrieval deal flow from the on-chain operations related primarily +to payment channels, the mechanism for getting paid for retrieval deals. + +As such for both the client and the provider in the retrieval market, the module defines a top level +public interface which it provides an implementation for, and a node interface that must be implemented +by the Filecoin node itself, and provided as a dependency. These node interfaces provide a universal way to +talk to potentially multiple different Filecoin node implementations, and can be implemented as using HTTP +or other interprocess communication to talk to a node implementation running in a different process. + +The top level interfaces this package implements are RetrievalClient & RetrievalProvider. The dependencies the Filecoin +node is expected to implement are RetrievalClientNode & RetrievalProviderNode. Further documentation of exactly what those +dependencies should do can be found in the readme. + +Finite State Machines + +While retrieval deals in general should be fairly fast, making a retrieval deal is still an asynchronous process. +As documented in the Filecoin spec, the basic architecture of the Filecoin retrieval protocol is incremental payments. +Because neither client nor provider trust each other, we bootstrap trust by essentially paying in small increments as we receive +data. 
The client only sends payment when it verifies data and the provider only sends more data when it receives payment. +Not surprisingly, many things can go wrong along the way. To manage this back and forth asynchronous process, +we use finite state machines that update deal state when discrete events occur. State updates +always persist state to disk. This means we have a permanent record of exactly what's going on with deals at any time, +and we can ideally survive our Filecoin processes shutting down and restarting. + +The following diagrams visualize the statemachine flows for the client and the provider: + +Client FSM - https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/retrievalclient.mmd.svg + +Provider FSM - https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/retrievalprovider.mmd.svg + +Identifying Retrieval Providers + +The RetrievalClient provides two functions to locate a provider from which to retrieve data. + +`FindProviders` returns a list of retrieval peers who may have the data your looking for. FindProviders delegates its work to +an implementation of the PeerResolver interface. + +`Query` queries a specific retrieval provider to find out definitively if they have the requested data and if so, the +parameters they will accept for a retrieval deal. + +Deal Flow + +The primary mechanism for initiating storage deals is the `Retrieve` method on the RetrievalClient. + +When `Retrieve` is called, it allocates a new DealID from its stored counter, constructs a DealProposal, sends +the deal proposal to the provider, initiates tracking of deal state and hands the deal to the Client FSM, +and returns the DealID which constitutes the identifier for that deal. + +The Retrieval provider receives the deal in `HandleDealStream`. `HandleDealStream` initiates tracking of deal state +on the Provider side and hands the deal to the Provider FSM, which handles the rest of deal flow. 
+ +From this point forward, deal negotiation is completely asynchronous and runs in the FSMs. + +A user of the modules can monitor deal progress through `SubscribeToEvents` methods on RetrievalClient and RetrievalProvider, +or by simply calling `ListDeals` to get all deal statuses. + +The FSMs implement every remaining step in deal negotiation. Importantly, the RetrievalProvider delegates unsealing sectors +back to the node via the `UnsealSector` method (the node itself likely delegates management of sectors and sealing to an +implementation of the Storage Mining subsystem of the Filecoin spec). Sectors are unsealed on an as needed basis using +the `PieceStore` to locate sectors that contain data related to the deal. + +Major Dependencies + +Other libraries in go-fil-markets: + +https://github.com/filecoin-project/go-fil-markets/tree/master/piecestore - used to locate data for deals in sectors +https://github.com/filecoin-project/go-fil-markets/tree/master/shared - types and utility functions shared with +storagemarket package + +Other Filecoin Repos: + +https://github.com/filecoin-project/go-data-transfer - for transferring data, via go-graphsync +https://github.com/filecoin-project/go-statemachine - a finite state machine that tracks deal state +https://github.com/filecoin-project/go-storedcounter - for generating and persisting unique deal IDs +https://github.com/filecoin-project/specs-actors - the Filecoin actors + +IPFS Project Repos: + +https://github.com/ipfs/go-graphsync - used by go-data-transfer +https://github.com/ipfs/go-datastore - for persisting statemachine state for deals +https://github.com/ipfs/go-ipfs-blockstore - for storing and retrieving block data for deals + +Other Repos: + +https://github.com/libp2p/go-libp2p) the network over which retrieval deal data is exchanged. 
+https://github.com/hannahhoward/go-pubsub - for pub/sub notifications external to the statemachine + +Root package + +This top level package defines top level enumerations and interfaces. The primary implementation +lives in the `impl` directory + +*/ +package retrievalmarket diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/types.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/types.go new file mode 100644 index 0000000000..e954679f2b --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/types.go @@ -0,0 +1,383 @@ +package retrievalmarket + +import ( + "bytes" + "errors" + "fmt" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/protocol" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-multistore" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/specs-actors/actors/builtin/paych" + + "github.com/filecoin-project/go-fil-markets/piecestore" +) + +//go:generate cbor-gen-for --map-encoding Query QueryResponse DealProposal DealResponse Params QueryParams DealPayment ClientDealState ProviderDealState PaymentInfo RetrievalPeer Ask + +// QueryProtocolID is the protocol for querying information about retrieval +// deal parameters +const QueryProtocolID = protocol.ID("/fil/retrieval/qry/1.0.0") + +// OldQueryProtocolID is the old query protocol for tuple structs +const OldQueryProtocolID = protocol.ID("/fil/retrieval/qry/0.0.1") + +// Unsubscribe is a function that unsubscribes a subscriber for either the +// client or the provider +type Unsubscribe func() + +// PaymentInfo is the payment channel and lane for a 
deal, once it is setup +type PaymentInfo struct { + PayCh address.Address + Lane uint64 +} + +// ClientDealState is the current state of a deal from the point of view +// of a retrieval client +type ClientDealState struct { + DealProposal + StoreID *multistore.StoreID + ChannelID datatransfer.ChannelID + LastPaymentRequested bool + AllBlocksReceived bool + TotalFunds abi.TokenAmount + ClientWallet address.Address + MinerWallet address.Address + PaymentInfo *PaymentInfo + Status DealStatus + Sender peer.ID + TotalReceived uint64 + Message string + BytesPaidFor uint64 + CurrentInterval uint64 + PaymentRequested abi.TokenAmount + FundsSpent abi.TokenAmount + UnsealFundsPaid abi.TokenAmount + WaitMsgCID *cid.Cid // the CID of any message the client deal is waiting for + VoucherShortfall abi.TokenAmount + LegacyProtocol bool +} + +// ProviderDealState is the current state of a deal from the point of view +// of a retrieval provider +type ProviderDealState struct { + DealProposal + StoreID multistore.StoreID + ChannelID datatransfer.ChannelID + PieceInfo *piecestore.PieceInfo + Status DealStatus + Receiver peer.ID + TotalSent uint64 + FundsReceived abi.TokenAmount + Message string + CurrentInterval uint64 + LegacyProtocol bool +} + +// Identifier provides a unique id for this provider deal +func (pds ProviderDealState) Identifier() ProviderDealIdentifier { + return ProviderDealIdentifier{Receiver: pds.Receiver, DealID: pds.ID} +} + +// ProviderDealIdentifier is a value that uniquely identifies a deal +type ProviderDealIdentifier struct { + Receiver peer.ID + DealID DealID +} + +func (p ProviderDealIdentifier) String() string { + return fmt.Sprintf("%v/%v", p.Receiver, p.DealID) +} + +// RetrievalPeer is a provider address/peer.ID pair (everything needed to make +// deals for with a miner) +type RetrievalPeer struct { + Address address.Address + ID peer.ID // optional + PieceCID *cid.Cid +} + +// QueryResponseStatus indicates whether a queried piece is available +type 
QueryResponseStatus uint64 + +const ( + // QueryResponseAvailable indicates a provider has a piece and is prepared to + // return it + QueryResponseAvailable QueryResponseStatus = iota + + // QueryResponseUnavailable indicates a provider either does not have or cannot + // serve the queried piece to the client + QueryResponseUnavailable + + // QueryResponseError indicates something went wrong generating a query response + QueryResponseError +) + +// QueryItemStatus (V1) indicates whether the requested part of a piece (payload or selector) +// is available for retrieval +type QueryItemStatus uint64 + +const ( + // QueryItemAvailable indicates requested part of the piece is available to be + // served + QueryItemAvailable QueryItemStatus = iota + + // QueryItemUnavailable indicates the piece either does not contain the requested + // item or it cannot be served + QueryItemUnavailable + + // QueryItemUnknown indicates the provider cannot determine if the given item + // is part of the requested piece (for example, if the piece is sealed and the + // miner does not maintain a payload CID index) + QueryItemUnknown +) + +// QueryParams - V1 - indicate what specific information about a piece that a retrieval +// client is interested in, as well as specific parameters the client is seeking +// for the retrieval deal +type QueryParams struct { + PieceCID *cid.Cid // optional, query if miner has this cid in this piece. some miners may not be able to respond. + //Selector ipld.Node // optional, query if miner has this cid in this piece. some miners may not be able to respond. 
+ //MaxPricePerByte abi.TokenAmount // optional, tell miner uninterested if more expensive than this + //MinPaymentInterval uint64 // optional, tell miner uninterested unless payment interval is greater than this + //MinPaymentIntervalIncrease uint64 // optional, tell miner uninterested unless payment interval increase is greater than this +} + +// Query is a query to a given provider to determine information about a piece +// they may have available for retrieval +type Query struct { + PayloadCID cid.Cid // V0 + QueryParams // V1 +} + +// QueryUndefined is a query with no values +var QueryUndefined = Query{} + +// NewQueryV0 creates a V0 query (which only specifies a payload) +func NewQueryV0(payloadCID cid.Cid) Query { + return Query{PayloadCID: payloadCID} +} + +// NewQueryV1 creates a V1 query (which has an optional pieceCID) +func NewQueryV1(payloadCID cid.Cid, pieceCID *cid.Cid) Query { + return Query{ + PayloadCID: payloadCID, + QueryParams: QueryParams{ + PieceCID: pieceCID, + }, + } +} + +// QueryResponse is a miners response to a given retrieval query +type QueryResponse struct { + Status QueryResponseStatus + PieceCIDFound QueryItemStatus // V1 - if a PieceCID was requested, the result + //SelectorFound QueryItemStatus // V1 - if a Selector was requested, the result + + Size uint64 // Total size of piece in bytes + //ExpectedPayloadSize uint64 // V1 - optional, if PayloadCID + selector are specified and miner knows, can offer an expected size + + PaymentAddress address.Address // address to send funds to -- may be different than miner addr + MinPricePerByte abi.TokenAmount + MaxPaymentInterval uint64 + MaxPaymentIntervalIncrease uint64 + Message string + UnsealPrice abi.TokenAmount +} + +// QueryResponseUndefined is an empty QueryResponse +var QueryResponseUndefined = QueryResponse{} + +// PieceRetrievalPrice is the total price to retrieve the piece (size * MinPricePerByte + UnsealedPrice) +func (qr QueryResponse) PieceRetrievalPrice() abi.TokenAmount { 
+ return big.Add(big.Mul(qr.MinPricePerByte, abi.NewTokenAmount(int64(qr.Size))), qr.UnsealPrice) +} + +// PayloadRetrievalPrice is the expected price to retrieve just the given payload +// & selector (V1) +//func (qr QueryResponse) PayloadRetrievalPrice() abi.TokenAmount { +// return types.BigMul(qr.MinPricePerByte, types.NewInt(qr.ExpectedPayloadSize)) +//} + +// IsTerminalError returns true if this status indicates processing of this deal +// is complete with an error +func IsTerminalError(status DealStatus) bool { + return status == DealStatusDealNotFound || + status == DealStatusFailing || + status == DealStatusRejected +} + +// IsTerminalSuccess returns true if this status indicates processing of this deal +// is complete with a success +func IsTerminalSuccess(status DealStatus) bool { + return status == DealStatusCompleted +} + +// IsTerminalStatus returns true if this status indicates processing of a deal is +// complete (either success or error) +func IsTerminalStatus(status DealStatus) bool { + return IsTerminalError(status) || IsTerminalSuccess(status) +} + +// Params are the parameters requested for a retrieval deal proposal +type Params struct { + Selector *cbg.Deferred // V1 + PieceCID *cid.Cid + PricePerByte abi.TokenAmount + PaymentInterval uint64 // when to request payment + PaymentIntervalIncrease uint64 + UnsealPrice abi.TokenAmount +} + +func (p Params) SelectorSpecified() bool { + return p.Selector != nil && !bytes.Equal(p.Selector.Raw, cbg.CborNull) +} + +// NewParamsV0 generates parameters for a retrieval deal, which is always a whole piece deal +func NewParamsV0(pricePerByte abi.TokenAmount, paymentInterval uint64, paymentIntervalIncrease uint64) Params { + return Params{ + PricePerByte: pricePerByte, + PaymentInterval: paymentInterval, + PaymentIntervalIncrease: paymentIntervalIncrease, + UnsealPrice: big.Zero(), + } +} + +// NewParamsV1 generates parameters for a retrieval deal, including a selector +func NewParamsV1(pricePerByte 
abi.TokenAmount, paymentInterval uint64, paymentIntervalIncrease uint64, sel ipld.Node, pieceCid *cid.Cid, unsealPrice abi.TokenAmount) (Params, error) { + var buffer bytes.Buffer + + if sel == nil { + return Params{}, xerrors.New("selector required for NewParamsV1") + } + + err := dagcbor.Encoder(sel, &buffer) + if err != nil { + return Params{}, xerrors.Errorf("error encoding selector: %w", err) + } + + return Params{ + Selector: &cbg.Deferred{Raw: buffer.Bytes()}, + PieceCID: pieceCid, + PricePerByte: pricePerByte, + PaymentInterval: paymentInterval, + PaymentIntervalIncrease: paymentIntervalIncrease, + UnsealPrice: unsealPrice, + }, nil +} + +// DealID is an identifier for a retrieval deal (unique to a client) +type DealID uint64 + +func (d DealID) String() string { + return fmt.Sprintf("%d", d) +} + +// DealProposal is a proposal for a new retrieval deal +type DealProposal struct { + PayloadCID cid.Cid + ID DealID + Params +} + +// Type method makes DealProposal usable as a voucher +func (dp *DealProposal) Type() datatransfer.TypeIdentifier { + return "RetrievalDealProposal/1" +} + +// DealProposalUndefined is an undefined deal proposal +var DealProposalUndefined = DealProposal{} + +// DealResponse is a response to a retrieval deal proposal +type DealResponse struct { + Status DealStatus + ID DealID + + // payment required to proceed + PaymentOwed abi.TokenAmount + + Message string +} + +// Type method makes DealResponse usable as a voucher result +func (dr *DealResponse) Type() datatransfer.TypeIdentifier { + return "RetrievalDealResponse/1" +} + +// DealResponseUndefined is an undefined deal response +var DealResponseUndefined = DealResponse{} + +// DealPayment is a payment for an in progress retrieval deal +type DealPayment struct { + ID DealID + PaymentChannel address.Address + PaymentVoucher *paych.SignedVoucher +} + +// Type method makes DealPayment usable as a voucher +func (dr *DealPayment) Type() datatransfer.TypeIdentifier { + return 
"RetrievalDealPayment/1" +} + +// DealPaymentUndefined is an undefined deal payment +var DealPaymentUndefined = DealPayment{} + +var ( + // ErrNotFound means a piece was not found during retrieval + ErrNotFound = errors.New("not found") + + // ErrVerification means a retrieval contained a block response that did not verify + ErrVerification = errors.New("Error when verify data") +) + +type Ask struct { + PricePerByte abi.TokenAmount + UnsealPrice abi.TokenAmount + PaymentInterval uint64 + PaymentIntervalIncrease uint64 +} + +// ShortfallErorr is an error that indicates a short fall of funds +type ShortfallError struct { + shortfall abi.TokenAmount +} + +// NewShortfallError returns a new error indicating a shortfall of funds +func NewShortfallError(shortfall abi.TokenAmount) error { + return ShortfallError{shortfall} +} + +// Shortfall returns the numerical value of the shortfall +func (se ShortfallError) Shortfall() abi.TokenAmount { + return se.shortfall +} +func (se ShortfallError) Error() string { + return fmt.Sprintf("Inssufficient Funds. 
Shortfall: %s", se.shortfall.String()) +} + +// ChannelAvailableFunds provides information about funds in a channel +type ChannelAvailableFunds struct { + // ConfirmedAmt is the amount of funds that have been confirmed on-chain + // for the channel + ConfirmedAmt abi.TokenAmount + // PendingAmt is the amount of funds that are pending confirmation on-chain + PendingAmt abi.TokenAmount + // PendingWaitSentinel can be used with PaychGetWaitReady to wait for + // confirmation of pending funds + PendingWaitSentinel *cid.Cid + // QueuedAmt is the amount that is queued up behind a pending request + QueuedAmt abi.TokenAmount + // VoucherRedeemedAmt is the amount that is redeemed by vouchers on-chain + // and in the local datastore + VoucherReedeemedAmt abi.TokenAmount +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/types_cbor_gen.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/types_cbor_gen.go new file mode 100644 index 0000000000..8413261ae7 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/types_cbor_gen.go @@ -0,0 +1,2796 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package retrievalmarket + +import ( + "fmt" + "io" + + piecestore "github.com/filecoin-project/go-fil-markets/piecestore" + multistore "github.com/filecoin-project/go-multistore" + paych "github.com/filecoin-project/specs-actors/actors/builtin/paych" + peer "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +func (t *Query) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{162}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.PayloadCID (cid.Cid) (struct) + if len("PayloadCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PayloadCID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PayloadCID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PayloadCID")); err != nil { + return err + } + + if err := cbg.WriteCidBuf(scratch, w, t.PayloadCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PayloadCID: %w", err) + } + + // t.QueryParams (retrievalmarket.QueryParams) (struct) + if len("QueryParams") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"QueryParams\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("QueryParams"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("QueryParams")); err != nil { + return err + } + + if err := t.QueryParams.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *Query) UnmarshalCBOR(r io.Reader) error { + *t = Query{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Query: map 
struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.PayloadCID (cid.Cid) (struct) + case "PayloadCID": + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PayloadCID: %w", err) + } + + t.PayloadCID = c + + } + // t.QueryParams (retrievalmarket.QueryParams) (struct) + case "QueryParams": + + { + + if err := t.QueryParams.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.QueryParams: %w", err) + } + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *QueryResponse) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{169}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Status (retrievalmarket.QueryResponseStatus) (uint64) + if len("Status") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Status\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Status"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Status")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.PieceCIDFound (retrievalmarket.QueryItemStatus) (uint64) + if len("PieceCIDFound") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCIDFound\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PieceCIDFound"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceCIDFound")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, 
uint64(t.PieceCIDFound)); err != nil { + return err + } + + // t.Size (uint64) (uint64) + if len("Size") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Size\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Size"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Size")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Size)); err != nil { + return err + } + + // t.PaymentAddress (address.Address) (struct) + if len("PaymentAddress") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentAddress\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PaymentAddress"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentAddress")); err != nil { + return err + } + + if err := t.PaymentAddress.MarshalCBOR(w); err != nil { + return err + } + + // t.MinPricePerByte (big.Int) (struct) + if len("MinPricePerByte") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MinPricePerByte\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("MinPricePerByte"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MinPricePerByte")); err != nil { + return err + } + + if err := t.MinPricePerByte.MarshalCBOR(w); err != nil { + return err + } + + // t.MaxPaymentInterval (uint64) (uint64) + if len("MaxPaymentInterval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MaxPaymentInterval\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("MaxPaymentInterval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MaxPaymentInterval")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.MaxPaymentInterval)); err != nil { + 
return err + } + + // t.MaxPaymentIntervalIncrease (uint64) (uint64) + if len("MaxPaymentIntervalIncrease") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MaxPaymentIntervalIncrease\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("MaxPaymentIntervalIncrease"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MaxPaymentIntervalIncrease")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.MaxPaymentIntervalIncrease)); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.UnsealPrice (big.Int) (struct) + if len("UnsealPrice") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UnsealPrice\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("UnsealPrice"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("UnsealPrice")); err != nil { + return err + } + + if err := t.UnsealPrice.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *QueryResponse) UnmarshalCBOR(r io.Reader) error { + *t = QueryResponse{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != 
cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("QueryResponse: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Status (retrievalmarket.QueryResponseStatus) (uint64) + case "Status": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = QueryResponseStatus(extra) + + } + // t.PieceCIDFound (retrievalmarket.QueryItemStatus) (uint64) + case "PieceCIDFound": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PieceCIDFound = QueryItemStatus(extra) + + } + // t.Size (uint64) (uint64) + case "Size": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Size = uint64(extra) + + } + // t.PaymentAddress (address.Address) (struct) + case "PaymentAddress": + + { + + if err := t.PaymentAddress.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentAddress: %w", err) + } + + } + // t.MinPricePerByte (big.Int) (struct) + case "MinPricePerByte": + + { + + if err := t.MinPricePerByte.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.MinPricePerByte: %w", err) + } + + } + // t.MaxPaymentInterval (uint64) (uint64) + case "MaxPaymentInterval": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MaxPaymentInterval = 
uint64(extra) + + } + // t.MaxPaymentIntervalIncrease (uint64) (uint64) + case "MaxPaymentIntervalIncrease": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MaxPaymentIntervalIncrease = uint64(extra) + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.UnsealPrice (big.Int) (struct) + case "UnsealPrice": + + { + + if err := t.UnsealPrice.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealPrice: %w", err) + } + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *DealProposal) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{163}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.PayloadCID (cid.Cid) (struct) + if len("PayloadCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PayloadCID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PayloadCID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PayloadCID")); err != nil { + return err + } + + if err := cbg.WriteCidBuf(scratch, w, t.PayloadCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PayloadCID: %w", err) + } + + // t.ID (retrievalmarket.DealID) (uint64) + if len("ID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ID")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ID)); err 
!= nil { + return err + } + + // t.Params (retrievalmarket.Params) (struct) + if len("Params") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Params\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Params"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Params")); err != nil { + return err + } + + if err := t.Params.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *DealProposal) UnmarshalCBOR(r io.Reader) error { + *t = DealProposal{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealProposal: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.PayloadCID (cid.Cid) (struct) + case "PayloadCID": + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PayloadCID: %w", err) + } + + t.PayloadCID = c + + } + // t.ID (retrievalmarket.DealID) (uint64) + case "ID": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ID = DealID(extra) + + } + // t.Params (retrievalmarket.Params) (struct) + case "Params": + + { + + if err := t.Params.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Params: %w", err) + } + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *DealResponse) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + 
return err + } + if _, err := w.Write([]byte{164}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Status (retrievalmarket.DealStatus) (uint64) + if len("Status") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Status\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Status"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Status")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.ID (retrievalmarket.DealID) (uint64) + if len("ID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ID")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ID)); err != nil { + return err + } + + // t.PaymentOwed (big.Int) (struct) + if len("PaymentOwed") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentOwed\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PaymentOwed"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentOwed")); err != nil { + return err + } + + if err := t.PaymentOwed.MarshalCBOR(w); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + return nil +} + +func (t *DealResponse) UnmarshalCBOR(r io.Reader) error { + *t = DealResponse{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealResponse: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Status (retrievalmarket.DealStatus) (uint64) + case "Status": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = DealStatus(extra) + + } + // t.ID (retrievalmarket.DealID) (uint64) + case "ID": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ID = DealID(extra) + + } + // t.PaymentOwed (big.Int) (struct) + case "PaymentOwed": + + { + + if err := t.PaymentOwed.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentOwed: %w", err) + } + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Message = string(sval) + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *Params) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, 
err := w.Write([]byte{166}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Selector (typegen.Deferred) (struct) + if len("Selector") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Selector\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Selector"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Selector")); err != nil { + return err + } + + if err := t.Selector.MarshalCBOR(w); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + if len("PieceCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PieceCID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceCID")); err != nil { + return err + } + + if t.PieceCID == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + } + + // t.PricePerByte (big.Int) (struct) + if len("PricePerByte") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PricePerByte\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PricePerByte"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PricePerByte")); err != nil { + return err + } + + if err := t.PricePerByte.MarshalCBOR(w); err != nil { + return err + } + + // t.PaymentInterval (uint64) (uint64) + if len("PaymentInterval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentInterval\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PaymentInterval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentInterval")); err != nil { + return err + } + 
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PaymentInterval)); err != nil { + return err + } + + // t.PaymentIntervalIncrease (uint64) (uint64) + if len("PaymentIntervalIncrease") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentIntervalIncrease\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PaymentIntervalIncrease"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentIntervalIncrease")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PaymentIntervalIncrease)); err != nil { + return err + } + + // t.UnsealPrice (big.Int) (struct) + if len("UnsealPrice") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UnsealPrice\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("UnsealPrice"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("UnsealPrice")); err != nil { + return err + } + + if err := t.UnsealPrice.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *Params) UnmarshalCBOR(r io.Reader) error { + *t = Params{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Params: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Selector (typegen.Deferred) (struct) + case "Selector": + + { + + t.Selector = new(cbg.Deferred) + + if err := t.Selector.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("failed to read deferred field: %w", 
err) + } + } + // t.PieceCID (cid.Cid) (struct) + case "PieceCID": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = &c + } + + } + // t.PricePerByte (big.Int) (struct) + case "PricePerByte": + + { + + if err := t.PricePerByte.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.PricePerByte: %w", err) + } + + } + // t.PaymentInterval (uint64) (uint64) + case "PaymentInterval": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentInterval = uint64(extra) + + } + // t.PaymentIntervalIncrease (uint64) (uint64) + case "PaymentIntervalIncrease": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentIntervalIncrease = uint64(extra) + + } + // t.UnsealPrice (big.Int) (struct) + case "UnsealPrice": + + { + + if err := t.UnsealPrice.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealPrice: %w", err) + } + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *QueryParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{161}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.PieceCID (cid.Cid) (struct) + if len("PieceCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PieceCID"))); err != nil { + return err + } + if _, 
err := io.WriteString(w, string("PieceCID")); err != nil { + return err + } + + if t.PieceCID == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + } + + return nil +} + +func (t *QueryParams) UnmarshalCBOR(r io.Reader) error { + *t = QueryParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("QueryParams: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.PieceCID (cid.Cid) (struct) + case "PieceCID": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = &c + } + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *DealPayment) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{163}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.ID (retrievalmarket.DealID) (uint64) + if len("ID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ID")); err != nil { + return err + 
} + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ID)); err != nil { + return err + } + + // t.PaymentChannel (address.Address) (struct) + if len("PaymentChannel") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentChannel\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PaymentChannel"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentChannel")); err != nil { + return err + } + + if err := t.PaymentChannel.MarshalCBOR(w); err != nil { + return err + } + + // t.PaymentVoucher (paych.SignedVoucher) (struct) + if len("PaymentVoucher") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentVoucher\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PaymentVoucher"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentVoucher")); err != nil { + return err + } + + if err := t.PaymentVoucher.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *DealPayment) UnmarshalCBOR(r io.Reader) error { + *t = DealPayment{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealPayment: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.ID (retrievalmarket.DealID) (uint64) + case "ID": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ID = DealID(extra) + + } + // 
t.PaymentChannel (address.Address) (struct) + case "PaymentChannel": + + { + + if err := t.PaymentChannel.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentChannel: %w", err) + } + + } + // t.PaymentVoucher (paych.SignedVoucher) (struct) + case "PaymentVoucher": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.PaymentVoucher = new(paych.SignedVoucher) + if err := t.PaymentVoucher.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentVoucher pointer: %w", err) + } + } + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *ClientDealState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{181}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.DealProposal (retrievalmarket.DealProposal) (struct) + if len("DealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealProposal\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealProposal")); err != nil { + return err + } + + if err := t.DealProposal.MarshalCBOR(w); err != nil { + return err + } + + // t.StoreID (multistore.StoreID) (uint64) + if len("StoreID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StoreID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StoreID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("StoreID")); err != nil { + return err + } + + if t.StoreID == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, 
uint64(*t.StoreID)); err != nil { + return err + } + } + + // t.ChannelID (datatransfer.ChannelID) (struct) + if len("ChannelID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ChannelID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ChannelID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ChannelID")); err != nil { + return err + } + + if err := t.ChannelID.MarshalCBOR(w); err != nil { + return err + } + + // t.LastPaymentRequested (bool) (bool) + if len("LastPaymentRequested") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"LastPaymentRequested\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("LastPaymentRequested"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("LastPaymentRequested")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.LastPaymentRequested); err != nil { + return err + } + + // t.AllBlocksReceived (bool) (bool) + if len("AllBlocksReceived") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AllBlocksReceived\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("AllBlocksReceived"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("AllBlocksReceived")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.AllBlocksReceived); err != nil { + return err + } + + // t.TotalFunds (big.Int) (struct) + if len("TotalFunds") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TotalFunds\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("TotalFunds"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TotalFunds")); err != nil { + return err + } + + if err := t.TotalFunds.MarshalCBOR(w); err != nil { + return err + } + + // t.ClientWallet (address.Address) (struct) + if 
len("ClientWallet") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ClientWallet\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ClientWallet"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ClientWallet")); err != nil { + return err + } + + if err := t.ClientWallet.MarshalCBOR(w); err != nil { + return err + } + + // t.MinerWallet (address.Address) (struct) + if len("MinerWallet") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MinerWallet\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("MinerWallet"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MinerWallet")); err != nil { + return err + } + + if err := t.MinerWallet.MarshalCBOR(w); err != nil { + return err + } + + // t.PaymentInfo (retrievalmarket.PaymentInfo) (struct) + if len("PaymentInfo") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentInfo\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PaymentInfo"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentInfo")); err != nil { + return err + } + + if err := t.PaymentInfo.MarshalCBOR(w); err != nil { + return err + } + + // t.Status (retrievalmarket.DealStatus) (uint64) + if len("Status") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Status\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Status"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Status")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.Sender (peer.ID) (string) + if len("Sender") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Sender\" was too long") + } + + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Sender"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Sender")); err != nil { + return err + } + + if len(t.Sender) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Sender was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Sender))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Sender)); err != nil { + return err + } + + // t.TotalReceived (uint64) (uint64) + if len("TotalReceived") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TotalReceived\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("TotalReceived"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TotalReceived")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TotalReceived)); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.BytesPaidFor (uint64) (uint64) + if len("BytesPaidFor") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"BytesPaidFor\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("BytesPaidFor"))); err != nil { + return err + } + if 
_, err := io.WriteString(w, string("BytesPaidFor")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.BytesPaidFor)); err != nil { + return err + } + + // t.CurrentInterval (uint64) (uint64) + if len("CurrentInterval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CurrentInterval\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("CurrentInterval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CurrentInterval")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.CurrentInterval)); err != nil { + return err + } + + // t.PaymentRequested (big.Int) (struct) + if len("PaymentRequested") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentRequested\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PaymentRequested"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentRequested")); err != nil { + return err + } + + if err := t.PaymentRequested.MarshalCBOR(w); err != nil { + return err + } + + // t.FundsSpent (big.Int) (struct) + if len("FundsSpent") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsSpent\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("FundsSpent"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FundsSpent")); err != nil { + return err + } + + if err := t.FundsSpent.MarshalCBOR(w); err != nil { + return err + } + + // t.UnsealFundsPaid (big.Int) (struct) + if len("UnsealFundsPaid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UnsealFundsPaid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("UnsealFundsPaid"))); err != nil { + return err + } + if _, err := 
io.WriteString(w, string("UnsealFundsPaid")); err != nil { + return err + } + + if err := t.UnsealFundsPaid.MarshalCBOR(w); err != nil { + return err + } + + // t.WaitMsgCID (cid.Cid) (struct) + if len("WaitMsgCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"WaitMsgCID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("WaitMsgCID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("WaitMsgCID")); err != nil { + return err + } + + if t.WaitMsgCID == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.WaitMsgCID); err != nil { + return xerrors.Errorf("failed to write cid field t.WaitMsgCID: %w", err) + } + } + + // t.VoucherShortfall (big.Int) (struct) + if len("VoucherShortfall") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"VoucherShortfall\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("VoucherShortfall"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("VoucherShortfall")); err != nil { + return err + } + + if err := t.VoucherShortfall.MarshalCBOR(w); err != nil { + return err + } + + // t.LegacyProtocol (bool) (bool) + if len("LegacyProtocol") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"LegacyProtocol\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("LegacyProtocol"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("LegacyProtocol")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.LegacyProtocol); err != nil { + return err + } + return nil +} + +func (t *ClientDealState) UnmarshalCBOR(r io.Reader) error { + *t = ClientDealState{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != 
cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("ClientDealState: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealProposal (retrievalmarket.DealProposal) (struct) + case "DealProposal": + + { + + if err := t.DealProposal.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal: %w", err) + } + + } + // t.StoreID (multistore.StoreID) (uint64) + case "StoreID": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + typed := multistore.StoreID(extra) + t.StoreID = &typed + } + + } + // t.ChannelID (datatransfer.ChannelID) (struct) + case "ChannelID": + + { + + if err := t.ChannelID.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ChannelID: %w", err) + } + + } + // t.LastPaymentRequested (bool) (bool) + case "LastPaymentRequested": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.LastPaymentRequested = false + case 21: + t.LastPaymentRequested = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.AllBlocksReceived (bool) (bool) + case "AllBlocksReceived": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + 
case 20: + t.AllBlocksReceived = false + case 21: + t.AllBlocksReceived = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.TotalFunds (big.Int) (struct) + case "TotalFunds": + + { + + if err := t.TotalFunds.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.TotalFunds: %w", err) + } + + } + // t.ClientWallet (address.Address) (struct) + case "ClientWallet": + + { + + if err := t.ClientWallet.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ClientWallet: %w", err) + } + + } + // t.MinerWallet (address.Address) (struct) + case "MinerWallet": + + { + + if err := t.MinerWallet.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.MinerWallet: %w", err) + } + + } + // t.PaymentInfo (retrievalmarket.PaymentInfo) (struct) + case "PaymentInfo": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.PaymentInfo = new(PaymentInfo) + if err := t.PaymentInfo.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentInfo pointer: %w", err) + } + } + + } + // t.Status (retrievalmarket.DealStatus) (uint64) + case "Status": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = DealStatus(extra) + + } + // t.Sender (peer.ID) (string) + case "Sender": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Sender = peer.ID(sval) + } + // t.TotalReceived (uint64) (uint64) + case "TotalReceived": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TotalReceived = uint64(extra) + + } + // t.Message (string) (string) + case 
"Message": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.BytesPaidFor (uint64) (uint64) + case "BytesPaidFor": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.BytesPaidFor = uint64(extra) + + } + // t.CurrentInterval (uint64) (uint64) + case "CurrentInterval": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.CurrentInterval = uint64(extra) + + } + // t.PaymentRequested (big.Int) (struct) + case "PaymentRequested": + + { + + if err := t.PaymentRequested.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.PaymentRequested: %w", err) + } + + } + // t.FundsSpent (big.Int) (struct) + case "FundsSpent": + + { + + if err := t.FundsSpent.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.FundsSpent: %w", err) + } + + } + // t.UnsealFundsPaid (big.Int) (struct) + case "UnsealFundsPaid": + + { + + if err := t.UnsealFundsPaid.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealFundsPaid: %w", err) + } + + } + // t.WaitMsgCID (cid.Cid) (struct) + case "WaitMsgCID": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.WaitMsgCID: %w", err) + } + + t.WaitMsgCID = &c + } + + } + // t.VoucherShortfall (big.Int) (struct) + case "VoucherShortfall": + + { + + if err := t.VoucherShortfall.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.VoucherShortfall: %w", err) + } + + } + // t.LegacyProtocol (bool) (bool) + case "LegacyProtocol": + + 
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.LegacyProtocol = false + case 21: + t.LegacyProtocol = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *ProviderDealState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{171}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.DealProposal (retrievalmarket.DealProposal) (struct) + if len("DealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealProposal\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealProposal")); err != nil { + return err + } + + if err := t.DealProposal.MarshalCBOR(w); err != nil { + return err + } + + // t.StoreID (multistore.StoreID) (uint64) + if len("StoreID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StoreID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StoreID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("StoreID")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.StoreID)); err != nil { + return err + } + + // t.ChannelID (datatransfer.ChannelID) (struct) + if len("ChannelID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ChannelID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ChannelID"))); err != nil { + return err + } + if _, err := io.WriteString(w, 
string("ChannelID")); err != nil { + return err + } + + if err := t.ChannelID.MarshalCBOR(w); err != nil { + return err + } + + // t.PieceInfo (piecestore.PieceInfo) (struct) + if len("PieceInfo") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceInfo\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PieceInfo"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceInfo")); err != nil { + return err + } + + if err := t.PieceInfo.MarshalCBOR(w); err != nil { + return err + } + + // t.Status (retrievalmarket.DealStatus) (uint64) + if len("Status") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Status\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Status"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Status")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.Receiver (peer.ID) (string) + if len("Receiver") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Receiver\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Receiver"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Receiver")); err != nil { + return err + } + + if len(t.Receiver) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Receiver was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Receiver))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Receiver)); err != nil { + return err + } + + // t.TotalSent (uint64) (uint64) + if len("TotalSent") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TotalSent\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, 
uint64(len("TotalSent"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TotalSent")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TotalSent)); err != nil { + return err + } + + // t.FundsReceived (big.Int) (struct) + if len("FundsReceived") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsReceived\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("FundsReceived"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FundsReceived")); err != nil { + return err + } + + if err := t.FundsReceived.MarshalCBOR(w); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.CurrentInterval (uint64) (uint64) + if len("CurrentInterval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CurrentInterval\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("CurrentInterval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CurrentInterval")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.CurrentInterval)); err != nil { + return err + } + + // t.LegacyProtocol (bool) (bool) + if len("LegacyProtocol") > 
cbg.MaxLength { + return xerrors.Errorf("Value in field \"LegacyProtocol\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("LegacyProtocol"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("LegacyProtocol")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.LegacyProtocol); err != nil { + return err + } + return nil +} + +func (t *ProviderDealState) UnmarshalCBOR(r io.Reader) error { + *t = ProviderDealState{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("ProviderDealState: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealProposal (retrievalmarket.DealProposal) (struct) + case "DealProposal": + + { + + if err := t.DealProposal.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal: %w", err) + } + + } + // t.StoreID (multistore.StoreID) (uint64) + case "StoreID": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.StoreID = multistore.StoreID(extra) + + } + // t.ChannelID (datatransfer.ChannelID) (struct) + case "ChannelID": + + { + + if err := t.ChannelID.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ChannelID: %w", err) + } + + } + // t.PieceInfo (piecestore.PieceInfo) (struct) + case "PieceInfo": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return 
err + } + t.PieceInfo = new(piecestore.PieceInfo) + if err := t.PieceInfo.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.PieceInfo pointer: %w", err) + } + } + + } + // t.Status (retrievalmarket.DealStatus) (uint64) + case "Status": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = DealStatus(extra) + + } + // t.Receiver (peer.ID) (string) + case "Receiver": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Receiver = peer.ID(sval) + } + // t.TotalSent (uint64) (uint64) + case "TotalSent": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TotalSent = uint64(extra) + + } + // t.FundsReceived (big.Int) (struct) + case "FundsReceived": + + { + + if err := t.FundsReceived.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReceived: %w", err) + } + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.CurrentInterval (uint64) (uint64) + case "CurrentInterval": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.CurrentInterval = uint64(extra) + + } + // t.LegacyProtocol (bool) (bool) + case "LegacyProtocol": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.LegacyProtocol = false + case 21: + t.LegacyProtocol = true + default: + return fmt.Errorf("booleans are either major type 
7, value 20 or 21 (got %d)", extra) + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{162}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.PayCh (address.Address) (struct) + if len("PayCh") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PayCh\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PayCh"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PayCh")); err != nil { + return err + } + + if err := t.PayCh.MarshalCBOR(w); err != nil { + return err + } + + // t.Lane (uint64) (uint64) + if len("Lane") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Lane\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Lane"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Lane")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Lane)); err != nil { + return err + } + + return nil +} + +func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error { + *t = PaymentInfo{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("PaymentInfo: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.PayCh (address.Address) (struct) + case "PayCh": + + { + + if err := t.PayCh.UnmarshalCBOR(br); err != nil 
{ + return xerrors.Errorf("unmarshaling t.PayCh: %w", err) + } + + } + // t.Lane (uint64) (uint64) + case "Lane": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Lane = uint64(extra) + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *RetrievalPeer) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{163}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Address (address.Address) (struct) + if len("Address") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Address\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Address"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Address")); err != nil { + return err + } + + if err := t.Address.MarshalCBOR(w); err != nil { + return err + } + + // t.ID (peer.ID) (string) + if len("ID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ID")); err != nil { + return err + } + + if len(t.ID) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.ID was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.ID))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.ID)); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + if len("PieceCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PieceCID"))); err != nil { + return err + } + 
if _, err := io.WriteString(w, string("PieceCID")); err != nil { + return err + } + + if t.PieceCID == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + } + + return nil +} + +func (t *RetrievalPeer) UnmarshalCBOR(r io.Reader) error { + *t = RetrievalPeer{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("RetrievalPeer: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Address (address.Address) (struct) + case "Address": + + { + + if err := t.Address.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Address: %w", err) + } + + } + // t.ID (peer.ID) (string) + case "ID": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.ID = peer.ID(sval) + } + // t.PieceCID (cid.Cid) (struct) + case "PieceCID": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = &c + } + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *Ask) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{164}); err != nil { + return err + } + + scratch := 
make([]byte, 9) + + // t.PricePerByte (big.Int) (struct) + if len("PricePerByte") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PricePerByte\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PricePerByte"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PricePerByte")); err != nil { + return err + } + + if err := t.PricePerByte.MarshalCBOR(w); err != nil { + return err + } + + // t.UnsealPrice (big.Int) (struct) + if len("UnsealPrice") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UnsealPrice\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("UnsealPrice"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("UnsealPrice")); err != nil { + return err + } + + if err := t.UnsealPrice.MarshalCBOR(w); err != nil { + return err + } + + // t.PaymentInterval (uint64) (uint64) + if len("PaymentInterval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentInterval\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PaymentInterval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentInterval")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PaymentInterval)); err != nil { + return err + } + + // t.PaymentIntervalIncrease (uint64) (uint64) + if len("PaymentIntervalIncrease") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PaymentIntervalIncrease\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PaymentIntervalIncrease"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PaymentIntervalIncrease")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PaymentIntervalIncrease)); err 
!= nil { + return err + } + + return nil +} + +func (t *Ask) UnmarshalCBOR(r io.Reader) error { + *t = Ask{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Ask: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.PricePerByte (big.Int) (struct) + case "PricePerByte": + + { + + if err := t.PricePerByte.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.PricePerByte: %w", err) + } + + } + // t.UnsealPrice (big.Int) (struct) + case "UnsealPrice": + + { + + if err := t.UnsealPrice.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.UnsealPrice: %w", err) + } + + } + // t.PaymentInterval (uint64) (uint64) + case "PaymentInterval": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentInterval = uint64(extra) + + } + // t.PaymentIntervalIncrease (uint64) (uint64) + case "PaymentIntervalIncrease": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PaymentIntervalIncrease = uint64(extra) + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/shared/selectors.go b/vendor/github.com/filecoin-project/go-fil-markets/shared/selectors.go new file mode 100644 index 0000000000..3e3bb91aae --- /dev/null +++ 
b/vendor/github.com/filecoin-project/go-fil-markets/shared/selectors.go @@ -0,0 +1,16 @@ +package shared + +import ( + "github.com/ipld/go-ipld-prime" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/traversal/selector" + "github.com/ipld/go-ipld-prime/traversal/selector/builder" +) + +// entire DAG selector +func AllSelector() ipld.Node { + ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) + return ssb.ExploreRecursive(selector.RecursionLimitNone(), + ssb.ExploreAll(ssb.ExploreRecursiveEdge())). + Node() +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/shared/types.go b/vendor/github.com/filecoin-project/go-fil-markets/shared/types.go new file mode 100644 index 0000000000..7f752a236c --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/shared/types.go @@ -0,0 +1,7 @@ +package shared + +// TipSetToken is the implementation-nonspecific identity for a tipset. +type TipSetToken []byte + +// Unsubscribe is a function that gets called to unsubscribe from (storage|retrieval)market events +type Unsubscribe func() diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/README.md b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/README.md new file mode 100644 index 0000000000..d89c02c40b --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/README.md @@ -0,0 +1,370 @@ +# storagemarket +The storagemarket module is intended for Filecoin node implementations written in Go. +It implements functionality to allow execution of storage market deals, and for Providers to set their storage price on the Filecoin network. +The node implementation must provide access to chain operations, and persistent +data storage. 
+ +## Table of Contents +* [Background reading](#background-reading) +* [Installation](#Installation) +* [Operation](#Operation) +* [Implementation](#Implementation) + * [StorageCommon](#StorageCommon) + * [StorageClientNode](#StorageClientNode) + * [StorageProviderNode](#StorageProviderNode) +* [Technical Documentation](#technical-documentation) + +## Background reading + +Please see the +[Filecoin Storage Market Specification](https://filecoin-project.github.io/specs/#systems__filecoin_markets__storage_market). + +## Installation +The build process for storagemarket requires Go >= v1.13. + +To install: +```bash +go get github.com/filecoin-project/go-fil-markets/storagemarket +``` + +## Operation +The `storagemarket` package provides high level APIs to execute data storage deals between a +storage client and a storage provider (a.k.a. storage miner) on the Filecoin network. +The Filecoin node must implement the [`StorageCommon`](#StorageCommon), [`StorageProviderNode`](#StorageProviderNode), and +[`StorageClientNode`](#StorageClientNode) interfaces in order to construct and use the module. + +Deals are expected to survive a node restart; deals and related information are + expected to be stored on disk. + +`storagemarket` communicates its deal operations and requested data via + [go-data-transfer](https://github.com/filecoin-project/go-data-transfer) using + [go-graphsync](https://github.com/ipfs/go-graphsync). + +## Implementation + +### General Steps +1. Decide if your node can be configured as a Storage Provider, a Storage Client or both. +1. Determine how and where your calls to StorageProvider and StorageClient functions + will be made. +1. Implement the required interfaces as described in this section. +1. Construct a [StorageClient](#StorageClient) and/or [StorageProvider](#StorageProvider) in your node's startup. +Call the StorageProvider's `Start` function in the appropriate place, and its `Stop` +function in the appropriate place. +1. 
Expose desired `storagemarket` functionality to whatever internal modules desired, such as + command line interface, JSON RPC, or HTTP API. + +Implement the [`StorageCommon`](#StorageCommon), [`StorageProviderNode`](#StorageProviderNode), and + [`StorageClientNode`](#StorageClientNode) interfaces in + [storagemarket/types.go](./types.go), described below: + +### StorageCommon +`StorageCommon` is an interface common to both `StorageProviderNode` and `StorageClientNode`. Its + functions are: +* [`GetChainHead`](#GetChainHead) +* [`AddFunds`](#AddFunds) +* [`EnsureFunds`](#EnsureFunds) +* [`GetBalance`](#GetBalance) +* [`VerifySignature`](#VerifySignature) +* [`WaitForMessage`](#WaitForMessage) +* [`SignBytes`](#SignBytes) +* [`GetMinerWorkerAddress`](#GetMinerWorkerAddress) + +#### AddFunds +```go +func AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) +``` + +Send `amount` to `addr` by posting a message on chain. Return the message CID. + +#### EnsureFunds +```go +func EnsureFunds(ctx context.Context, addr, wallet address.Address, amount abi.TokenAmount, + tok shared.TipSetToken) (cid.Cid, error) +``` + +Make sure `addr` has `amount` funds and if not, `wallet` should send any needed balance to + `addr` by posting a message on chain. Returns the message CID. + +#### GetBalance +```go +func GetBalance(ctx context.Context, addr address.Address, tok shared.TipSetToken) (Balance, error) +``` +Retrieve the Balance of FIL in `addr`. A `Balance` consists of `Locked` and `Available` `abi.TokenAmount`s + +#### VerifySignature +```go +func VerifySignature(ctx context.Context, signature crypto.Signature, signer address.Address, + plaintext []byte, tok shared.TipSetToken) (bool, error) +``` +Verify that `signature` is valid, cryptographically and otherwise, for the +given `signer`, `plaintext`, and `tok`. 
+ +#### WaitForMessage +```go +func WaitForMessage(ctx context.Context, mcid cid.Cid, + onCompletion func(exitcode.ExitCode, []byte, error) error) error +``` +Wait for message CID `mcid` to appear on chain, and call `onCompletion` when it does so. + +#### SignBytes +```go +func SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) +``` + +Cryptographically sign bytes `b` using the private key referenced by address `signer`. + +#### GetMinerWorkerAddress +```go +func GetMinerWorkerAddress(ctx context.Context, addr address.Address, tok shared.TipSetToken, + ) (address.Address, error) +``` + +Get the miner worker address for the given miner owner, as of `tok`. + +--- +### StorageProviderNode +`StorageProviderNode` is the interface for dependencies for a `StorageProvider`. It contains: + +* [`StorageCommon`](#StorageCommon) interface +* [`PublishDeals`](#PublishDeals) +* [`ListProviderDeals`](#ListProviderDeals) +* [`OnDealSectorCommitted`](#OnDealSectorCommitted) +* [`LocatePieceForDealWithinSector`](#LocatePieceForDealWithinSector) +* [`OnDealExpiredOrSlashed`](#OnDealExpiredOrSlashed) + +#### GetChainHead +```go +func GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) +``` +Get the current chain head. Return its TipSetToken and its abi.ChainEpoch. + +#### PublishDeals +```go +func PublishDeals(ctx context.Context, deal MinerDeal) (cid.Cid, error) +``` +Post the deal to chain, returning the posted message CID. + +#### OnDealComplete +```go +func OnDealComplete(ctx context.Context, deal MinerDeal, pieceSize abi.UnpaddedPieceSize, + pieceReader io.Reader) error +``` +The function to be called when MinerDeal `deal` has reached the `storagemarket.StorageDealCompleted` state. +A `MinerDeal` contains more information than a StorageDeal, including paths, addresses, and CIDs +pertinent to the deal. 
See [storagemarket/types.go](./types.go) + +#### OnDealSectorCommitted +```go +func OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, + cb DealSectorCommittedCallback) error +``` + +Register the function to be called once `provider` has committed sector(s) for `dealID`. + +#### LocatePieceForDealWithinSector +```go +func LocatePieceForDealWithinSector(ctx context.Context, dealID abi.DealID, tok shared.TipSetToken, + ) (sectorID uint64, offset uint64, length uint64, err error) +``` + +Find the piece associated with `dealID` as of `tok` and return the sector id, plus the offset and + length of the data within the sector. + +#### OnDealExpiredOrSlashed +```go +func OnDealExpiredOrSlashed( + ctx context.Context, + dealID abi.DealID, + onDealExpired DealExpiredCallback, + onDealSlashed DealSlashedCallback) error +``` + +Register callbacks to be called when a deal expires or is slashed. + +--- +### StorageClientNode +`StorageClientNode` implements dependencies for a StorageClient. It contains: +* [`StorageCommon`](#StorageCommon) interface +* [`GetChainHead`](#GetChainHead) +* [`ListClientDeals`](#ListClientDeals) +* [`ListStorageProviders`](#ListStorageProviders) +* [`ValidatePublishedDeal`](#ValidatePublishedDeal) +* [`SignProposal`](#SignProposal) +* [`GetDefaultWalletAddress`](#GetDefaultWalletAddress) +* [`OnDealSectorCommitted`](#OnDealSectorCommitted) +* [`OnDealExpiredOrSlashed`](#OnDealExpiredOrSlashed) + +#### StorageCommon +`StorageClientNode` implements `StorageCommon`, described above. + +#### GetChainHead +```go +func GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) +``` +Get the current chain head. Return its TipSetToken and its abi.ChainEpoch. + +#### ListStorageProviders +```go +func ListStorageProviders(ctx context.Context, tok shared.TipSetToken + ) ([]*StorageProviderInfo, error) +``` + +Return a slice of `StorageProviderInfo`, for all known storage providers. 
+ +#### ValidatePublishedDeal +```go +func ValidatePublishedDeal(ctx context.Context, deal ClientDeal) (abi.DealID, error) +``` +Query the chain for `deal` and inspect the message parameters to make sure they match the expected deal. Return the deal ID. + +#### SignProposal +```go +func SignProposal(ctx context.Context, signer address.Address, proposal market.DealProposal + ) (*market.ClientDealProposal, error) +``` + +Cryptographically sign `proposal` using the private key of `signer` and return a + ClientDealProposal (includes signature data). + +#### GetDefaultWalletAddress +```go +func GetDefaultWalletAddress(ctx context.Context) (address.Address, error) +``` + +Get the Client's default wallet address, which will be used to add Storage Market funds (collateral and payment). + +#### OnDealSectorCommitted +```go +func OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, + cb DealSectorCommittedCallback) error +``` + +Register a callback to be called once the Deal's sector(s) are committed. + +#### OnDealExpiredOrSlashed +```go +func OnDealExpiredOrSlashed( + ctx context.Context, + dealID abi.DealID, + onDealExpired DealExpiredCallback, + onDealSlashed DealSlashedCallback) error +``` + +Register callbacks to be called when a deal expires or is slashed. + +#### GetMinerInfo +```go +func GetMinerInfo(ctx context.Context, maddr address.Address, tok shared.TipSetToken, + ) (*StorageProviderInfo, error) +``` + +Returns `StorageProviderInfo` for a specific provider at the given address + + +## Construction + +### StorageClient +To construct a new StorageClient: +```go +func NewClient( + net network.StorageMarketNetwork, + bs blockstore.Blockstore, + dataTransfer datatransfer.Manager, + discovery *discovery.Local, + ds datastore.Batching, + scn storagemarket.StorageClientNode, +) (*Client, error) +``` +**Parameters** + +* `net network.StorageMarketNetwork` is a network abstraction for the storage market. 
+  See [github.com/ipfs/go-ipfs-blockstore](https://github.com/ipfs/go-ipfs-blockstore).
+ +### StorageProvider +To construct a new StorageProvider: +```go +func NewProvider(net network.StorageMarketNetwork, + ds datastore.Batching, + bs blockstore.Blockstore, + fs filestore.FileStore, + pieceStore piecestore.PieceStore, + dataTransfer datatransfer.Manager, + spn storagemarket.StorageProviderNode, + minerAddress address.Address, + rt abi.RegisteredProof, + storedAsk StoredAsk, + options ...StorageProviderOption, +) (storagemarket.StorageProvider, error) { +``` + +**Parameters** +* `net network.StorageMarketNetwork` is the same interface as for [StorageClientNode](#StorageClientNode) +* `ds datastore.Batching` is the same interface as for [StorageClientNode](#StorageClientNode) +* `bs blockstore.Blockstore` is the same interface as for [StorageClientNode](#StorageClientNode) +* `fs filestore.FileStore` is an instance of the [filestore.FileStore](../filestore) struct from the + go-fil-markets repo. +* `pieceStore piecestore.PieceStore` is the database of deals and pieces associated with them. +See this repo's [piecestore module](../piecestore). +* `dataTransfer` is the same interface as for [StorageClientNode](#StorageClientNode) +* `spn storagemarket.StorageProviderNode` is the implementation of the [`StorageProviderNode`](#StorageProviderNode) API + that was written for your node. +* `minerAddress address.Address` is the miner owner address. +* `rt abi.RegisteredProof` is an int64 indicating the type of proof to use when generating a piece commitment (CommP). + see [github.com/filecoin-project/go-state-types/abi/sector.go](https://github.com/filecoin-project/specs-actors/blob/master/actors/abi/sector.go) + for the list and meaning of accepted values. +* `storedAsk StoredAsk` is an interface for getting and adding storage Asks. It is implemented in storagemarket. 
+[![Diagram of StorageProviderFSM](../docs/storageprovider.mmd.png)](https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/storageprovider.mmd.svg)
+	// AddPaymentEscrow adds storage collateral
+// Note: this status has meaning in the context of this module only - it is not +// recorded on chain +type StorageDealStatus = uint64 + +const ( + // StorageDealUnknown means the current status of a deal is undefined + StorageDealUnknown = StorageDealStatus(iota) + + // StorageDealProposalNotFound is a status returned in responses when the deal itself cannot + // be located + StorageDealProposalNotFound + + // StorageDealProposalRejected is returned by a StorageProvider when it chooses not to accept + // a DealProposal + StorageDealProposalRejected + + // StorageDealProposalAccepted indicates an intent to accept a storage deal proposal + StorageDealProposalAccepted + + // StorageDealStaged means a deal has been published and data is ready to be put into a sector + StorageDealStaged + + // StorageDealSealing means a deal is in a sector that is being sealed + StorageDealSealing + + // StorageDealFinalizing means a deal is in a sealed sector and we're doing final + // housekeeping before marking it active + StorageDealFinalizing + + // StorageDealActive means a deal is in a sealed sector and the miner is proving the data + // for the deal + StorageDealActive + + // StorageDealExpired means a deal has passed its final epoch and is expired + StorageDealExpired + + // StorageDealSlashed means the deal was in a sector that got slashed from failing to prove + StorageDealSlashed + + // StorageDealRejecting means the Provider has rejected the deal, and will send a rejection response + StorageDealRejecting + + // StorageDealFailing means something has gone wrong in a deal. 
Once data is cleaned up the deal will finalize on + // StorageDealError + StorageDealFailing + + // StorageDealFundsEnsured means we've deposited funds as necessary to create a deal, ready to move forward + StorageDealFundsEnsured + + // StorageDealCheckForAcceptance means the client is waiting for a provider to seal and publish a deal + StorageDealCheckForAcceptance + + // StorageDealValidating means the provider is validating that deal parameters are good for a proposal + StorageDealValidating + + // StorageDealAcceptWait means the provider is running any custom decision logic to decide whether or not to accept the deal + StorageDealAcceptWait + + // StorageDealStartDataTransfer means data transfer is beginning + StorageDealStartDataTransfer + + // StorageDealTransferring means data is being sent from the client to the provider via the data transfer module + StorageDealTransferring + + // StorageDealWaitingForData indicates either a manual transfer + // or that the provider has not received a data transfer request from the client + StorageDealWaitingForData + + // StorageDealVerifyData means data has been transferred and we are attempting to verify it against the PieceCID + StorageDealVerifyData + + // StorageDealEnsureProviderFunds means that provider is making sure it has adequate funds for the deal in the StorageMarketActor + StorageDealEnsureProviderFunds + + // StorageDealEnsureClientFunds means that client is making sure it has adequate funds for the deal in the StorageMarketActor + StorageDealEnsureClientFunds + + // StorageDealProviderFunding means that the provider has deposited funds in the StorageMarketActor and it is waiting + // to see the funds appear in its balance + StorageDealProviderFunding + + // StorageDealClientFunding means that the client has deposited funds in the StorageMarketActor and it is waiting + // to see the funds appear in its balance + StorageDealClientFunding + + // StorageDealPublish means the deal is ready to be published on 
chain + StorageDealPublish + + // StorageDealPublishing means the deal has been published but we are waiting for it to appear on chain + StorageDealPublishing + + // StorageDealError means the deal has failed due to an error, and no further updates will occur + StorageDealError + + // StorageDealProviderTransferRestart means a storage deal data transfer from client to provider will be restarted + // by the provider + StorageDealProviderTransferRestart + + // StorageDealClientTransferRestart means a storage deal data transfer from client to provider will be restarted + // by the client + StorageDealClientTransferRestart +) + +// DealStates maps StorageDealStatus codes to string names +var DealStates = map[StorageDealStatus]string{ + StorageDealUnknown: "StorageDealUnknown", + StorageDealProposalNotFound: "StorageDealProposalNotFound", + StorageDealProposalRejected: "StorageDealProposalRejected", + StorageDealProposalAccepted: "StorageDealProposalAccepted", + StorageDealAcceptWait: "StorageDealAcceptWait", + StorageDealStartDataTransfer: "StorageDealStartDataTransfer", + StorageDealStaged: "StorageDealStaged", + StorageDealSealing: "StorageDealSealing", + StorageDealActive: "StorageDealActive", + StorageDealExpired: "StorageDealExpired", + StorageDealSlashed: "StorageDealSlashed", + StorageDealRejecting: "StorageDealRejecting", + StorageDealFailing: "StorageDealFailing", + StorageDealFundsEnsured: "StorageDealFundsEnsured", + StorageDealCheckForAcceptance: "StorageDealCheckForAcceptance", + StorageDealValidating: "StorageDealValidating", + StorageDealTransferring: "StorageDealTransferring", + StorageDealWaitingForData: "StorageDealWaitingForData", + StorageDealVerifyData: "StorageDealVerifyData", + StorageDealEnsureProviderFunds: "StorageDealEnsureProviderFunds", + StorageDealEnsureClientFunds: "StorageDealEnsureClientFunds", + StorageDealProviderFunding: "StorageDealProviderFunding", + StorageDealClientFunding: "StorageDealClientFunding", + StorageDealPublish: 
"StorageDealPublish", + StorageDealPublishing: "StorageDealPublishing", + StorageDealError: "StorageDealError", + StorageDealFinalizing: "StorageDealFinalizing", + StorageDealClientTransferRestart: "StorageDealClientTransferRestart", + StorageDealProviderTransferRestart: "StorageDealProviderTransferRestart", +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/doc.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/doc.go new file mode 100644 index 0000000000..7d912bbbc6 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/doc.go @@ -0,0 +1,124 @@ +/* +Package storagemarket implements the Filecoin storage protocol. + +An overview of the storage protocol can be found in the Filecoin specification: + +https://filecoin-project.github.io/specs/#systems__filecoin_markets__storage_market + +The following architectural components provide a brief overview of the design of +the storagemarket module: + +Public Interfaces And Node Dependencies + +A core goal of this module is to isolate the negotiation of deals from the actual chain operations +performed by the node to put the deal on chain. The module primarily orchestrates the storage deal +flow, rather than performing specific chain operations which are delegated to the node. + +As such, for both the client and the provider in the storage market, the module defines a top level +public interface which it provides an implementation for, and a node interface that must be implemented +by the Filecoin node itself, and provided as a dependency. These node interfaces provide a universal way to +talk to potentially multiple different Filecoin node implementations, and can be implemented using HTTP +or some other interprocess communication to talk to a node implementation running in a different process. + +The top level interfaces this package implements are StorageClient & StorageProvider. 
The dependencies the Filecoin +node is expected to implement are StorageClientNode & StorageProviderNode. Further documentation of exactly what those +dependencies should do can be found in the readme. + +Finite State Machines and Resumability + +Making deals in Filecoin is a highly asynchronous process. For a large piece of data, it's possible that the entire +process of proposing a deal, transferring data, publishing the deal, putting the data in a sector and sealing it +could take hours or even days. Not surprisingly, many things can go wrong along the way. To manage the process +of orchestrating deals, we use finite state machines that update deal state when discrete events occur. State updates +always persist state to disk. This means we have a permanent record of exactly what's going on with deals at any time, +and we can ideally survive our Filecoin processes shutting down and restarting. + +The following diagrams visualize the statemachine flows for the client and the provider: + +Client FSM - https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/storageclient.mmd.svg + +Provider FSM - https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/storageprovider.mmd.svg + +Identifying Providers For A Deal + +The StorageClient provides two functions to locate a provider with whom to make a deal: + +`ListProviders` returns a list of storage providers on the Filecoin network. This list is assembled by +querying the chain state for active storage miners. + +`QueryAsk` queries a single provider for more specific details about the kinds of deals they accept, as +expressed through a `StorageAsk`. + +Deal Flow + +The primary mechanism for initiating storage deals is the `ProposeStorageDeal` method on the StorageClient. 
+ +When `ProposeStorageDeal` is called, it constructs and signs a DealProposal, initiates tracking of deal state +and hands the deal to the Client FSM, returning the CID of the DealProposal which constitutes the identifier for +that deal. + +After some preparation steps, the FSM will send the deal proposal to the StorageProvider, which receives the deal +in `HandleDealStream`. `HandleDealStream` initiates tracking of deal state on the Provider side and hands the deal to +the Provider FSM, which handles the rest of deal flow. + +From this point forward, deal negotiation is completely asynchronous and runs in the FSMs. + +A user of the modules can monitor deal progress through `SubscribeToEvents` methods on StorageClient and StorageProvider, +or by simply calling `ListLocalDeals` to get all deal statuses. + +The FSMs implement every step in deal negotiation up to deal publishing. However, adding the deal to a sector and sealing +it is handled outside this module. When a deal is published, the StorageProvider calls `OnDealComplete` on the StorageProviderNode +interface (the node itself likely delegates management of sectors and sealing to an implementation of the Storage Mining subsystem +of the Filecoin spec). At this point, the markets implementations essentially shift to being monitors of deal progression: +they wait to see and record when the deal becomes active and later expired or slashed. + +When a deal becomes active on chain, the provider records the location of where it's stored in a sector in the PieceStore, +so that it's available for retrieval. + +Major Dependencies + +Other libraries in go-fil-markets: + +https://github.com/filecoin-project/go-fil-markets/tree/master/filestore - used to store pieces and other +temporary data before it's transferred to either a sector or the PieceStore. + +https://github.com/filecoin-project/go-fil-markets/tree/master/pieceio - used to convert back and forth between raw +payload data and pieces that fit in sector. 
Also provides utilities for generating CommP. + +https://github.com/filecoin-project/go-fil-markets/tree/master/piecestore - used to write information about where data +lives in sectors so that it can later be retrieved. + +https://github.com/filecoin-project/go-fil-markets/tree/master/shared - types and utility functions shared with +retrievalmarket package. + +Other Filecoin Repos: + +https://github.com/filecoin-project/go-data-transfer - for transferring data, via go-graphsync + +https://github.com/filecoin-project/go-statemachine - a finite state machine that tracks deal state + +https://github.com/filecoin-project/go-storedcounter - for generating and persisting unique deal IDs + +https://github.com/filecoin-project/specs-actors - the Filecoin actors + +IPFS Project Repos: + +https://github.com/ipfs/go-graphsync - used by go-data-transfer + +https://github.com/ipfs/go-datastore - for persisting statemachine state for deals + +https://github.com/ipfs/go-ipfs-blockstore - for storing and retrieving block data for deals + +Other Repos: + +https://github.com/libp2p/go-libp2p) the network over which retrieval deal data is exchanged. + +https://github.com/hannahhoward/go-pubsub - for pub/sub notifications external to the statemachine + +Root Package + +This top level package defines top level enumerations and interfaces. 
+	// ClientEventInitiateDataTransfer happens when a client is ready to transfer data to a provider
+	// ClientEventDataTransferFailed happens when the client can't initiate a push data transfer to the provider
happens when a deal is slashed + ClientEventDealSlashed + + // ClientEventFailed happens when a deal terminates in failure + ClientEventFailed + + // ClientEventRestart is used to resume the deal after a state machine shutdown + ClientEventRestart + + // ClientEventDataTransferStalled happens when the clients data transfer experiences a disconnect + ClientEventDataTransferStalled +) + +// ClientEvents maps client event codes to string names +var ClientEvents = map[ClientEvent]string{ + ClientEventOpen: "ClientEventOpen", + ClientEventEnsureFundsFailed: "ClientEventEnsureFundsFailed", + ClientEventFundingInitiated: "ClientEventFundingInitiated", + ClientEventFundsReserved: "ClientEventFundsReserved", + ClientEventFundsReleased: "ClientEventFundsReleased", + ClientEventFundsEnsured: "ClientEventFundsEnsured", + ClientEventWriteProposalFailed: "ClientEventWriteProposalFailed", + ClientEventInitiateDataTransfer: "ClientEventInitiateDataTransfer", + ClientEventDataTransferInitiated: "ClientEventDataTransferInitiated", + ClientEventDataTransferComplete: "ClientEventDataTransferComplete", + ClientEventWaitForDealState: "ClientEventWaitForDealState", + ClientEventDataTransferFailed: "ClientEventDataTransferFailed", + ClientEventReadResponseFailed: "ClientEventReadResponseFailed", + ClientEventResponseVerificationFailed: "ClientEventResponseVerificationFailed", + ClientEventResponseDealDidNotMatch: "ClientEventResponseDealDidNotMatch", + ClientEventUnexpectedDealState: "ClientEventUnexpectedDealState", + ClientEventStreamCloseError: "ClientEventStreamCloseError", + ClientEventDealRejected: "ClientEventDealRejected", + ClientEventDealAccepted: "ClientEventDealAccepted", + ClientEventDealPublishFailed: "ClientEventDealPublishFailed", + ClientEventDealPublished: "ClientEventDealPublished", + ClientEventDealActivationFailed: "ClientEventDealActivationFailed", + ClientEventDealActivated: "ClientEventDealActivated", + ClientEventDealCompletionFailed: 
"ClientEventDealCompletionFailed", + ClientEventDealExpired: "ClientEventDealExpired", + ClientEventDealSlashed: "ClientEventDealSlashed", + ClientEventFailed: "ClientEventFailed", + ClientEventRestart: "ClientEventRestart", + ClientEventDataTransferRestarted: "ClientEventDataTransferRestarted", + ClientEventDataTransferRestartFailed: "ClientEventDataTransferRestartFailed", + ClientEventDataTransferStalled: "ClientEventDataTransferStalled", +} + +// ProviderEvent is an event that happens in the provider's deal state machine +type ProviderEvent uint64 + +const ( + // ProviderEventOpen indicates a new deal proposal has been received + ProviderEventOpen ProviderEvent = iota + + // ProviderEventNodeErrored indicates an error happened talking to the node implementation + ProviderEventNodeErrored + + // ProviderEventDealDeciding happens when a deal is being decided on by the miner + ProviderEventDealDeciding + + // ProviderEventDealRejected happens when a deal proposal is rejected for not meeting criteria + ProviderEventDealRejected + + // ProviderEventRejectionSent happens after a deal proposal rejection has been sent to the client + ProviderEventRejectionSent + + // ProviderEventDealAccepted happens when a deal is accepted based on provider criteria + ProviderEventDealAccepted + + // ProviderEventInsufficientFunds indicates not enough funds available for a deal + ProviderEventInsufficientFunds + + // ProviderEventFundsReserved indicates we've reserved funds for a deal, adding to our overall total + ProviderEventFundsReserved + + // ProviderEventFundsReleased indicates we've released funds for a deal + ProviderEventFundsReleased + + // ProviderEventFundingInitiated indicates provider collateral funding has been initiated + ProviderEventFundingInitiated + + // ProviderEventFunded indicates provider collateral has appeared in the storage market balance + ProviderEventFunded + + // ProviderEventDataTransferFailed happens when an error occurs transferring data + 
ProviderEventDataTransferFailed + + // ProviderEventDataRequested happens when a provider requests data from a client + ProviderEventDataRequested + + // ProviderEventDataTransferInitiated happens when a data transfer starts + ProviderEventDataTransferInitiated + + // ProviderEventDataTransferRestarted happens when a data transfer restarts + ProviderEventDataTransferRestarted + + // ProviderEventDataTransferCompleted happens when a data transfer is successful + ProviderEventDataTransferCompleted + + // ProviderEventManualDataReceived happens when data is received manually for an offline deal + ProviderEventManualDataReceived + + // ProviderEventDataVerificationFailed happens when an error occurs validating deal data + ProviderEventDataVerificationFailed + + // ProviderEventVerifiedData happens when received data is verified as matching the pieceCID in a deal proposal + ProviderEventVerifiedData + + // ProviderEventSendResponseFailed happens when a response cannot be sent to a deal + ProviderEventSendResponseFailed + + // ProviderEventDealPublishInitiated happens when a provider has sent a PublishStorageDeals message to the chain + ProviderEventDealPublishInitiated + + // ProviderEventDealPublished happens when a deal is successfully published + ProviderEventDealPublished + + // ProviderEventDealPublishError happens when PublishStorageDeals returns a non-ok exit code + ProviderEventDealPublishError + + // ProviderEventFileStoreErrored happens when an error occurs accessing the filestore + ProviderEventFileStoreErrored + + // ProviderEventDealHandoffFailed happens when an error occurs handing off a deal with OnDealComplete + ProviderEventDealHandoffFailed + + // ProviderEventDealHandedOff happens when a deal is successfully handed off to the node for processing in a sector + ProviderEventDealHandedOff + + // ProviderEventDealActivationFailed happens when an error occurs activating a deal + ProviderEventDealActivationFailed + + // ProviderEventUnableToLocatePiece 
happens when an attempt to learn the location of a piece from + // the node fails + ProviderEventUnableToLocatePiece + + // ProviderEventDealActivated happens when a deal is successfully activated and commited to a sector + ProviderEventDealActivated + + // ProviderEventPieceStoreErrored happens when an attempt to save data in the piece store errors + ProviderEventPieceStoreErrored + + // ProviderEventReadMetadataErrored happens when an error occurs reading recorded piece metadata + ProviderEventReadMetadataErrored + + // ProviderEventFinalized happens when final housekeeping is complete and a deal is active + ProviderEventFinalized + + // ProviderEventDealCompletionFailed happens when a miner cannot verify a deal expired or was slashed + ProviderEventDealCompletionFailed + + // ProviderEventMultistoreErrored indicates an error happened with a store for a deal + ProviderEventMultistoreErrored + + // ProviderEventDealExpired happens when a deal expires + ProviderEventDealExpired + + // ProviderEventDealSlashed happens when a deal is slashed + ProviderEventDealSlashed + + // ProviderEventFailed indicates a deal has failed and should no longer be processed + ProviderEventFailed + + // ProviderEventTrackFundsFailed indicates a failure trying to locally track funds needed for deals + ProviderEventTrackFundsFailed + + // ProviderEventRestart is used to resume the deal after a state machine shutdown + ProviderEventRestart + + // ProviderEventDataTransferRestartFailed means a data transfer that was restarted by the provider failed + ProviderEventDataTransferRestartFailed + + // ProviderEventDataTransferStalled happens when the providers data transfer experiences a disconnect + ProviderEventDataTransferStalled +) + +// ProviderEvents maps provider event codes to string names +var ProviderEvents = map[ProviderEvent]string{ + ProviderEventOpen: "ProviderEventOpen", + ProviderEventNodeErrored: "ProviderEventNodeErrored", + ProviderEventDealRejected: 
"ProviderEventDealRejected", + ProviderEventRejectionSent: "ProviderEventRejectionSent", + ProviderEventDealAccepted: "ProviderEventDealAccepted", + ProviderEventDealDeciding: "ProviderEventDealDeciding", + ProviderEventInsufficientFunds: "ProviderEventInsufficientFunds", + ProviderEventFundsReserved: "ProviderEventFundsReserved", + ProviderEventFundsReleased: "ProviderEventFundsReleased", + ProviderEventFundingInitiated: "ProviderEventFundingInitiated", + ProviderEventFunded: "ProviderEventFunded", + ProviderEventDataTransferFailed: "ProviderEventDataTransferFailed", + ProviderEventDataRequested: "ProviderEventDataRequested", + ProviderEventDataTransferInitiated: "ProviderEventDataTransferInitiated", + ProviderEventDataTransferCompleted: "ProviderEventDataTransferCompleted", + ProviderEventManualDataReceived: "ProviderEventManualDataReceived", + ProviderEventDataVerificationFailed: "ProviderEventDataVerificationFailed", + ProviderEventVerifiedData: "ProviderEventVerifiedData", + ProviderEventSendResponseFailed: "ProviderEventSendResponseFailed", + ProviderEventDealPublishInitiated: "ProviderEventDealPublishInitiated", + ProviderEventDealPublished: "ProviderEventDealPublished", + ProviderEventDealPublishError: "ProviderEventDealPublishError", + ProviderEventFileStoreErrored: "ProviderEventFileStoreErrored", + ProviderEventDealHandoffFailed: "ProviderEventDealHandoffFailed", + ProviderEventDealHandedOff: "ProviderEventDealHandedOff", + ProviderEventDealActivationFailed: "ProviderEventDealActivationFailed", + ProviderEventDealActivated: "ProviderEventDealActivated", + ProviderEventPieceStoreErrored: "ProviderEventPieceStoreErrored", + ProviderEventFinalized: "ProviderEventCleanupFinished", + ProviderEventDealCompletionFailed: "ProviderEventDealCompletionFailed", + ProviderEventMultistoreErrored: "ProviderEventMultistoreErrored", + ProviderEventDealExpired: "ProviderEventDealExpired", + ProviderEventDealSlashed: "ProviderEventDealSlashed", + ProviderEventFailed: 
"ProviderEventFailed", + ProviderEventTrackFundsFailed: "ProviderEventTrackFundsFailed", + ProviderEventRestart: "ProviderEventRestart", + ProviderEventDataTransferRestarted: "ProviderEventDataTransferRestarted", + ProviderEventDataTransferRestartFailed: "ProviderEventDataTransferRestartFailed", + ProviderEventDataTransferStalled: "ProviderEventDataTransferStalled", +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/common.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/common.go new file mode 100644 index 0000000000..e265c0376b --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/common.go @@ -0,0 +1,86 @@ +package requestvalidation + +import ( + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/libp2p/go-libp2p-core/peer" + "golang.org/x/xerrors" + + datatransfer "github.com/filecoin-project/go-data-transfer" + + "github.com/filecoin-project/go-fil-markets/storagemarket" +) + +// ValidatePush validates a push request received from the peer that will send data +// Will succeed only if: +// - voucher has correct type +// - voucher references an active deal +// - referenced deal matches the client +// - referenced deal matches the given base CID +// - referenced deal is in an acceptable state +func ValidatePush( + deals PushDeals, + sender peer.ID, + voucher datatransfer.Voucher, + baseCid cid.Cid, + Selector ipld.Node) error { + dealVoucher, ok := voucher.(*StorageDataTransferVoucher) + if !ok { + return xerrors.Errorf("voucher type %s: %w", voucher.Type(), ErrWrongVoucherType) + } + + var deal storagemarket.MinerDeal + deal, err := deals.Get(dealVoucher.Proposal) + if err != nil { + return xerrors.Errorf("Proposal CID %s: %w", dealVoucher.Proposal.String(), ErrNoDeal) + } + if deal.Client != sender { + return xerrors.Errorf("Deal Peer %s, Data Transfer Peer %s: %w", deal.Client.String(), 
sender.String(), ErrWrongPeer) + } + + if !deal.Ref.Root.Equals(baseCid) { + return xerrors.Errorf("Deal Payload CID %s, Data Transfer CID %s: %w", deal.Proposal.PieceCID.String(), baseCid.String(), ErrWrongPiece) + } + for _, state := range DataTransferStates { + if deal.State == state { + return nil + } + } + return xerrors.Errorf("Deal State %s: %w", deal.State, ErrInacceptableDealState) +} + +// ValidatePull validates a pull request received from the peer that will receive data +// Will succeed only if: +// - voucher has correct type +// - voucher references an active deal +// - referenced deal matches the receiver (miner) +// - referenced deal matches the given base CID +// - referenced deal is in an acceptable state +func ValidatePull( + deals PullDeals, + receiver peer.ID, + voucher datatransfer.Voucher, + baseCid cid.Cid, + Selector ipld.Node) error { + dealVoucher, ok := voucher.(*StorageDataTransferVoucher) + if !ok { + return xerrors.Errorf("voucher type %s: %w", voucher.Type(), ErrWrongVoucherType) + } + deal, err := deals.Get(dealVoucher.Proposal) + if err != nil { + return xerrors.Errorf("Proposal CID %s: %w", dealVoucher.Proposal.String(), ErrNoDeal) + } + + if deal.Miner != receiver { + return xerrors.Errorf("Deal Peer %s, Data Transfer Peer %s: %w", deal.Miner.String(), receiver.String(), ErrWrongPeer) + } + if !deal.DataRef.Root.Equals(baseCid) { + return xerrors.Errorf("Deal Payload CID %s, Data Transfer CID %s: %w", deal.Proposal.PieceCID.String(), baseCid.String(), ErrWrongPiece) + } + for _, state := range DataTransferStates { + if deal.State == state { + return nil + } + } + return xerrors.Errorf("Deal State %s: %w", deal.State, ErrInacceptableDealState) +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/doc.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/doc.go new file mode 100644 index 0000000000..46d70bd902 --- /dev/null +++ 
b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/doc.go @@ -0,0 +1,3 @@ +// Package requestvalidation implements a request validator for the data transfer module +// to validate data transfer requests for storage deals +package requestvalidation diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/types.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/types.go new file mode 100644 index 0000000000..ae076df393 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/types.go @@ -0,0 +1,55 @@ +package requestvalidation + +import ( + "errors" + + "github.com/ipfs/go-cid" + + datatransfer "github.com/filecoin-project/go-data-transfer" + + "github.com/filecoin-project/go-fil-markets/storagemarket" +) + +//go:generate cbor-gen-for StorageDataTransferVoucher + +var ( + // ErrWrongVoucherType means the voucher was not the correct type can validate against + ErrWrongVoucherType = errors.New("cannot validate voucher type") + + // ErrNoPushAccepted just means clients do not accept pushes for storage deals + ErrNoPushAccepted = errors.New("client should not receive data for a storage deal") + + // ErrNoPullAccepted just means providers do not accept pulls for storage deals + ErrNoPullAccepted = errors.New("provider should not send data for a storage deal") + + // ErrNoDeal means no active deal was found for this vouchers proposal cid + ErrNoDeal = errors.New("no deal found for this proposal") + + // ErrWrongPeer means that the other peer for this data transfer request does not match + // the other peer for the deal + ErrWrongPeer = errors.New("data Transfer peer id and Deal peer id do not match") + + // ErrWrongPiece means that the pieceref for this data transfer request does not match + // the one specified in the deal + ErrWrongPiece = errors.New("base CID for deal does not match CID for piece") + 
+ // ErrInacceptableDealState means the deal for this transfer is not in a deal state + // where transfer can be performed + ErrInacceptableDealState = errors.New("deal is not a in a state where deals are accepted") + + // DataTransferStates are the states in which it would make sense to actually start a data transfer + // We accept deals even in the StorageDealTransferring state too as we could also also receive a data transfer restart request + DataTransferStates = []storagemarket.StorageDealStatus{storagemarket.StorageDealValidating, storagemarket.StorageDealWaitingForData, storagemarket.StorageDealUnknown, + storagemarket.StorageDealTransferring, storagemarket.StorageDealProviderTransferRestart} +) + +// StorageDataTransferVoucher is the voucher type for data transfers +// used by the storage market +type StorageDataTransferVoucher struct { + Proposal cid.Cid +} + +// Type is the unique string identifier for a StorageDataTransferVoucher +func (dv *StorageDataTransferVoucher) Type() datatransfer.TypeIdentifier { + return "StorageDataTransferVoucher" +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/types_cbor_gen.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/types_cbor_gen.go new file mode 100644 index 0000000000..d952f221d8 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/types_cbor_gen.go @@ -0,0 +1,68 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package requestvalidation + +import ( + "fmt" + "io" + + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +var lengthBufStorageDataTransferVoucher = []byte{129} + +func (t *StorageDataTransferVoucher) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufStorageDataTransferVoucher); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Proposal (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Proposal); err != nil { + return xerrors.Errorf("failed to write cid field t.Proposal: %w", err) + } + + return nil +} + +func (t *StorageDataTransferVoucher) UnmarshalCBOR(r io.Reader) error { + *t = StorageDataTransferVoucher{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Proposal (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Proposal: %w", err) + } + + t.Proposal = c + + } + return nil +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/unified_request_validator.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/unified_request_validator.go new file mode 100644 index 0000000000..065df6cb2b --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/unified_request_validator.go @@ -0,0 +1,72 @@ +package requestvalidation + +import ( + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/libp2p/go-libp2p-core/peer" + + datatransfer "github.com/filecoin-project/go-data-transfer" + + 
"github.com/filecoin-project/go-fil-markets/storagemarket" +) + +// PushDeals gets deal states for Push validations +type PushDeals interface { + Get(cid.Cid) (storagemarket.MinerDeal, error) +} + +// PullDeals gets deal states for Pull validations +type PullDeals interface { + Get(cid.Cid) (storagemarket.ClientDeal, error) +} + +// UnifiedRequestValidator is a data transfer request validator that validates +// StorageDataTransferVoucher from the given state store +// It can be made to only accept push requests (Provider) or pull requests (Client) +// by passing nil for the statestore value for pushes or pulls +type UnifiedRequestValidator struct { + pushDeals PushDeals + pullDeals PullDeals +} + +// NewUnifiedRequestValidator returns a new instance of UnifiedRequestValidator +func NewUnifiedRequestValidator(pushDeals PushDeals, pullDeals PullDeals) *UnifiedRequestValidator { + return &UnifiedRequestValidator{ + pushDeals: pushDeals, + pullDeals: pullDeals, + } +} + +// SetPushDeals sets the store to look up push deals with +func (v *UnifiedRequestValidator) SetPushDeals(pushDeals PushDeals) { + v.pushDeals = pushDeals +} + +// SetPullDeals sets the store to look up pull deals with +func (v *UnifiedRequestValidator) SetPullDeals(pullDeals PullDeals) { + v.pullDeals = pullDeals +} + +// ValidatePush implements the ValidatePush method of a data transfer request validator. +// If no pushStore exists, it rejects the request +// Otherwise, it calls the ValidatePush function to validate the deal +func (v *UnifiedRequestValidator) ValidatePush(sender peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.VoucherResult, error) { + if v.pushDeals == nil { + return nil, ErrNoPushAccepted + } + + return nil, ValidatePush(v.pushDeals, sender, voucher, baseCid, selector) +} + +// ValidatePull implements the ValidatePull method of a data transfer request validator. 
+// If no pullStore exists, it rejects the request +// Otherwise, it calls the ValidatePull function to validate the deal +func (v *UnifiedRequestValidator) ValidatePull(receiver peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.VoucherResult, error) { + if v.pullDeals == nil { + return nil, ErrNoPullAccepted + } + + return nil, ValidatePull(v.pullDeals, receiver, voucher, baseCid, selector) +} + +var _ datatransfer.RequestValidator = &UnifiedRequestValidator{} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/nodes.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/nodes.go new file mode 100644 index 0000000000..613980dd9b --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/nodes.go @@ -0,0 +1,109 @@ +package storagemarket + +import ( + "context" + "io" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/specs-actors/actors/builtin/market" + "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + + "github.com/filecoin-project/go-fil-markets/shared" +) + +// DealSectorCommittedCallback is a callback that runs when a sector is committed +type DealSectorCommittedCallback func(err error) + +// DealExpiredCallback is a callback that runs when a deal expires +type DealExpiredCallback func(err error) + +// DealSlashedCallback is a callback that runs when a deal gets slashed +type DealSlashedCallback func(slashEpoch abi.ChainEpoch, err error) + +// StorageCommon are common interfaces provided by a filecoin Node to both StorageClient and StorageProvider +type StorageCommon interface { + + // GetChainHead returns a tipset token for the current chain head + GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) + 
+ // Adds funds with the StorageMinerActor for a storage participant. Used by both providers and clients. + AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) + + // EnsureFunds ensures that a storage market participant has a certain amount of available funds + // If additional funds are needed, they will be sent from the 'wallet' address, and a cid for the + // corresponding chain message is returned + EnsureFunds(ctx context.Context, addr, wallet address.Address, amount abi.TokenAmount, tok shared.TipSetToken) (cid.Cid, error) + + // GetBalance returns locked/unlocked for a storage participant. Used by both providers and clients. + GetBalance(ctx context.Context, addr address.Address, tok shared.TipSetToken) (Balance, error) + + // VerifySignature verifies a given set of data was signed properly by a given address's private key + VerifySignature(ctx context.Context, signature crypto.Signature, signer address.Address, plaintext []byte, tok shared.TipSetToken) (bool, error) + + // WaitForMessage waits until a message appears on chain. If it is already on chain, the callback is called immediately + WaitForMessage(ctx context.Context, mcid cid.Cid, onCompletion func(exitcode.ExitCode, []byte, cid.Cid, error) error) error + + // SignsBytes signs the given data with the given address's private key + SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) + + // DealProviderCollateralBounds returns the min and max collateral a storage provider can issue. 
+ DealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, isVerified bool) (abi.TokenAmount, abi.TokenAmount, error) + + // OnDealSectorCommitted waits for a deal's sector to be sealed and proved, indicating the deal is active + OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, cb DealSectorCommittedCallback) error + + // OnDealExpiredOrSlashed registers callbacks to be called when the deal expires or is slashed + OnDealExpiredOrSlashed(ctx context.Context, dealID abi.DealID, onDealExpired DealExpiredCallback, onDealSlashed DealSlashedCallback) error +} + +// PackingResult returns information about how a deal was put into a sector +type PackingResult struct { + SectorNumber abi.SectorNumber + Offset abi.PaddedPieceSize + Size abi.PaddedPieceSize +} + +// StorageProviderNode are node dependencies for a StorageProvider +type StorageProviderNode interface { + StorageCommon + + // PublishDeals publishes a deal on chain, returns the message cid, but does not wait for message to appear + PublishDeals(ctx context.Context, deal MinerDeal) (cid.Cid, error) + + // OnDealComplete is called when a deal is complete and on chain, and data has been transferred and is ready to be added to a sector + OnDealComplete(ctx context.Context, deal MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceReader io.Reader) (*PackingResult, error) + + // GetMinerWorkerAddress returns the worker address associated with a miner + GetMinerWorkerAddress(ctx context.Context, addr address.Address, tok shared.TipSetToken) (address.Address, error) + + // LocatePieceForDealWithinSector looks up a given dealID in the miners sectors, and returns its sectorID and location + LocatePieceForDealWithinSector(ctx context.Context, dealID abi.DealID, tok shared.TipSetToken) (sectorID abi.SectorNumber, offset abi.PaddedPieceSize, length abi.PaddedPieceSize, err error) + + // GetDataCap gets the current data cap for addr + GetDataCap(ctx context.Context, addr 
address.Address, tok shared.TipSetToken) (*verifreg.DataCap, error) +} + +// StorageClientNode are node dependencies for a StorageClient +type StorageClientNode interface { + StorageCommon + + // GetStorageProviders returns information about known miners + ListStorageProviders(ctx context.Context, tok shared.TipSetToken) ([]*StorageProviderInfo, error) + + // ValidatePublishedDeal verifies a deal is published on chain and returns the dealID + ValidatePublishedDeal(ctx context.Context, deal ClientDeal) (abi.DealID, error) + + // SignProposal signs a DealProposal + SignProposal(ctx context.Context, signer address.Address, proposal market.DealProposal) (*market.ClientDealProposal, error) + + // GetDefaultWalletAddress returns the address for this client + GetDefaultWalletAddress(ctx context.Context) (address.Address, error) + + // GetMinerInfo returns info for a single miner with the given address + GetMinerInfo(ctx context.Context, maddr address.Address, tok shared.TipSetToken) (*StorageProviderInfo, error) +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/provider.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/provider.go new file mode 100644 index 0000000000..2459fd5b67 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/provider.go @@ -0,0 +1,53 @@ +package storagemarket + +import ( + "context" + "io" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-fil-markets/shared" +) + +// ProviderSubscriber is a callback that is run when events are emitted on a StorageProvider +type ProviderSubscriber func(event ProviderEvent, deal MinerDeal) + +// StorageProvider provides an interface to the storage market for a single +// storage miner. +type StorageProvider interface { + + // Start initializes deal processing on a StorageProvider and restarts in progress deals. 
+ // It also registers the provider with a StorageMarketNetwork so it can receive incoming + // messages on the storage market's libp2p protocols + Start(ctx context.Context) error + + // OnReady registers a listener for when the provider comes on line + OnReady(shared.ReadyFunc) + + // Stop terminates processing of deals on a StorageProvider + Stop() error + + // SetAsk configures the storage miner's ask with the provided prices (for unverified and verified deals), + // duration, and options. Any previously-existing ask is replaced. + SetAsk(price abi.TokenAmount, verifiedPrice abi.TokenAmount, duration abi.ChainEpoch, options ...StorageAskOption) error + + // GetAsk returns the storage miner's ask, or nil if one does not exist. + GetAsk() *SignedStorageAsk + + // ListLocalDeals lists deals processed by this storage provider + ListLocalDeals() ([]MinerDeal, error) + + // AddStorageCollateral adds storage collateral + AddStorageCollateral(ctx context.Context, amount abi.TokenAmount) error + + // GetStorageCollateral returns the current collateral balance + GetStorageCollateral(ctx context.Context) (Balance, error) + + // ImportDataForDeal manually imports data for an offline storage deal + ImportDataForDeal(ctx context.Context, propCid cid.Cid, data io.Reader) error + + // SubscribeToEvents listens for events that happen related to storage deals on a provider + SubscribeToEvents(subscriber ProviderSubscriber) shared.Unsubscribe +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/types.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/types.go new file mode 100644 index 0000000000..db6914ea92 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/types.go @@ -0,0 +1,191 @@ +package storagemarket + +import ( + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" + ma "github.com/multiformats/go-multiaddr" + cbg "github.com/whyrusleeping/cbor-gen" + + 
"github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-multistore" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/specs-actors/actors/builtin/market" + + "github.com/filecoin-project/go-fil-markets/filestore" +) + +//go:generate cbor-gen-for --map-encoding ClientDeal MinerDeal Balance SignedStorageAsk StorageAsk DataRef ProviderDealState + +// DealProtocolID is the ID for the libp2p protocol for proposing storage deals. +const OldDealProtocolID = "/fil/storage/mk/1.0.1" +const DealProtocolID = "/fil/storage/mk/1.1.0" + +// AskProtocolID is the ID for the libp2p protocol for querying miners for their current StorageAsk. +const OldAskProtocolID = "/fil/storage/ask/1.0.1" +const AskProtocolID = "/fil/storage/ask/1.1.0" + +// DealStatusProtocolID is the ID for the libp2p protocol for querying miners for the current status of a deal. +const OldDealStatusProtocolID = "/fil/storage/status/1.0.1" +const DealStatusProtocolID = "/fil/storage/status/1.1.0" + +// Balance represents a current balance of funds in the StorageMarketActor. +type Balance struct { + Locked abi.TokenAmount + Available abi.TokenAmount +} + +// StorageAsk defines the parameters by which a miner will choose to accept or +// reject a deal. Note: making a storage deal proposal which matches the miner's +// ask is a precondition, but not sufficient to ensure the deal is accepted (the +// storage provider may run its own decision logic). 
+type StorageAsk struct { + // Price per GiB / Epoch + Price abi.TokenAmount + VerifiedPrice abi.TokenAmount + + MinPieceSize abi.PaddedPieceSize + MaxPieceSize abi.PaddedPieceSize + Miner address.Address + Timestamp abi.ChainEpoch + Expiry abi.ChainEpoch + SeqNo uint64 +} + +// SignedStorageAsk is an ask signed by the miner's private key +type SignedStorageAsk struct { + Ask *StorageAsk + Signature *crypto.Signature +} + +// SignedStorageAskUndefined represents the empty value for SignedStorageAsk +var SignedStorageAskUndefined = SignedStorageAsk{} + +// StorageAskOption allows custom configuration of a storage ask +type StorageAskOption func(*StorageAsk) + +// MinPieceSize configures a minimum piece size of a StorageAsk +func MinPieceSize(minPieceSize abi.PaddedPieceSize) StorageAskOption { + return func(sa *StorageAsk) { + sa.MinPieceSize = minPieceSize + } +} + +// MaxPieceSize configures maxiumum piece size of a StorageAsk +func MaxPieceSize(maxPieceSize abi.PaddedPieceSize) StorageAskOption { + return func(sa *StorageAsk) { + sa.MaxPieceSize = maxPieceSize + } +} + +// StorageAskUndefined represents an empty value for StorageAsk +var StorageAskUndefined = StorageAsk{} + +// MinerDeal is the local state tracked for a deal by a StorageProvider +type MinerDeal struct { + market.ClientDealProposal + ProposalCid cid.Cid + AddFundsCid *cid.Cid + PublishCid *cid.Cid + Miner peer.ID + Client peer.ID + State StorageDealStatus + PiecePath filestore.Path + MetadataPath filestore.Path + SlashEpoch abi.ChainEpoch + FastRetrieval bool + Message string + StoreID *multistore.StoreID + FundsReserved abi.TokenAmount + Ref *DataRef + AvailableForRetrieval bool + + DealID abi.DealID + CreationTime cbg.CborTime + + TransferChannelId *datatransfer.ChannelID +} + +// ClientDeal is the local state tracked for a deal by a StorageClient +type ClientDeal struct { + market.ClientDealProposal + ProposalCid cid.Cid + AddFundsCid *cid.Cid + State StorageDealStatus + Miner peer.ID + 
MinerWorker address.Address + DealID abi.DealID + DataRef *DataRef + Message string + PublishMessage *cid.Cid + SlashEpoch abi.ChainEpoch + PollRetryCount uint64 + PollErrorCount uint64 + FastRetrieval bool + StoreID *multistore.StoreID + FundsReserved abi.TokenAmount + CreationTime cbg.CborTime + TransferChannelID *datatransfer.ChannelID +} + +// StorageProviderInfo describes on chain information about a StorageProvider +// (use QueryAsk to determine more specific deal parameters) +type StorageProviderInfo struct { + Address address.Address // actor address + Owner address.Address + Worker address.Address // signs messages + SectorSize uint64 + PeerID peer.ID + Addrs []ma.Multiaddr +} + +// ProposeStorageDealResult returns the result for a proposing a deal +type ProposeStorageDealResult struct { + ProposalCid cid.Cid +} + +// ProposeStorageDealParams describes the parameters for proposing a storage deal +type ProposeStorageDealParams struct { + Addr address.Address + Info *StorageProviderInfo + Data *DataRef + StartEpoch abi.ChainEpoch + EndEpoch abi.ChainEpoch + Price abi.TokenAmount + Collateral abi.TokenAmount + Rt abi.RegisteredSealProof + FastRetrieval bool + VerifiedDeal bool + StoreID *multistore.StoreID +} + +const ( + // TTGraphsync means data for a deal will be transferred by graphsync + TTGraphsync = "graphsync" + + // TTManual means data for a deal will be transferred manually and imported + // on the provider + TTManual = "manual" +) + +// DataRef is a reference for how data will be transferred for a given storage deal +type DataRef struct { + TransferType string + Root cid.Cid + + PieceCid *cid.Cid // Optional for non-manual transfer, will be recomputed from the data if not given + PieceSize abi.UnpaddedPieceSize // Optional for non-manual transfer, will be recomputed from the data if not given +} + +// ProviderDealState represents a Provider's current state of a deal +type ProviderDealState struct { + State StorageDealStatus + Message string + 
Proposal *market.DealProposal + ProposalCid *cid.Cid + AddFundsCid *cid.Cid + PublishCid *cid.Cid + DealID abi.DealID + FastRetrieval bool +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/types_cbor_gen.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/types_cbor_gen.go new file mode 100644 index 0000000000..de3bdb7598 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/types_cbor_gen.go @@ -0,0 +1,2512 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package storagemarket + +import ( + "fmt" + "io" + + datatransfer "github.com/filecoin-project/go-data-transfer" + filestore "github.com/filecoin-project/go-fil-markets/filestore" + multistore "github.com/filecoin-project/go-multistore" + abi "github.com/filecoin-project/go-state-types/abi" + crypto "github.com/filecoin-project/go-state-types/crypto" + market "github.com/filecoin-project/specs-actors/actors/builtin/market" + peer "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +func (t *ClientDeal) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{178}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + if len("ClientDealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ClientDealProposal\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ClientDealProposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ClientDealProposal")); err != nil { + return err + } + + if err := t.ClientDealProposal.MarshalCBOR(w); err != nil { + return err + } + + // t.ProposalCid (cid.Cid) (struct) + if len("ProposalCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field 
\"ProposalCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ProposalCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ProposalCid")); err != nil { + return err + } + + if err := cbg.WriteCidBuf(scratch, w, t.ProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) + } + + // t.AddFundsCid (cid.Cid) (struct) + if len("AddFundsCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AddFundsCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("AddFundsCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("AddFundsCid")); err != nil { + return err + } + + if t.AddFundsCid == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.AddFundsCid); err != nil { + return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) + } + } + + // t.State (uint64) (uint64) + if len("State") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"State\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("State"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("State")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.Miner (peer.ID) (string) + if len("Miner") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Miner\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Miner"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Miner")); err != nil { + return err + } + + if len(t.Miner) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Miner was too long") + } + + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Miner))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Miner)); err != nil { + return err + } + + // t.MinerWorker (address.Address) (struct) + if len("MinerWorker") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MinerWorker\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("MinerWorker"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MinerWorker")); err != nil { + return err + } + + if err := t.MinerWorker.MarshalCBOR(w); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealID")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.DataRef (storagemarket.DataRef) (struct) + if len("DataRef") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DataRef\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DataRef"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DataRef")); err != nil { + return err + } + + if err := t.DataRef.MarshalCBOR(w); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in 
field t.Message was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.PublishMessage (cid.Cid) (struct) + if len("PublishMessage") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishMessage\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishMessage"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PublishMessage")); err != nil { + return err + } + + if t.PublishMessage == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.PublishMessage); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishMessage: %w", err) + } + } + + // t.SlashEpoch (abi.ChainEpoch) (int64) + if len("SlashEpoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SlashEpoch\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("SlashEpoch"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("SlashEpoch")); err != nil { + return err + } + + if t.SlashEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SlashEpoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SlashEpoch-1)); err != nil { + return err + } + } + + // t.PollRetryCount (uint64) (uint64) + if len("PollRetryCount") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PollRetryCount\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PollRetryCount"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PollRetryCount")); err != nil { + return err + } + + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PollRetryCount)); err != nil { + return err + } + + // t.PollErrorCount (uint64) (uint64) + if len("PollErrorCount") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PollErrorCount\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PollErrorCount"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PollErrorCount")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PollErrorCount)); err != nil { + return err + } + + // t.FastRetrieval (bool) (bool) + if len("FastRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FastRetrieval\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("FastRetrieval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FastRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + + // t.StoreID (multistore.StoreID) (uint64) + if len("StoreID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StoreID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StoreID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("StoreID")); err != nil { + return err + } + + if t.StoreID == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(*t.StoreID)); err != nil { + return err + } + } + + // t.FundsReserved (big.Int) (struct) + if len("FundsReserved") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsReserved\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("FundsReserved"))); err != nil { + return err + } + if 
_, err := io.WriteString(w, string("FundsReserved")); err != nil { + return err + } + + if err := t.FundsReserved.MarshalCBOR(w); err != nil { + return err + } + + // t.CreationTime (typegen.CborTime) (struct) + if len("CreationTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CreationTime\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("CreationTime"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CreationTime")); err != nil { + return err + } + + if err := t.CreationTime.MarshalCBOR(w); err != nil { + return err + } + + // t.TransferChannelID (datatransfer.ChannelID) (struct) + if len("TransferChannelID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TransferChannelID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("TransferChannelID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TransferChannelID")); err != nil { + return err + } + + if err := t.TransferChannelID.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *ClientDeal) UnmarshalCBOR(r io.Reader) error { + *t = ClientDeal{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("ClientDeal: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.ClientDealProposal (market.ClientDealProposal) (struct) + case "ClientDealProposal": + + { + + if err := t.ClientDealProposal.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ClientDealProposal: %w", err) + } 
+ + } + // t.ProposalCid (cid.Cid) (struct) + case "ProposalCid": + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + t.ProposalCid = c + + } + // t.AddFundsCid (cid.Cid) (struct) + case "AddFundsCid": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } + + } + // t.State (uint64) (uint64) + case "State": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.Miner (peer.ID) (string) + case "Miner": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Miner = peer.ID(sval) + } + // t.MinerWorker (address.Address) (struct) + case "MinerWorker": + + { + + if err := t.MinerWorker.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.MinerWorker: %w", err) + } + + } + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.DataRef (storagemarket.DataRef) (struct) + case "DataRef": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.DataRef = new(DataRef) + if err := t.DataRef.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.DataRef pointer: %w", err) + } + } + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadStringBuf(br, 
scratch) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.PublishMessage (cid.Cid) (struct) + case "PublishMessage": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishMessage: %w", err) + } + + t.PublishMessage = &c + } + + } + // t.SlashEpoch (abi.ChainEpoch) (int64) + case "SlashEpoch": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SlashEpoch = abi.ChainEpoch(extraI) + } + // t.PollRetryCount (uint64) (uint64) + case "PollRetryCount": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PollRetryCount = uint64(extra) + + } + // t.PollErrorCount (uint64) (uint64) + case "PollErrorCount": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PollErrorCount = uint64(extra) + + } + // t.FastRetrieval (bool) (bool) + case "FastRetrieval": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are 
either major type 7, value 20 or 21 (got %d)", extra) + } + // t.StoreID (multistore.StoreID) (uint64) + case "StoreID": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + typed := multistore.StoreID(extra) + t.StoreID = &typed + } + + } + // t.FundsReserved (big.Int) (struct) + case "FundsReserved": + + { + + if err := t.FundsReserved.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReserved: %w", err) + } + + } + // t.CreationTime (typegen.CborTime) (struct) + case "CreationTime": + + { + + if err := t.CreationTime.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.CreationTime: %w", err) + } + + } + // t.TransferChannelID (datatransfer.ChannelID) (struct) + case "TransferChannelID": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.TransferChannelID = new(datatransfer.ChannelID) + if err := t.TransferChannelID.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.TransferChannelID pointer: %w", err) + } + } + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *MinerDeal) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{179}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.ClientDealProposal (market.ClientDealProposal) (struct) + if len("ClientDealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ClientDealProposal\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, 
uint64(len("ClientDealProposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ClientDealProposal")); err != nil { + return err + } + + if err := t.ClientDealProposal.MarshalCBOR(w); err != nil { + return err + } + + // t.ProposalCid (cid.Cid) (struct) + if len("ProposalCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ProposalCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ProposalCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ProposalCid")); err != nil { + return err + } + + if err := cbg.WriteCidBuf(scratch, w, t.ProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) + } + + // t.AddFundsCid (cid.Cid) (struct) + if len("AddFundsCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AddFundsCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("AddFundsCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("AddFundsCid")); err != nil { + return err + } + + if t.AddFundsCid == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.AddFundsCid); err != nil { + return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) + } + } + + // t.PublishCid (cid.Cid) (struct) + if len("PublishCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PublishCid")); err != nil { + return err + } + + if t.PublishCid == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to write cid 
field t.PublishCid: %w", err) + } + } + + // t.Miner (peer.ID) (string) + if len("Miner") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Miner\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Miner"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Miner")); err != nil { + return err + } + + if len(t.Miner) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Miner was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Miner))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Miner)); err != nil { + return err + } + + // t.Client (peer.ID) (string) + if len("Client") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Client\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Client"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Client")); err != nil { + return err + } + + if len(t.Client) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Client was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Client))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Client)); err != nil { + return err + } + + // t.State (uint64) (uint64) + if len("State") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"State\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("State"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("State")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.PiecePath (filestore.Path) (string) + if len("PiecePath") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PiecePath\" was too long") + } + + if err 
:= cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PiecePath"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PiecePath")); err != nil { + return err + } + + if len(t.PiecePath) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.PiecePath was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.PiecePath))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.PiecePath)); err != nil { + return err + } + + // t.MetadataPath (filestore.Path) (string) + if len("MetadataPath") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MetadataPath\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("MetadataPath"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MetadataPath")); err != nil { + return err + } + + if len(t.MetadataPath) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.MetadataPath was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.MetadataPath))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.MetadataPath)); err != nil { + return err + } + + // t.SlashEpoch (abi.ChainEpoch) (int64) + if len("SlashEpoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SlashEpoch\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("SlashEpoch"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("SlashEpoch")); err != nil { + return err + } + + if t.SlashEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SlashEpoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SlashEpoch-1)); err != nil { + return err + } + } + + // t.FastRetrieval (bool) (bool) + if len("FastRetrieval") > 
cbg.MaxLength { + return xerrors.Errorf("Value in field \"FastRetrieval\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("FastRetrieval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FastRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.StoreID (multistore.StoreID) (uint64) + if len("StoreID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StoreID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StoreID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("StoreID")); err != nil { + return err + } + + if t.StoreID == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(*t.StoreID)); err != nil { + return err + } + } + + // t.FundsReserved (big.Int) (struct) + if len("FundsReserved") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsReserved\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("FundsReserved"))); err != nil { + return err + } + if _, err := io.WriteString(w, 
string("FundsReserved")); err != nil { + return err + } + + if err := t.FundsReserved.MarshalCBOR(w); err != nil { + return err + } + + // t.Ref (storagemarket.DataRef) (struct) + if len("Ref") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Ref\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Ref"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Ref")); err != nil { + return err + } + + if err := t.Ref.MarshalCBOR(w); err != nil { + return err + } + + // t.AvailableForRetrieval (bool) (bool) + if len("AvailableForRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AvailableForRetrieval\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("AvailableForRetrieval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("AvailableForRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.AvailableForRetrieval); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealID")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.CreationTime (typegen.CborTime) (struct) + if len("CreationTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CreationTime\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("CreationTime"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CreationTime")); err != nil { + return err + } + + if err := t.CreationTime.MarshalCBOR(w); err != nil { + return err + } + 
+ // t.TransferChannelId (datatransfer.ChannelID) (struct) + if len("TransferChannelId") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TransferChannelId\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("TransferChannelId"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TransferChannelId")); err != nil { + return err + } + + if err := t.TransferChannelId.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *MinerDeal) UnmarshalCBOR(r io.Reader) error { + *t = MinerDeal{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("MinerDeal: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.ClientDealProposal (market.ClientDealProposal) (struct) + case "ClientDealProposal": + + { + + if err := t.ClientDealProposal.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ClientDealProposal: %w", err) + } + + } + // t.ProposalCid (cid.Cid) (struct) + case "ProposalCid": + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + t.ProposalCid = c + + } + // t.AddFundsCid (cid.Cid) (struct) + case "AddFundsCid": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } + + } + // t.PublishCid 
(cid.Cid) (struct) + case "PublishCid": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.Miner (peer.ID) (string) + case "Miner": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Miner = peer.ID(sval) + } + // t.Client (peer.ID) (string) + case "Client": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Client = peer.ID(sval) + } + // t.State (uint64) (uint64) + case "State": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.PiecePath (filestore.Path) (string) + case "PiecePath": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.PiecePath = filestore.Path(sval) + } + // t.MetadataPath (filestore.Path) (string) + case "MetadataPath": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.MetadataPath = filestore.Path(sval) + } + // t.SlashEpoch (abi.ChainEpoch) (int64) + case "SlashEpoch": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SlashEpoch = abi.ChainEpoch(extraI) + } + // t.FastRetrieval (bool) (bool) + case "FastRetrieval": + + maj, 
extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.StoreID (multistore.StoreID) (uint64) + case "StoreID": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + typed := multistore.StoreID(extra) + t.StoreID = &typed + } + + } + // t.FundsReserved (big.Int) (struct) + case "FundsReserved": + + { + + if err := t.FundsReserved.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReserved: %w", err) + } + + } + // t.Ref (storagemarket.DataRef) (struct) + case "Ref": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.Ref = new(DataRef) + if err := t.Ref.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Ref pointer: %w", err) + } + } + + } + // t.AvailableForRetrieval (bool) (bool) + case "AvailableForRetrieval": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.AvailableForRetrieval = false + case 21: + t.AvailableForRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 
or 21 (got %d)", extra) + } + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.CreationTime (typegen.CborTime) (struct) + case "CreationTime": + + { + + if err := t.CreationTime.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.CreationTime: %w", err) + } + + } + // t.TransferChannelId (datatransfer.ChannelID) (struct) + case "TransferChannelId": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.TransferChannelId = new(datatransfer.ChannelID) + if err := t.TransferChannelId.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.TransferChannelId pointer: %w", err) + } + } + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *Balance) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{162}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Locked (big.Int) (struct) + if len("Locked") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Locked\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Locked"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Locked")); err != nil { + return err + } + + if err := t.Locked.MarshalCBOR(w); err != nil { + return err + } + + // t.Available (big.Int) (struct) + if len("Available") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Available\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Available"))); err != nil { + return err + } + if _, err := 
io.WriteString(w, string("Available")); err != nil { + return err + } + + if err := t.Available.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *Balance) UnmarshalCBOR(r io.Reader) error { + *t = Balance{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("Balance: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Locked (big.Int) (struct) + case "Locked": + + { + + if err := t.Locked.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Locked: %w", err) + } + + } + // t.Available (big.Int) (struct) + case "Available": + + { + + if err := t.Available.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Available: %w", err) + } + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *SignedStorageAsk) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{162}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Ask (storagemarket.StorageAsk) (struct) + if len("Ask") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Ask\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Ask"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Ask")); err != nil { + return err + } + + if err := t.Ask.MarshalCBOR(w); err != nil { + return err + } + + // t.Signature (crypto.Signature) (struct) + if len("Signature") > cbg.MaxLength { + return 
xerrors.Errorf("Value in field \"Signature\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Signature"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Signature")); err != nil { + return err + } + + if err := t.Signature.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *SignedStorageAsk) UnmarshalCBOR(r io.Reader) error { + *t = SignedStorageAsk{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("SignedStorageAsk: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Ask (storagemarket.StorageAsk) (struct) + case "Ask": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.Ask = new(StorageAsk) + if err := t.Ask.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Ask pointer: %w", err) + } + } + + } + // t.Signature (crypto.Signature) (struct) + case "Signature": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.Signature = new(crypto.Signature) + if err := t.Signature.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Signature pointer: %w", err) + } + } + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *StorageAsk) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if 
_, err := w.Write([]byte{168}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Price (big.Int) (struct) + if len("Price") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Price\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Price"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Price")); err != nil { + return err + } + + if err := t.Price.MarshalCBOR(w); err != nil { + return err + } + + // t.VerifiedPrice (big.Int) (struct) + if len("VerifiedPrice") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"VerifiedPrice\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("VerifiedPrice"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("VerifiedPrice")); err != nil { + return err + } + + if err := t.VerifiedPrice.MarshalCBOR(w); err != nil { + return err + } + + // t.MinPieceSize (abi.PaddedPieceSize) (uint64) + if len("MinPieceSize") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MinPieceSize\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("MinPieceSize"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MinPieceSize")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.MinPieceSize)); err != nil { + return err + } + + // t.MaxPieceSize (abi.PaddedPieceSize) (uint64) + if len("MaxPieceSize") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MaxPieceSize\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("MaxPieceSize"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MaxPieceSize")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.MaxPieceSize)); err != nil { + 
return err + } + + // t.Miner (address.Address) (struct) + if len("Miner") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Miner\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Miner"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Miner")); err != nil { + return err + } + + if err := t.Miner.MarshalCBOR(w); err != nil { + return err + } + + // t.Timestamp (abi.ChainEpoch) (int64) + if len("Timestamp") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Timestamp\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Timestamp"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Timestamp")); err != nil { + return err + } + + if t.Timestamp >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Timestamp)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Timestamp-1)); err != nil { + return err + } + } + + // t.Expiry (abi.ChainEpoch) (int64) + if len("Expiry") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Expiry\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Expiry"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Expiry")); err != nil { + return err + } + + if t.Expiry >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Expiry)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Expiry-1)); err != nil { + return err + } + } + + // t.SeqNo (uint64) (uint64) + if len("SeqNo") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SeqNo\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("SeqNo"))); err != nil { + return 
err + } + if _, err := io.WriteString(w, string("SeqNo")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SeqNo)); err != nil { + return err + } + + return nil +} + +func (t *StorageAsk) UnmarshalCBOR(r io.Reader) error { + *t = StorageAsk{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("StorageAsk: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Price (big.Int) (struct) + case "Price": + + { + + if err := t.Price.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Price: %w", err) + } + + } + // t.VerifiedPrice (big.Int) (struct) + case "VerifiedPrice": + + { + + if err := t.VerifiedPrice.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.VerifiedPrice: %w", err) + } + + } + // t.MinPieceSize (abi.PaddedPieceSize) (uint64) + case "MinPieceSize": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MinPieceSize = abi.PaddedPieceSize(extra) + + } + // t.MaxPieceSize (abi.PaddedPieceSize) (uint64) + case "MaxPieceSize": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MaxPieceSize = abi.PaddedPieceSize(extra) + + } + // t.Miner (address.Address) (struct) + case "Miner": + + { + + if err := t.Miner.UnmarshalCBOR(br); err != nil { + return 
xerrors.Errorf("unmarshaling t.Miner: %w", err) + } + + } + // t.Timestamp (abi.ChainEpoch) (int64) + case "Timestamp": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Timestamp = abi.ChainEpoch(extraI) + } + // t.Expiry (abi.ChainEpoch) (int64) + case "Expiry": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Expiry = abi.ChainEpoch(extraI) + } + // t.SeqNo (uint64) (uint64) + case "SeqNo": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SeqNo = uint64(extra) + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *DataRef) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{164}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.TransferType (string) (string) + if len("TransferType") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TransferType\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, 
cbg.MajTextString, uint64(len("TransferType"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TransferType")); err != nil { + return err + } + + if len(t.TransferType) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.TransferType was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.TransferType))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.TransferType)); err != nil { + return err + } + + // t.Root (cid.Cid) (struct) + if len("Root") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Root\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Root"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Root")); err != nil { + return err + } + + if err := cbg.WriteCidBuf(scratch, w, t.Root); err != nil { + return xerrors.Errorf("failed to write cid field t.Root: %w", err) + } + + // t.PieceCid (cid.Cid) (struct) + if len("PieceCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PieceCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceCid")); err != nil { + return err + } + + if t.PieceCid == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.PieceCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCid: %w", err) + } + } + + // t.PieceSize (abi.UnpaddedPieceSize) (uint64) + if len("PieceSize") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceSize\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PieceSize"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceSize")); err != nil { + return err + } + + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PieceSize)); err != nil { + return err + } + + return nil +} + +func (t *DataRef) UnmarshalCBOR(r io.Reader) error { + *t = DataRef{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DataRef: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.TransferType (string) (string) + case "TransferType": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.TransferType = string(sval) + } + // t.Root (cid.Cid) (struct) + case "Root": + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Root: %w", err) + } + + t.Root = c + + } + // t.PieceCid (cid.Cid) (struct) + case "PieceCid": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCid: %w", err) + } + + t.PieceCid = &c + } + + } + // t.PieceSize (abi.UnpaddedPieceSize) (uint64) + case "PieceSize": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PieceSize = abi.UnpaddedPieceSize(extra) + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil +} +func (t *ProviderDealState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := 
w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{168}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.State (uint64) (uint64) + if len("State") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"State\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("State"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("State")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.State)); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.Proposal (market.DealProposal) (struct) + if len("Proposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Proposal\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Proposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Proposal")); err != nil { + return err + } + + if err := t.Proposal.MarshalCBOR(w); err != nil { + return err + } + + // t.ProposalCid (cid.Cid) (struct) + if len("ProposalCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ProposalCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ProposalCid"))); err != nil { + 
return err + } + if _, err := io.WriteString(w, string("ProposalCid")); err != nil { + return err + } + + if t.ProposalCid == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.ProposalCid); err != nil { + return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) + } + } + + // t.AddFundsCid (cid.Cid) (struct) + if len("AddFundsCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AddFundsCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("AddFundsCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("AddFundsCid")); err != nil { + return err + } + + if t.AddFundsCid == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.AddFundsCid); err != nil { + return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) + } + } + + // t.PublishCid (cid.Cid) (struct) + if len("PublishCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PublishCid")); err != nil { + return err + } + + if t.PublishCid == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) + } + } + + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealID")); err != nil { + return err + } + + if err 
:= cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.FastRetrieval (bool) (bool) + if len("FastRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FastRetrieval\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("FastRetrieval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FastRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + return nil +} + +func (t *ProviderDealState) UnmarshalCBOR(r io.Reader) error { + *t = ProviderDealState{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("ProviderDealState: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.State (uint64) (uint64) + case "State": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) + + } + // t.Message (string) (string) + case "Message": + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Message = string(sval) + } + // t.Proposal (market.DealProposal) (struct) + case "Proposal": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.Proposal = new(market.DealProposal) + if err := t.Proposal.UnmarshalCBOR(br); err != nil { + return 
xerrors.Errorf("unmarshaling t.Proposal pointer: %w", err) + } + } + + } + // t.ProposalCid (cid.Cid) (struct) + case "ProposalCid": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + t.ProposalCid = &c + } + + } + // t.AddFundsCid (cid.Cid) (struct) + case "AddFundsCid": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } + + } + // t.PublishCid (cid.Cid) (struct) + case "PublishCid": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.FastRetrieval (bool) (bool) + case "FastRetrieval": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } + } + + return nil 
+} diff --git a/vendor/github.com/filecoin-project/go-statestore/.circleci/config.yml b/vendor/github.com/filecoin-project/go-statestore/.circleci/config.yml new file mode 100644 index 0000000000..958082979c --- /dev/null +++ b/vendor/github.com/filecoin-project/go-statestore/.circleci/config.yml @@ -0,0 +1,159 @@ +version: 2.1 +orbs: + go: gotest/tools@0.0.9 + +executors: + golang: + docker: + - image: circleci/golang:1.13 + +commands: + install-deps: + steps: + - go/install-ssh + - go/install: {package: git} + +jobs: + mod-tidy-check: + executor: golang + steps: + - install-deps + - checkout + - go/mod-download + - go/mod-tidy-check + + build-all: + executor: golang + steps: + - install-deps + - checkout + - go/mod-download + - restore_cache: + name: restore go mod cache + key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} + - run: + command: make build + + test: &test + description: | + Run tests with gotestsum. + parameters: + executor: + type: executor + default: golang + go-test-flags: + type: string + default: "-timeout 30m" + description: Flags passed to go test. + packages: + type: string + default: "./..." + description: Import paths of packages to be tested. + test-suite-name: + type: string + default: unit + description: Test suite name to report to CircleCI. + gotestsum-format: + type: string + default: short + description: gotestsum format. https://github.com/gotestyourself/gotestsum#format + coverage: + type: string + default: -coverprofile=coverage.txt + description: Coverage flag. Set to the empty string to disable. + codecov-upload: + type: boolean + default: false + description: | + Upload coverage report to https://codecov.io/. Requires the codecov API token to be + set as an environment variable for private projects. 
+ executor: << parameters.executor >> + steps: + - install-deps + - checkout + - go/mod-download + - restore_cache: + name: restore go mod cache + key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} + - go/install-gotestsum: + gobin: $HOME/.local/bin + - run: + name: make test + environment: + GOTESTSUM_JUNITFILE: /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml + GOTESTSUM_FORMAT: << parameters.gotestsum-format >> + command: | + mkdir -p /tmp/test-reports/<< parameters.test-suite-name >> + gotestsum -- \ + << parameters.coverage >> \ + << parameters.go-test-flags >> \ + << parameters.packages >> + no_output_timeout: 30m + - store_test_results: + path: /tmp/test-reports + - when: + condition: << parameters.codecov-upload >> + steps: + - go/install: {package: bash} + - go/install: {package: curl} + - run: + shell: /bin/bash -eo pipefail + command: | + bash <(curl -s https://codecov.io/bash) + - save_cache: + name: save go mod cache + key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} + paths: + - "~/go/pkg" + - "~/go/src/github.com" + - "~/go/src/golang.org" + + lint: &lint + description: | + Run golangci-lint. + parameters: + executor: + type: executor + default: golang + golangci-lint-version: + type: string + default: 1.17.1 + concurrency: + type: string + default: '2' + description: | + Concurrency used to run linters. Defaults to 2 because NumCPU is not + aware of container CPU limits. 
+ args: + type: string + default: '' + description: | + Arguments to pass to golangci-lint + executor: << parameters.executor >> + steps: + - install-deps + - checkout + - go/mod-download + - go/install-golangci-lint: + gobin: $HOME/.local/bin + version: << parameters.golangci-lint-version >> + - run: + name: Lint + command: | + $HOME/.local/bin/golangci-lint run -v \ + --concurrency << parameters.concurrency >> << parameters.args >> + lint-changes: + <<: *lint + + lint-all: + <<: *lint + +workflows: + version: 2.1 + ci: + jobs: + - lint-changes: + args: "--new-from-rev origin/master" + - test + - mod-tidy-check + - build-all diff --git a/vendor/github.com/filecoin-project/go-statestore/LICENSE-APACHE b/vendor/github.com/filecoin-project/go-statestore/LICENSE-APACHE new file mode 100644 index 0000000000..14478a3b60 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-statestore/LICENSE-APACHE @@ -0,0 +1,5 @@ +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
diff --git a/vendor/github.com/filecoin-project/go-statestore/LICENSE-MIT b/vendor/github.com/filecoin-project/go-statestore/LICENSE-MIT new file mode 100644 index 0000000000..72dc60d84b --- /dev/null +++ b/vendor/github.com/filecoin-project/go-statestore/LICENSE-MIT @@ -0,0 +1,19 @@ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/filecoin-project/go-statestore/Makefile b/vendor/github.com/filecoin-project/go-statestore/Makefile new file mode 100644 index 0000000000..b43b86a713 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-statestore/Makefile @@ -0,0 +1,16 @@ +SHELL=/usr/bin/env bash + +all: build +.PHONY: all + +test: + go test -v $(GOFLAGS) ./... +.PHONY: test + +lint: + golangci-lint run -v --concurrency 2 --new-from-rev origin/master +.PHONY: lint + +build: + go build -v $(GOFLAGS) ./... 
+.PHONY: build diff --git a/vendor/github.com/filecoin-project/go-statestore/README.md b/vendor/github.com/filecoin-project/go-statestore/README.md new file mode 100644 index 0000000000..4fef97ce81 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-statestore/README.md @@ -0,0 +1,7 @@ +# go-statestore + +A general-purpose key-value store for CBOR-encodable data + +## License + +Dual-licensed under [MIT](https://github.com/filecoin-project/go-statestore/blob/master/LICENSE-MIT) + [Apache 2.0](https://github.com/filecoin-project/go-statestore/blob/master/LICENSE-APACHE) diff --git a/vendor/github.com/filecoin-project/go-statestore/go.mod b/vendor/github.com/filecoin-project/go-statestore/go.mod new file mode 100644 index 0000000000..dc4db6a4eb --- /dev/null +++ b/vendor/github.com/filecoin-project/go-statestore/go.mod @@ -0,0 +1,24 @@ +module github.com/filecoin-project/go-statestore + +go 1.13 + +require ( + github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 + github.com/gogo/protobuf v1.3.1 // indirect + github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f // indirect + github.com/ipfs/go-cid v0.0.4-0.20191112011718-79e75dffeb10 // indirect + github.com/ipfs/go-datastore v0.1.1 + github.com/jbenet/goprocess v0.1.3 // indirect + github.com/minio/sha256-simd v0.1.1 // indirect + github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a // indirect + github.com/smartystreets/assertions v1.0.1 // indirect + github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 // indirect + github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830 // indirect + github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0 + go.uber.org/multierr v1.4.0 + golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 // indirect + golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 // indirect + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect + 
gopkg.in/yaml.v2 v2.2.4 // indirect +) diff --git a/vendor/github.com/filecoin-project/go-statestore/go.sum b/vendor/github.com/filecoin-project/go-statestore/go.sum new file mode 100644 index 0000000000..d791d7b26a --- /dev/null +++ b/vendor/github.com/filecoin-project/go-statestore/go.sum @@ -0,0 +1,147 @@ +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= +github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= 
+github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.4-0.20191112011718-79e75dffeb10 h1:5mRf2p8Bv2iKiuPsGrQUrx38rdBm2T/03JCM6VWzoMc= +github.com/ipfs/go-cid v0.0.4-0.20191112011718-79e75dffeb10/go.mod h1:/BYOuUoxkE+0f6tGzlzMvycuN+5l35VOR4Bpg2sCmds= +github.com/ipfs/go-datastore v0.1.1 h1:F4k0TkTAZGLFzBOrVKDAvch6JZtuN4NHkfdcEZL50aI= +github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipld-cbor v0.0.3 h1:ENsxvybwkmke7Z/QJOmeJfoguj6GH3Y0YOaGrfy9Q0I= +github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2G9Zs= +github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= +github.com/ipfs/go-log v1.0.0 h1:BW3LQIiZzpNyolt84yvKNCd3FU+AK4VDw1hnHR+1aiI= 
+github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/jbenet/goprocess v0.1.3 h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr10= +github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= 
+github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.9 h1:aoijQXYYl7Xtb2pUUP68R+ys1TlnlR3eX6wmozr0Hp4= +github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a h1:hjZfReYVLbqFkAtr2us7vdy04YWz3LVAirzP7reh8+M= +github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/rogpeppe/go-internal v1.3.0/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= +github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8= +github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830 h1:8kxMKmKzXXL4Ru1nyhvdms/JjWt+3YLpvRb/bAjO/y0= +github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0 h1:efb/4CnrubzNGqQOeHErxyQ6rIsJb7GcgeSDF7fqWeI= +github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0 
h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/vendor/github.com/filecoin-project/go-statestore/state.go b/vendor/github.com/filecoin-project/go-statestore/state.go new file mode 100644 index 0000000000..8c2604e2e2 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-statestore/state.go @@ -0,0 +1,93 @@ +package statestore + +import ( + "bytes" + "reflect" + + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/ipfs/go-datastore" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +type StoredState struct { + ds datastore.Datastore + name datastore.Key +} + +func (st *StoredState) End() error { + has, err := st.ds.Has(st.name) + if err != nil { + return err + } + if !has { + return xerrors.Errorf("No state for %s", st.name) + } + if err := st.ds.Delete(st.name); err != nil { + return xerrors.Errorf("removing state from datastore: %w", err) + } + st.name = datastore.Key{} + st.ds = nil + + return nil +} + +func (st *StoredState) Get(out cbg.CBORUnmarshaler) error { + val, err := st.ds.Get(st.name) + if err != nil { + if xerrors.Is(err, datastore.ErrNotFound) { + return 
xerrors.Errorf("No state for %s: %w", st.name, err) + } + return err + } + + return out.UnmarshalCBOR(bytes.NewReader(val)) +} + +// mutator func(*T) error +func (st *StoredState) Mutate(mutator interface{}) error { + return st.mutate(cborMutator(mutator)) +} + +func (st *StoredState) mutate(mutator func([]byte) ([]byte, error)) error { + has, err := st.ds.Has(st.name) + if err != nil { + return err + } + if !has { + return xerrors.Errorf("No state for %s", st.name) + } + + cur, err := st.ds.Get(st.name) + if err != nil { + return err + } + + mutated, err := mutator(cur) + if err != nil { + return err + } + + return st.ds.Put(st.name, mutated) +} + +func cborMutator(mutator interface{}) func([]byte) ([]byte, error) { + rmut := reflect.ValueOf(mutator) + + return func(in []byte) ([]byte, error) { + state := reflect.New(rmut.Type().In(0).Elem()) + + err := cborutil.ReadCborRPC(bytes.NewReader(in), state.Interface()) + if err != nil { + return nil, err + } + + out := rmut.Call([]reflect.Value{state}) + + if err := out[0].Interface(); err != nil { + return nil, err.(error) + } + + return cborutil.Dump(state.Interface()) + } +} diff --git a/vendor/github.com/filecoin-project/go-statestore/store.go b/vendor/github.com/filecoin-project/go-statestore/store.go new file mode 100644 index 0000000000..1761b899fb --- /dev/null +++ b/vendor/github.com/filecoin-project/go-statestore/store.go @@ -0,0 +1,96 @@ +package statestore + +import ( + "bytes" + "fmt" + "reflect" + + "github.com/filecoin-project/go-cbor-util" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" + "go.uber.org/multierr" + "golang.org/x/xerrors" +) + +type StateStore struct { + ds datastore.Datastore +} + +func New(ds datastore.Datastore) *StateStore { + return &StateStore{ds: ds} +} + +func ToKey(k interface{}) datastore.Key { + switch t := k.(type) { + case uint64: + return datastore.NewKey(fmt.Sprint(t)) + case fmt.Stringer: + return datastore.NewKey(t.String()) + default: + 
panic("unexpected key type") + } +} + +func (st *StateStore) Begin(i interface{}, state interface{}) error { + k := ToKey(i) + has, err := st.ds.Has(k) + if err != nil { + return err + } + if has { + return xerrors.Errorf("already tracking state for %v", i) + } + + b, err := cborutil.Dump(state) + if err != nil { + return err + } + + return st.ds.Put(k, b) +} + +func (st *StateStore) Get(i interface{}) *StoredState { + return &StoredState{ + ds: st.ds, + name: ToKey(i), + } +} + +func (st *StateStore) Has(i interface{}) (bool, error) { + return st.ds.Has(ToKey(i)) +} + +// out: *[]T +func (st *StateStore) List(out interface{}) error { + res, err := st.ds.Query(query.Query{}) + if err != nil { + return err + } + defer res.Close() + + outT := reflect.TypeOf(out).Elem().Elem() + rout := reflect.ValueOf(out) + + var errs error + + for { + res, ok := res.NextSync() + if !ok { + break + } + if res.Error != nil { + return res.Error + } + + elem := reflect.New(outT) + err := cborutil.ReadCborRPC(bytes.NewReader(res.Value), elem.Interface()) + if err != nil { + errs = multierr.Append(errs, xerrors.Errorf("decoding state for key '%s': %w", res.Key, err)) + continue + } + + rout.Elem().Set(reflect.Append(rout.Elem(), elem.Elem())) + } + + return nil +} diff --git a/vendor/github.com/filecoin-project/go-statestore/store_test.go b/vendor/github.com/filecoin-project/go-statestore/store_test.go new file mode 100644 index 0000000000..68dd8f5868 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-statestore/store_test.go @@ -0,0 +1,83 @@ +package statestore + +import ( + "fmt" + "io" + "testing" + + "github.com/filecoin-project/go-cbor-util" + "github.com/ipfs/go-datastore" +) + +type Flarp struct { + x byte +} + +func (f *Flarp) UnmarshalCBOR(r io.Reader) error { + p := make([]byte, 1) + n, err := r.Read(p) + if n != 1 { + panic("somebody messed up") + } + f.x = p[0] + return err +} + +func (f *Flarp) MarshalCBOR(w io.Writer) error { + xs := []byte{f.x} + _, err := 
w.Write(xs) + return err +} + +func (f *Flarp) Blarg() string { + return fmt.Sprintf("%d", f.x) +} + +func TestList(t *testing.T) { + x1 := byte(64) + x2 := byte(42) + + ds := datastore.NewMapDatastore() + + e1, err := cborutil.Dump(&Flarp{x: x1}) + if err != nil { + t.Fatal(err) + } + + if err := ds.Put(datastore.NewKey("/2"), e1); err != nil { + t.Fatal(err) + } + + e2, err := cborutil.Dump(&Flarp{x: x2}) + if err != nil { + t.Fatal(err) + } + + if err := ds.Put(datastore.NewKey("/3"), e2); err != nil { + t.Fatal(err) + } + + st := &StateStore{ds: ds} + + var out []Flarp + if err := st.List(&out); err != nil { + t.Fatal(err) + } + + if len(out) != 2 { + t.Fatalf("wrong len (expected %d, got %d)", 2, len(out)) + } + + blargs := make(map[string]bool) + for _, v := range out { + blargs[v.Blarg()] = true + } + + if !blargs[fmt.Sprintf("%d", x1)] { + t.Fatalf("wrong data (missing Flarp#Blarg() == %d)", x1) + } + + if !blargs[fmt.Sprintf("%d", x2)] { + t.Fatalf("wrong data (missing Flarp#Blarg() == %d)", x2) + } +} diff --git a/vendor/github.com/filecoin-project/lotus/LICENSE-APACHE b/vendor/github.com/filecoin-project/lotus/LICENSE-APACHE new file mode 100644 index 0000000000..14478a3b60 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/LICENSE-APACHE @@ -0,0 +1,5 @@ +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
diff --git a/vendor/github.com/filecoin-project/lotus/LICENSE-MIT b/vendor/github.com/filecoin-project/lotus/LICENSE-MIT new file mode 100644 index 0000000000..72dc60d84b --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/LICENSE-MIT @@ -0,0 +1,19 @@ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/filecoin-project/lotus/build/bootstrap.go b/vendor/github.com/filecoin-project/lotus/build/bootstrap.go new file mode 100644 index 0000000000..80c1529ff6 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/build/bootstrap.go @@ -0,0 +1,40 @@ +package build + +import ( + "context" + "os" + "strings" + + "github.com/filecoin-project/lotus/lib/addrutil" + "golang.org/x/xerrors" + + rice "github.com/GeertJohan/go.rice" + "github.com/libp2p/go-libp2p-core/peer" +) + +func BuiltinBootstrap() ([]peer.AddrInfo, error) { + if DisableBuiltinAssets { + return nil, nil + } + + var out []peer.AddrInfo + + b := rice.MustFindBox("bootstrap") + err := b.Walk("", func(path string, info os.FileInfo, err error) error { + if err != nil { + return xerrors.Errorf("failed to walk box: %w", err) + } + + if !strings.HasSuffix(path, ".pi") { + return nil + } + spi := b.MustString(path) + if spi == "" { + return nil + } + pi, err := addrutil.ParseAddresses(context.TODO(), strings.Split(strings.TrimSpace(spi), "\n")) + out = append(out, pi...) + return err + }) + return out, err +} diff --git a/vendor/github.com/filecoin-project/lotus/build/flags.go b/vendor/github.com/filecoin-project/lotus/build/flags.go new file mode 100644 index 0000000000..33e9f6ede9 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/build/flags.go @@ -0,0 +1,15 @@ +package build + +// DisableBuiltinAssets disables the resolution of go.rice boxes that store +// built-in assets, such as proof parameters, bootstrap peers, genesis blocks, +// etc. +// +// When this value is set to true, it is expected that the user will +// provide any such configurations through the Lotus API itself. +// +// This is useful when you're using Lotus as a library, such as to orchestrate +// test scenarios, or for other purposes where you don't need to use the +// defaults shipped with the binary. +// +// For this flag to be effective, it must be enabled _before_ instantiating Lotus. 
+var DisableBuiltinAssets = false diff --git a/vendor/github.com/filecoin-project/lotus/build/forks.go b/vendor/github.com/filecoin-project/lotus/build/forks.go new file mode 100644 index 0000000000..5c93a93534 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/build/forks.go @@ -0,0 +1 @@ +package build diff --git a/vendor/github.com/filecoin-project/lotus/build/genesis.go b/vendor/github.com/filecoin-project/lotus/build/genesis.go new file mode 100644 index 0000000000..dc4ded2736 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/build/genesis.go @@ -0,0 +1,23 @@ +package build + +import ( + rice "github.com/GeertJohan/go.rice" + logging "github.com/ipfs/go-log/v2" +) + +// moved from now-defunct build/paramfetch.go +var log = logging.Logger("build") + +func MaybeGenesis() []byte { + builtinGen, err := rice.FindBox("genesis") + if err != nil { + log.Warnf("loading built-in genesis: %s", err) + return nil + } + genBytes, err := builtinGen.Bytes("devnet.car") + if err != nil { + log.Warnf("loading built-in genesis: %s", err) + } + + return genBytes +} diff --git a/vendor/github.com/filecoin-project/lotus/build/parameters.go b/vendor/github.com/filecoin-project/lotus/build/parameters.go new file mode 100644 index 0000000000..7d34a78312 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/build/parameters.go @@ -0,0 +1,7 @@ +package build + +import rice "github.com/GeertJohan/go.rice" + +func ParametersJSON() []byte { + return rice.MustFindBox("proof-params").MustBytes("parameters.json") +} diff --git a/vendor/github.com/filecoin-project/lotus/build/params_2k.go b/vendor/github.com/filecoin-project/lotus/build/params_2k.go new file mode 100644 index 0000000000..5a0e8fd612 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/build/params_2k.go @@ -0,0 +1,55 @@ +// +build debug 2k + +package build + +import ( + "math" + "os" + + "github.com/filecoin-project/go-state-types/abi" + + 
"github.com/filecoin-project/lotus/chain/actors/policy" +) + +const UpgradeBreezeHeight = -1 +const BreezeGasTampingDuration = 0 + +const UpgradeSmokeHeight = -1 +const UpgradeIgnitionHeight = -2 +const UpgradeRefuelHeight = -3 +const UpgradeTapeHeight = -4 + +var UpgradeActorsV2Height = abi.ChainEpoch(10) +var UpgradeLiftoffHeight = abi.ChainEpoch(-5) + +const UpgradeKumquatHeight = -6 + +var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ + 0: DrandMainnet, +} + +func init() { + policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) + policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) + policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) + + if os.Getenv("LOTUS_DISABLE_V2_ACTOR_MIGRATION") == "1" { + UpgradeActorsV2Height = math.MaxInt64 + UpgradeLiftoffHeight = 11 + } + + BuildType |= Build2k +} + +const BlockDelaySecs = uint64(4) + +const PropagationDelaySecs = uint64(1) + +// SlashablePowerDelay is the number of epochs after ElectionPeriodStart, after +// which the miner is slashed +// +// Epochs +const SlashablePowerDelay = 20 + +// Epochs +const InteractivePoRepConfidence = 6 diff --git a/vendor/github.com/filecoin-project/lotus/build/params_debug.go b/vendor/github.com/filecoin-project/lotus/build/params_debug.go new file mode 100644 index 0000000000..f679c9178a --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/build/params_debug.go @@ -0,0 +1,10 @@ +// +build debug + +package build + +func init() { + InsecurePoStValidation = true + BuildType |= BuildDebug +} + +// NOTE: Also includes settings from params_2k diff --git a/vendor/github.com/filecoin-project/lotus/build/params_shared_funcs.go b/vendor/github.com/filecoin-project/lotus/build/params_shared_funcs.go new file mode 100644 index 0000000000..40ccca50bd --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/build/params_shared_funcs.go @@ -0,0 +1,52 @@ +package build + +import ( + "sort" + + "github.com/filecoin-project/go-address" + + 
"github.com/libp2p/go-libp2p-core/protocol" + + "github.com/filecoin-project/go-state-types/abi" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + + "github.com/filecoin-project/lotus/node/modules/dtypes" +) + +func DefaultSectorSize() abi.SectorSize { + szs := make([]abi.SectorSize, 0, len(miner0.SupportedProofTypes)) + for spt := range miner0.SupportedProofTypes { + ss, err := spt.SectorSize() + if err != nil { + panic(err) + } + + szs = append(szs, ss) + } + + sort.Slice(szs, func(i, j int) bool { + return szs[i] < szs[j] + }) + + return szs[0] +} + +// Core network constants + +func BlocksTopic(netName dtypes.NetworkName) string { return "/fil/blocks/" + string(netName) } +func MessagesTopic(netName dtypes.NetworkName) string { return "/fil/msgs/" + string(netName) } +func DhtProtocolName(netName dtypes.NetworkName) protocol.ID { + return protocol.ID("/fil/kad/" + string(netName)) +} + +func UseNewestNetwork() bool { + // TODO: Put these in a container we can iterate over + if UpgradeBreezeHeight <= 0 && UpgradeSmokeHeight <= 0 && UpgradeActorsV2Height <= 0 { + return true + } + return false +} + +func SetAddressNetwork(n address.Network) { + address.CurrentNetwork = n +} diff --git a/vendor/github.com/filecoin-project/lotus/build/params_shared_vals.go b/vendor/github.com/filecoin-project/lotus/build/params_shared_vals.go new file mode 100644 index 0000000000..b804725942 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/build/params_shared_vals.go @@ -0,0 +1,119 @@ +// +build !testground + +package build + +import ( + "math/big" + "os" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/actors/policy" + + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/specs-actors/actors/builtin" +) + +// ///// +// Storage + +const UnixfsChunkSize uint64 = 1 << 20 +const UnixfsLinksPerLevel = 1024 + +// ///// +// 
Consensus / Network + +const AllowableClockDriftSecs = uint64(1) +const NewestNetworkVersion = network.Version6 +const ActorUpgradeNetworkVersion = network.Version4 + +// Epochs +const ForkLengthThreshold = Finality + +// Blocks (e) +var BlocksPerEpoch = uint64(builtin.ExpectedLeadersPerEpoch) + +// Epochs +const Finality = policy.ChainFinality +const MessageConfidence = uint64(5) + +// constants for Weight calculation +// The ratio of weight contributed by short-term vs long-term factors in a given round +const WRatioNum = int64(1) +const WRatioDen = uint64(2) + +// ///// +// Proofs + +// Epochs +// TODO: unused +const SealRandomnessLookback = policy.SealRandomnessLookback + +// ///// +// Mining + +// Epochs +const TicketRandomnessLookback = abi.ChainEpoch(1) + +// ///// +// Address + +const AddressMainnetEnvVar = "_mainnet_" + +// ///// +// Devnet settings + +var Devnet = true + +const FilBase = uint64(2_000_000_000) +const FilAllocStorageMining = uint64(1_100_000_000) + +const FilecoinPrecision = uint64(1_000_000_000_000_000_000) +const FilReserved = uint64(300_000_000) + +var InitialRewardBalance *big.Int +var InitialFilReserved *big.Int + +// TODO: Move other important consts here + +func init() { + InitialRewardBalance = big.NewInt(int64(FilAllocStorageMining)) + InitialRewardBalance = InitialRewardBalance.Mul(InitialRewardBalance, big.NewInt(int64(FilecoinPrecision))) + + InitialFilReserved = big.NewInt(int64(FilReserved)) + InitialFilReserved = InitialFilReserved.Mul(InitialFilReserved, big.NewInt(int64(FilecoinPrecision))) + + if os.Getenv("LOTUS_ADDRESS_TYPE") == AddressMainnetEnvVar { + SetAddressNetwork(address.Mainnet) + } +} + +// Sync +const BadBlockCacheSize = 1 << 15 + +// assuming 4000 messages per round, this lets us not lose any messages across a +// 10 block reorg. 
+const BlsSignatureCacheSize = 40000 + +// Size of signature verification cache +// 32k keeps the cache around 10MB in size, max +const VerifSigCacheSize = 32000 + +// /////// +// Limits + +// TODO: If this is gonna stay, it should move to specs-actors +const BlockMessageLimit = 10000 + +const BlockGasLimit = 10_000_000_000 +const BlockGasTarget = BlockGasLimit / 2 +const BaseFeeMaxChangeDenom = 8 // 12.5% +const InitialBaseFee = 100e6 +const MinimumBaseFee = 100 +const PackingEfficiencyNum = 4 +const PackingEfficiencyDenom = 5 + +// Actor consts +// TODO: Pull from actors when its made not private +var MinDealDuration = abi.ChainEpoch(180 * builtin.EpochsInDay) diff --git a/vendor/github.com/filecoin-project/lotus/build/params_testground.go b/vendor/github.com/filecoin-project/lotus/build/params_testground.go new file mode 100644 index 0000000000..beee1c7279 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/build/params_testground.go @@ -0,0 +1,99 @@ +// +build testground + +// This file makes hardcoded parameters (const) configurable as vars. +// +// Its purpose is to unlock various degrees of flexibility and parametrization +// when writing Testground plans for Lotus. 
+// +package build + +import ( + "math/big" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/specs-actors/actors/builtin" + + "github.com/filecoin-project/lotus/chain/actors/policy" +) + +var ( + UnixfsChunkSize = uint64(1 << 20) + UnixfsLinksPerLevel = 1024 + + BlocksPerEpoch = uint64(builtin.ExpectedLeadersPerEpoch) + BlockMessageLimit = 512 + BlockGasLimit = int64(100_000_000_000) + BlockGasTarget = int64(BlockGasLimit / 2) + BaseFeeMaxChangeDenom = int64(8) // 12.5% + InitialBaseFee = int64(100e6) + MinimumBaseFee = int64(100) + BlockDelaySecs = uint64(builtin.EpochDurationSeconds) + PropagationDelaySecs = uint64(6) + + AllowableClockDriftSecs = uint64(1) + + Finality = policy.ChainFinality + ForkLengthThreshold = Finality + + SlashablePowerDelay = 20 + InteractivePoRepConfidence = 6 + + MessageConfidence uint64 = 5 + + WRatioNum = int64(1) + WRatioDen = uint64(2) + + BadBlockCacheSize = 1 << 15 + BlsSignatureCacheSize = 40000 + VerifSigCacheSize = 32000 + + SealRandomnessLookback = policy.SealRandomnessLookback + + TicketRandomnessLookback = abi.ChainEpoch(1) + + FilBase uint64 = 2_000_000_000 + FilAllocStorageMining uint64 = 1_400_000_000 + FilReserved uint64 = 300_000_000 + + FilecoinPrecision uint64 = 1_000_000_000_000_000_000 + + InitialRewardBalance = func() *big.Int { + v := big.NewInt(int64(FilAllocStorageMining)) + v = v.Mul(v, big.NewInt(int64(FilecoinPrecision))) + return v + }() + + InitialFilReserved = func() *big.Int { + v := big.NewInt(int64(FilReserved)) + v = v.Mul(v, big.NewInt(int64(FilecoinPrecision))) + return v + }() + + // Actor consts + // TODO: Pull from actors when its made not private + MinDealDuration = abi.ChainEpoch(180 * builtin.EpochsInDay) + + PackingEfficiencyNum int64 = 4 + PackingEfficiencyDenom int64 = 5 + + UpgradeBreezeHeight abi.ChainEpoch = -1 + BreezeGasTampingDuration abi.ChainEpoch = 0 + + UpgradeSmokeHeight abi.ChainEpoch = -1 + 
UpgradeIgnitionHeight abi.ChainEpoch = -2 + UpgradeRefuelHeight abi.ChainEpoch = -3 + UpgradeTapeHeight abi.ChainEpoch = -4 + UpgradeActorsV2Height abi.ChainEpoch = 10 + UpgradeLiftoffHeight abi.ChainEpoch = -5 + UpgradeKumquatHeight abi.ChainEpoch = -6 + + DrandSchedule = map[abi.ChainEpoch]DrandEnum{ + 0: DrandMainnet, + } + + NewestNetworkVersion = network.Version5 + ActorUpgradeNetworkVersion = network.Version4 + + Devnet = true +) diff --git a/vendor/github.com/filecoin-project/lotus/build/testing_flags.go b/vendor/github.com/filecoin-project/lotus/build/testing_flags.go new file mode 100644 index 0000000000..1f26121e78 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/build/testing_flags.go @@ -0,0 +1,3 @@ +package build + +var InsecurePoStValidation = false diff --git a/vendor/github.com/filecoin-project/lotus/build/tools.go b/vendor/github.com/filecoin-project/lotus/build/tools.go new file mode 100644 index 0000000000..638c335a66 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/build/tools.go @@ -0,0 +1,7 @@ +//+build tools + +package build + +import ( + _ "github.com/whyrusleeping/bencher" +) diff --git a/vendor/github.com/filecoin-project/lotus/build/version.go b/vendor/github.com/filecoin-project/lotus/build/version.go new file mode 100644 index 0000000000..2d01526b97 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/build/version.go @@ -0,0 +1,100 @@ +package build + +import ( + "fmt" + + "golang.org/x/xerrors" +) + +var CurrentCommit string +var BuildType int + +const ( + BuildDefault = 0 + Build2k = 0x1 + BuildDebug = 0x3 +) + +func buildType() string { + switch BuildType { + case BuildDefault: + return "" + case BuildDebug: + return "+debug" + case Build2k: + return "+2k" + default: + return "+huh?" 
+ } +} + +// BuildVersion is the local build version, set by build system +const BuildVersion = "1.1.0" + +func UserVersion() string { + return BuildVersion + buildType() + CurrentCommit +} + +type Version uint32 + +func newVer(major, minor, patch uint8) Version { + return Version(uint32(major)<<16 | uint32(minor)<<8 | uint32(patch)) +} + +// Ints returns (major, minor, patch) versions +func (ve Version) Ints() (uint32, uint32, uint32) { + v := uint32(ve) + return (v & majorOnlyMask) >> 16, (v & minorOnlyMask) >> 8, v & patchOnlyMask +} + +func (ve Version) String() string { + vmj, vmi, vp := ve.Ints() + return fmt.Sprintf("%d.%d.%d", vmj, vmi, vp) +} + +func (ve Version) EqMajorMinor(v2 Version) bool { + return ve&minorMask == v2&minorMask +} + +type NodeType int + +const ( + NodeUnknown NodeType = iota + + NodeFull + NodeMiner + NodeWorker +) + +var RunningNodeType NodeType + +func VersionForType(nodeType NodeType) (Version, error) { + switch nodeType { + case NodeFull: + return FullAPIVersion, nil + case NodeMiner: + return MinerAPIVersion, nil + case NodeWorker: + return WorkerAPIVersion, nil + default: + return Version(0), xerrors.Errorf("unknown node type %d", nodeType) + } +} + +// semver versions of the rpc api exposed +var ( + FullAPIVersion = newVer(0, 17, 0) + MinerAPIVersion = newVer(0, 15, 0) + WorkerAPIVersion = newVer(0, 15, 0) +) + +//nolint:varcheck,deadcode +const ( + majorMask = 0xff0000 + minorMask = 0xffff00 + patchMask = 0xffffff + + majorOnlyMask = 0xff0000 + minorOnlyMask = 0x00ff00 + patchOnlyMask = 0x0000ff +) diff --git a/vendor/github.com/filecoin-project/lotus/chain/actors/aerrors/error.go b/vendor/github.com/filecoin-project/lotus/chain/actors/aerrors/error.go new file mode 100644 index 0000000000..12f802c8fc --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/actors/aerrors/error.go @@ -0,0 +1,69 @@ +package aerrors + +import ( + "fmt" + + "github.com/filecoin-project/go-state-types/exitcode" + "golang.org/x/xerrors" +) 
+ +func IsFatal(err ActorError) bool { + return err != nil && err.IsFatal() +} +func RetCode(err ActorError) exitcode.ExitCode { + if err == nil { + return 0 + } + return err.RetCode() +} + +type internalActorError interface { + ActorError + FormatError(p xerrors.Printer) (next error) + Unwrap() error +} + +type ActorError interface { + error + IsFatal() bool + RetCode() exitcode.ExitCode +} + +type actorError struct { + fatal bool + retCode exitcode.ExitCode + + msg string + frame xerrors.Frame + err error +} + +func (e *actorError) IsFatal() bool { + return e.fatal +} + +func (e *actorError) RetCode() exitcode.ExitCode { + return e.retCode +} + +func (e *actorError) Error() string { + return fmt.Sprint(e) +} +func (e *actorError) Format(s fmt.State, v rune) { xerrors.FormatError(e, s, v) } +func (e *actorError) FormatError(p xerrors.Printer) (next error) { + p.Print(e.msg) + if e.fatal { + p.Print(" (FATAL)") + } else { + p.Printf(" (RetCode=%d)", e.retCode) + } + + e.frame.Format(p) + return e.err +} + +func (e *actorError) Unwrap() error { + return e.err +} + +var _ internalActorError = (*actorError)(nil) diff --git a/vendor/github.com/filecoin-project/lotus/chain/actors/aerrors/wrap.go b/vendor/github.com/filecoin-project/lotus/chain/actors/aerrors/wrap.go new file mode 100644 index 0000000000..0552829f91 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/actors/aerrors/wrap.go @@ -0,0 +1,203 @@ +package aerrors + +import ( + "errors" + "fmt" + + "github.com/filecoin-project/go-state-types/exitcode" + cbor "github.com/ipfs/go-ipld-cbor" + "golang.org/x/xerrors" +) + +// New creates a new non-fatal error +func New(retCode exitcode.ExitCode, message string) ActorError { + if retCode == 0 { + return &actorError{ + fatal: true, + retCode: 0, + + msg: "tried creating an error and setting RetCode to 0", + frame: xerrors.Caller(1), + err: errors.New(message), + } + } + return &actorError{ + retCode: retCode, + + msg: message, + frame: 
xerrors.Caller(1), + } +} + +// Newf creates a new non-fatal error +func Newf(retCode exitcode.ExitCode, format string, args ...interface{}) ActorError { + if retCode == 0 { + return &actorError{ + fatal: true, + retCode: 0, + + msg: "tried creating an error and setting RetCode to 0", + frame: xerrors.Caller(1), + err: fmt.Errorf(format, args...), + } + } + return &actorError{ + retCode: retCode, + + msg: fmt.Sprintf(format, args...), + frame: xerrors.Caller(1), + } +} + +// todo: bit hacky + +func NewfSkip(skip int, retCode exitcode.ExitCode, format string, args ...interface{}) ActorError { + if retCode == 0 { + return &actorError{ + fatal: true, + retCode: 0, + + msg: "tried creating an error and setting RetCode to 0", + frame: xerrors.Caller(skip), + err: fmt.Errorf(format, args...), + } + } + return &actorError{ + retCode: retCode, + + msg: fmt.Sprintf(format, args...), + frame: xerrors.Caller(skip), + } +} + +func Fatal(message string, args ...interface{}) ActorError { + return &actorError{ + fatal: true, + msg: message, + frame: xerrors.Caller(1), + } +} + +func Fatalf(format string, args ...interface{}) ActorError { + return &actorError{ + fatal: true, + msg: fmt.Sprintf(format, args...), + frame: xerrors.Caller(1), + } +} + +// Wrap extens chain of errors with a message +func Wrap(err ActorError, message string) ActorError { + if err == nil { + return nil + } + return &actorError{ + fatal: IsFatal(err), + retCode: RetCode(err), + + msg: message, + frame: xerrors.Caller(1), + err: err, + } +} + +// Wrapf extens chain of errors with a message +func Wrapf(err ActorError, format string, args ...interface{}) ActorError { + if err == nil { + return nil + } + return &actorError{ + fatal: IsFatal(err), + retCode: RetCode(err), + + msg: fmt.Sprintf(format, args...), + frame: xerrors.Caller(1), + err: err, + } +} + +// Absorb takes and error and makes in not fatal ActorError +func Absorb(err error, retCode exitcode.ExitCode, msg string) ActorError { + if err == nil { 
+ return nil + } + if aerr, ok := err.(ActorError); ok && IsFatal(aerr) { + return &actorError{ + fatal: true, + retCode: 0, + + msg: "tried absorbing an error that is already a fatal error", + frame: xerrors.Caller(1), + err: err, + } + } + if retCode == 0 { + return &actorError{ + fatal: true, + retCode: 0, + + msg: "tried absorbing an error and setting RetCode to 0", + frame: xerrors.Caller(1), + err: err, + } + } + + return &actorError{ + fatal: false, + retCode: retCode, + + msg: msg, + frame: xerrors.Caller(1), + err: err, + } +} + +// Escalate takes and error and escalates it into a fatal error +func Escalate(err error, msg string) ActorError { + if err == nil { + return nil + } + return &actorError{ + fatal: true, + + msg: msg, + frame: xerrors.Caller(1), + err: err, + } +} + +func HandleExternalError(err error, msg string) ActorError { + if err == nil { + return nil + } + + if aerr, ok := err.(ActorError); ok { + return &actorError{ + fatal: IsFatal(aerr), + retCode: RetCode(aerr), + + msg: msg, + frame: xerrors.Caller(1), + err: aerr, + } + } + + if xerrors.Is(err, &cbor.SerializationError{}) { + return &actorError{ + fatal: false, + retCode: 253, + msg: msg, + frame: xerrors.Caller(1), + err: err, + } + } + + return &actorError{ + fatal: false, + retCode: 219, + + msg: msg, + frame: xerrors.Caller(1), + err: err, + } +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/actors/params.go b/vendor/github.com/filecoin-project/lotus/chain/actors/params.go new file mode 100644 index 0000000000..e14dcafc9f --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/actors/params.go @@ -0,0 +1,17 @@ +package actors + +import ( + "bytes" + + "github.com/filecoin-project/lotus/chain/actors/aerrors" + cbg "github.com/whyrusleeping/cbor-gen" +) + +func SerializeParams(i cbg.CBORMarshaler) ([]byte, aerrors.ActorError) { + buf := new(bytes.Buffer) + if err := i.MarshalCBOR(buf); err != nil { + // TODO: shouldnt this be a fatal error? 
+ return nil, aerrors.Absorb(err, 1, "failed to encode parameter") + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/actor.go b/vendor/github.com/filecoin-project/lotus/chain/types/actor.go new file mode 100644 index 0000000000..a9974a01f4 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/actor.go @@ -0,0 +1,17 @@ +package types + +import ( + "errors" + + "github.com/ipfs/go-cid" +) + +var ErrActorNotFound = errors.New("actor not found") + +type Actor struct { + // Identifies the type of actor (string coded as a CID), see `chain/actors/actors.go`. + Code cid.Cid + Head cid.Cid + Nonce uint64 + Balance BigInt +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/bigint.go b/vendor/github.com/filecoin-project/lotus/chain/types/bigint.go new file mode 100644 index 0000000000..da4857d5b4 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/bigint.go @@ -0,0 +1,96 @@ +package types + +import ( + "fmt" + "math/big" + + big2 "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/build" +) + +const BigIntMaxSerializedLen = 128 // is this big enough? or too big? 
+ +var TotalFilecoinInt = FromFil(build.FilBase) + +var EmptyInt = BigInt{} + +type BigInt = big2.Int + +func NewInt(i uint64) BigInt { + return BigInt{Int: big.NewInt(0).SetUint64(i)} +} + +func FromFil(i uint64) BigInt { + return BigMul(NewInt(i), NewInt(build.FilecoinPrecision)) +} + +func BigFromBytes(b []byte) BigInt { + i := big.NewInt(0).SetBytes(b) + return BigInt{Int: i} +} + +func BigFromString(s string) (BigInt, error) { + v, ok := big.NewInt(0).SetString(s, 10) + if !ok { + return BigInt{}, fmt.Errorf("failed to parse string as a big int") + } + + return BigInt{Int: v}, nil +} + +func BigMul(a, b BigInt) BigInt { + return BigInt{Int: big.NewInt(0).Mul(a.Int, b.Int)} +} + +func BigDiv(a, b BigInt) BigInt { + return BigInt{Int: big.NewInt(0).Div(a.Int, b.Int)} +} + +func BigMod(a, b BigInt) BigInt { + return BigInt{Int: big.NewInt(0).Mod(a.Int, b.Int)} +} + +func BigAdd(a, b BigInt) BigInt { + return BigInt{Int: big.NewInt(0).Add(a.Int, b.Int)} +} + +func BigSub(a, b BigInt) BigInt { + return BigInt{Int: big.NewInt(0).Sub(a.Int, b.Int)} +} + +func BigCmp(a, b BigInt) int { + return a.Int.Cmp(b.Int) +} + +var byteSizeUnits = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB"} + +func SizeStr(bi BigInt) string { + r := new(big.Rat).SetInt(bi.Int) + den := big.NewRat(1, 1024) + + var i int + for f, _ := r.Float64(); f >= 1024 && i+1 < len(byteSizeUnits); f, _ = r.Float64() { + i++ + r = r.Mul(r, den) + } + + f, _ := r.Float64() + return fmt.Sprintf("%.4g %s", f, byteSizeUnits[i]) +} + +var deciUnits = []string{"", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"} + +func DeciStr(bi BigInt) string { + r := new(big.Rat).SetInt(bi.Int) + den := big.NewRat(1, 1024) + + var i int + for f, _ := r.Float64(); f >= 1024 && i+1 < len(deciUnits); f, _ = r.Float64() { + i++ + r = r.Mul(r, den) + } + + f, _ := r.Float64() + return fmt.Sprintf("%.3g %s", f, deciUnits[i]) +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/blockheader.go 
b/vendor/github.com/filecoin-project/lotus/chain/types/blockheader.go new file mode 100644 index 0000000000..0ec33fe421 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/blockheader.go @@ -0,0 +1,249 @@ +package types + +import ( + "bytes" + "math/big" + + "github.com/filecoin-project/specs-actors/actors/runtime/proof" + + "github.com/minio/blake2b-simd" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + + block "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + xerrors "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/lotus/build" +) + +type Ticket struct { + VRFProof []byte +} + +func (t *Ticket) Quality() float64 { + ticketHash := blake2b.Sum256(t.VRFProof) + ticketNum := BigFromBytes(ticketHash[:]).Int + ticketDenu := big.NewInt(1) + ticketDenu.Lsh(ticketDenu, 256) + tv, _ := new(big.Rat).SetFrac(ticketNum, ticketDenu).Float64() + tq := 1 - tv + return tq +} + +type BeaconEntry struct { + Round uint64 + Data []byte +} + +func NewBeaconEntry(round uint64, data []byte) BeaconEntry { + return BeaconEntry{ + Round: round, + Data: data, + } +} + +type BlockHeader struct { + Miner address.Address // 0 + + Ticket *Ticket // 1 + + ElectionProof *ElectionProof // 2 + + BeaconEntries []BeaconEntry // 3 + + WinPoStProof []proof.PoStProof // 4 + + Parents []cid.Cid // 5 + + ParentWeight BigInt // 6 + + Height abi.ChainEpoch // 7 + + ParentStateRoot cid.Cid // 8 + + ParentMessageReceipts cid.Cid // 8 + + Messages cid.Cid // 10 + + BLSAggregate *crypto.Signature // 11 + + Timestamp uint64 // 12 + + BlockSig *crypto.Signature // 13 + + ForkSignaling uint64 // 14 + + // ParentBaseFee is the base fee after executing parent tipset + ParentBaseFee abi.TokenAmount // 15 + + // internal + validated bool // true if the signature has been validated +} + +func (blk *BlockHeader) ToStorageBlock() (block.Block, error) { + data, err := 
blk.Serialize() + if err != nil { + return nil, err + } + + c, err := abi.CidBuilder.Sum(data) + if err != nil { + return nil, err + } + + return block.NewBlockWithCid(data, c) +} + +func (blk *BlockHeader) Cid() cid.Cid { + sb, err := blk.ToStorageBlock() + if err != nil { + panic(err) // Not sure i'm entirely comfortable with this one, needs to be checked + } + + return sb.Cid() +} + +func DecodeBlock(b []byte) (*BlockHeader, error) { + var blk BlockHeader + if err := blk.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, err + } + + return &blk, nil +} + +func (blk *BlockHeader) Serialize() ([]byte, error) { + buf := new(bytes.Buffer) + if err := blk.MarshalCBOR(buf); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (blk *BlockHeader) LastTicket() *Ticket { + return blk.Ticket +} + +func (blk *BlockHeader) SigningBytes() ([]byte, error) { + blkcopy := *blk + blkcopy.BlockSig = nil + + return blkcopy.Serialize() +} + +func (blk *BlockHeader) SetValidated() { + blk.validated = true +} + +func (blk *BlockHeader) IsValidated() bool { + return blk.validated +} + +type MsgMeta struct { + BlsMessages cid.Cid + SecpkMessages cid.Cid +} + +func (mm *MsgMeta) Cid() cid.Cid { + b, err := mm.ToStorageBlock() + if err != nil { + panic(err) // also maybe sketchy + } + return b.Cid() +} + +func (mm *MsgMeta) ToStorageBlock() (block.Block, error) { + var buf bytes.Buffer + if err := mm.MarshalCBOR(&buf); err != nil { + return nil, xerrors.Errorf("failed to marshal MsgMeta: %w", err) + } + + c, err := abi.CidBuilder.Sum(buf.Bytes()) + if err != nil { + return nil, err + } + + return block.NewBlockWithCid(buf.Bytes(), c) +} + +func CidArrsEqual(a, b []cid.Cid) bool { + if len(a) != len(b) { + return false + } + + // order ignoring compare... 
+ s := make(map[cid.Cid]bool) + for _, c := range a { + s[c] = true + } + + for _, c := range b { + if !s[c] { + return false + } + } + return true +} + +func CidArrsSubset(a, b []cid.Cid) bool { + // order ignoring compare... + s := make(map[cid.Cid]bool) + for _, c := range b { + s[c] = true + } + + for _, c := range a { + if !s[c] { + return false + } + } + return true +} + +func CidArrsContains(a []cid.Cid, b cid.Cid) bool { + for _, elem := range a { + if elem.Equals(b) { + return true + } + } + return false +} + +var blocksPerEpoch = NewInt(build.BlocksPerEpoch) + +const sha256bits = 256 + +func IsTicketWinner(vrfTicket []byte, mypow BigInt, totpow BigInt) bool { + /* + Need to check that + (h(vrfout) + 1) / (max(h) + 1) <= e * myPower / totalPower + max(h) == 2^256-1 + which in terms of integer math means: + (h(vrfout) + 1) * totalPower <= e * myPower * 2^256 + in 2^256 space, it is equivalent to: + h(vrfout) * totalPower < e * myPower * 2^256 + + */ + + h := blake2b.Sum256(vrfTicket) + + lhs := BigFromBytes(h[:]).Int + lhs = lhs.Mul(lhs, totpow.Int) + + // rhs = sectorSize * 2^256 + // rhs = sectorSize << 256 + rhs := new(big.Int).Lsh(mypow.Int, sha256bits) + rhs = rhs.Mul(rhs, blocksPerEpoch.Int) + + // h(vrfout) * totalPower < e * sectorSize * 2^256? 
+ return lhs.Cmp(rhs) < 0 +} + +func (t *Ticket) Equals(ot *Ticket) bool { + return bytes.Equal(t.VRFProof, ot.VRFProof) +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/blockmsg.go b/vendor/github.com/filecoin-project/lotus/chain/types/blockmsg.go new file mode 100644 index 0000000000..f3114499d6 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/blockmsg.go @@ -0,0 +1,34 @@ +package types + +import ( + "bytes" + + "github.com/ipfs/go-cid" +) + +type BlockMsg struct { + Header *BlockHeader + BlsMessages []cid.Cid + SecpkMessages []cid.Cid +} + +func DecodeBlockMsg(b []byte) (*BlockMsg, error) { + var bm BlockMsg + if err := bm.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, err + } + + return &bm, nil +} + +func (bm *BlockMsg) Cid() cid.Cid { + return bm.Header.Cid() +} + +func (bm *BlockMsg) Serialize() ([]byte, error) { + buf := new(bytes.Buffer) + if err := bm.MarshalCBOR(buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/cbor_gen.go b/vendor/github.com/filecoin-project/lotus/chain/types/cbor_gen.go new file mode 100644 index 0000000000..d063ce8c9f --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/cbor_gen.go @@ -0,0 +1,1764 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package types + +import ( + "fmt" + "io" + + abi "github.com/filecoin-project/go-state-types/abi" + crypto "github.com/filecoin-project/go-state-types/crypto" + exitcode "github.com/filecoin-project/go-state-types/exitcode" + proof "github.com/filecoin-project/specs-actors/actors/runtime/proof" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +var lengthBufBlockHeader = []byte{144} + +func (t *BlockHeader) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufBlockHeader); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Miner (address.Address) (struct) + if err := t.Miner.MarshalCBOR(w); err != nil { + return err + } + + // t.Ticket (types.Ticket) (struct) + if err := t.Ticket.MarshalCBOR(w); err != nil { + return err + } + + // t.ElectionProof (types.ElectionProof) (struct) + if err := t.ElectionProof.MarshalCBOR(w); err != nil { + return err + } + + // t.BeaconEntries ([]types.BeaconEntry) (slice) + if len(t.BeaconEntries) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.BeaconEntries was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.BeaconEntries))); err != nil { + return err + } + for _, v := range t.BeaconEntries { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + // t.WinPoStProof ([]proof.PoStProof) (slice) + if len(t.WinPoStProof) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.WinPoStProof was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.WinPoStProof))); err != nil { + return err + } + for _, v := range t.WinPoStProof { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + // t.Parents ([]cid.Cid) (slice) + if len(t.Parents) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Parents was 
too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Parents))); err != nil { + return err + } + for _, v := range t.Parents { + if err := cbg.WriteCidBuf(scratch, w, v); err != nil { + return xerrors.Errorf("failed writing cid field t.Parents: %w", err) + } + } + + // t.ParentWeight (big.Int) (struct) + if err := t.ParentWeight.MarshalCBOR(w); err != nil { + return err + } + + // t.Height (abi.ChainEpoch) (int64) + if t.Height >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Height)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Height-1)); err != nil { + return err + } + } + + // t.ParentStateRoot (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.ParentStateRoot); err != nil { + return xerrors.Errorf("failed to write cid field t.ParentStateRoot: %w", err) + } + + // t.ParentMessageReceipts (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.ParentMessageReceipts); err != nil { + return xerrors.Errorf("failed to write cid field t.ParentMessageReceipts: %w", err) + } + + // t.Messages (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Messages); err != nil { + return xerrors.Errorf("failed to write cid field t.Messages: %w", err) + } + + // t.BLSAggregate (crypto.Signature) (struct) + if err := t.BLSAggregate.MarshalCBOR(w); err != nil { + return err + } + + // t.Timestamp (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Timestamp)); err != nil { + return err + } + + // t.BlockSig (crypto.Signature) (struct) + if err := t.BlockSig.MarshalCBOR(w); err != nil { + return err + } + + // t.ForkSignaling (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ForkSignaling)); err != nil { + return err + } + + // t.ParentBaseFee (big.Int) (struct) + if err := 
t.ParentBaseFee.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error { + *t = BlockHeader{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 16 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Miner (address.Address) (struct) + + { + + if err := t.Miner.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Miner: %w", err) + } + + } + // t.Ticket (types.Ticket) (struct) + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.Ticket = new(Ticket) + if err := t.Ticket.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Ticket pointer: %w", err) + } + } + + } + // t.ElectionProof (types.ElectionProof) (struct) + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.ElectionProof = new(ElectionProof) + if err := t.ElectionProof.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ElectionProof pointer: %w", err) + } + } + + } + // t.BeaconEntries ([]types.BeaconEntry) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.BeaconEntries: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.BeaconEntries = make([]BeaconEntry, extra) + } + + for i := 0; i < int(extra); i++ { + + var v BeaconEntry + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.BeaconEntries[i] = v + } + + // t.WinPoStProof ([]proof.PoStProof) (slice) + + 
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.WinPoStProof: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.WinPoStProof = make([]proof.PoStProof, extra) + } + + for i := 0; i < int(extra); i++ { + + var v proof.PoStProof + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.WinPoStProof[i] = v + } + + // t.Parents ([]cid.Cid) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Parents: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Parents = make([]cid.Cid, extra) + } + + for i := 0; i < int(extra); i++ { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("reading cid field t.Parents failed: %w", err) + } + t.Parents[i] = c + } + + // t.ParentWeight (big.Int) (struct) + + { + + if err := t.ParentWeight.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ParentWeight: %w", err) + } + + } + // t.Height (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Height = abi.ChainEpoch(extraI) + } + // t.ParentStateRoot (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ParentStateRoot: %w", err) + } + + t.ParentStateRoot = c + + } + // 
t.ParentMessageReceipts (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ParentMessageReceipts: %w", err) + } + + t.ParentMessageReceipts = c + + } + // t.Messages (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Messages: %w", err) + } + + t.Messages = c + + } + // t.BLSAggregate (crypto.Signature) (struct) + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.BLSAggregate = new(crypto.Signature) + if err := t.BLSAggregate.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.BLSAggregate pointer: %w", err) + } + } + + } + // t.Timestamp (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Timestamp = uint64(extra) + + } + // t.BlockSig (crypto.Signature) (struct) + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.BlockSig = new(crypto.Signature) + if err := t.BlockSig.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.BlockSig pointer: %w", err) + } + } + + } + // t.ForkSignaling (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ForkSignaling = uint64(extra) + + } + // t.ParentBaseFee (big.Int) (struct) + + { + + if err := t.ParentBaseFee.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ParentBaseFee: %w", err) + } + + } + return nil +} + +var lengthBufTicket = []byte{129} + +func (t *Ticket) MarshalCBOR(w io.Writer) error { + if t == 
nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufTicket); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.VRFProof ([]uint8) (slice) + if len(t.VRFProof) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.VRFProof was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.VRFProof))); err != nil { + return err + } + + if _, err := w.Write(t.VRFProof[:]); err != nil { + return err + } + return nil +} + +func (t *Ticket) UnmarshalCBOR(r io.Reader) error { + *t = Ticket{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.VRFProof ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.VRFProof: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.VRFProof = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.VRFProof[:]); err != nil { + return err + } + return nil +} + +var lengthBufElectionProof = []byte{130} + +func (t *ElectionProof) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufElectionProof); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.WinCount (int64) (int64) + if t.WinCount >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.WinCount)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.WinCount-1)); err != nil { + return err + } + } + + 
// t.VRFProof ([]uint8) (slice) + if len(t.VRFProof) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.VRFProof was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.VRFProof))); err != nil { + return err + } + + if _, err := w.Write(t.VRFProof[:]); err != nil { + return err + } + return nil +} + +func (t *ElectionProof) UnmarshalCBOR(r io.Reader) error { + *t = ElectionProof{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.WinCount (int64) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.WinCount = int64(extraI) + } + // t.VRFProof ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.VRFProof: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.VRFProof = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.VRFProof[:]); err != nil { + return err + } + return nil +} + +var lengthBufMessage = []byte{138} + +func (t *Message) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufMessage); err != nil { + 
return err + } + + scratch := make([]byte, 9) + + // t.Version (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Version)); err != nil { + return err + } + + // t.To (address.Address) (struct) + if err := t.To.MarshalCBOR(w); err != nil { + return err + } + + // t.From (address.Address) (struct) + if err := t.From.MarshalCBOR(w); err != nil { + return err + } + + // t.Nonce (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Nonce)); err != nil { + return err + } + + // t.Value (big.Int) (struct) + if err := t.Value.MarshalCBOR(w); err != nil { + return err + } + + // t.GasLimit (int64) (int64) + if t.GasLimit >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.GasLimit)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.GasLimit-1)); err != nil { + return err + } + } + + // t.GasFeeCap (big.Int) (struct) + if err := t.GasFeeCap.MarshalCBOR(w); err != nil { + return err + } + + // t.GasPremium (big.Int) (struct) + if err := t.GasPremium.MarshalCBOR(w); err != nil { + return err + } + + // t.Method (abi.MethodNum) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Method)); err != nil { + return err + } + + // t.Params ([]uint8) (slice) + if len(t.Params) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Params was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Params))); err != nil { + return err + } + + if _, err := w.Write(t.Params[:]); err != nil { + return err + } + return nil +} + +func (t *Message) UnmarshalCBOR(r io.Reader) error { + *t = Message{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return 
fmt.Errorf("cbor input should be of type array") + } + + if extra != 10 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Version (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Version = uint64(extra) + + } + // t.To (address.Address) (struct) + + { + + if err := t.To.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.To: %w", err) + } + + } + // t.From (address.Address) (struct) + + { + + if err := t.From.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.From: %w", err) + } + + } + // t.Nonce (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Nonce = uint64(extra) + + } + // t.Value (big.Int) (struct) + + { + + if err := t.Value.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Value: %w", err) + } + + } + // t.GasLimit (int64) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.GasLimit = int64(extraI) + } + // t.GasFeeCap (big.Int) (struct) + + { + + if err := t.GasFeeCap.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.GasFeeCap: %w", err) + } + + } + // t.GasPremium (big.Int) (struct) + + { + + if err := t.GasPremium.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.GasPremium: %w", err) + } + + 
} + // t.Method (abi.MethodNum) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Method = abi.MethodNum(extra) + + } + // t.Params ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Params: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Params = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Params[:]); err != nil { + return err + } + return nil +} + +var lengthBufSignedMessage = []byte{130} + +func (t *SignedMessage) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufSignedMessage); err != nil { + return err + } + + // t.Message (types.Message) (struct) + if err := t.Message.MarshalCBOR(w); err != nil { + return err + } + + // t.Signature (crypto.Signature) (struct) + if err := t.Signature.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *SignedMessage) UnmarshalCBOR(r io.Reader) error { + *t = SignedMessage{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Message (types.Message) (struct) + + { + + if err := t.Message.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Message: %w", err) + } + + } + // t.Signature (crypto.Signature) (struct) + + { + + if err := t.Signature.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Signature: %w", err) + } + + } + return nil +} + 
+var lengthBufMsgMeta = []byte{130} + +func (t *MsgMeta) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufMsgMeta); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.BlsMessages (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.BlsMessages); err != nil { + return xerrors.Errorf("failed to write cid field t.BlsMessages: %w", err) + } + + // t.SecpkMessages (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.SecpkMessages); err != nil { + return xerrors.Errorf("failed to write cid field t.SecpkMessages: %w", err) + } + + return nil +} + +func (t *MsgMeta) UnmarshalCBOR(r io.Reader) error { + *t = MsgMeta{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.BlsMessages (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.BlsMessages: %w", err) + } + + t.BlsMessages = c + + } + // t.SecpkMessages (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.SecpkMessages: %w", err) + } + + t.SecpkMessages = c + + } + return nil +} + +var lengthBufActor = []byte{132} + +func (t *Actor) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufActor); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Code (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Code); err != nil { + return xerrors.Errorf("failed to write cid field t.Code: %w", err) + } + + // t.Head (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Head); err 
!= nil { + return xerrors.Errorf("failed to write cid field t.Head: %w", err) + } + + // t.Nonce (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Nonce)); err != nil { + return err + } + + // t.Balance (big.Int) (struct) + if err := t.Balance.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *Actor) UnmarshalCBOR(r io.Reader) error { + *t = Actor{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Code (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Code: %w", err) + } + + t.Code = c + + } + // t.Head (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Head: %w", err) + } + + t.Head = c + + } + // t.Nonce (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Nonce = uint64(extra) + + } + // t.Balance (big.Int) (struct) + + { + + if err := t.Balance.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Balance: %w", err) + } + + } + return nil +} + +var lengthBufMessageReceipt = []byte{131} + +func (t *MessageReceipt) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufMessageReceipt); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.ExitCode (exitcode.ExitCode) (int64) + if t.ExitCode >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ExitCode)); err != nil { + 
return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.ExitCode-1)); err != nil { + return err + } + } + + // t.Return ([]uint8) (slice) + if len(t.Return) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Return was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Return))); err != nil { + return err + } + + if _, err := w.Write(t.Return[:]); err != nil { + return err + } + + // t.GasUsed (int64) (int64) + if t.GasUsed >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.GasUsed)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.GasUsed-1)); err != nil { + return err + } + } + return nil +} + +func (t *MessageReceipt) UnmarshalCBOR(r io.Reader) error { + *t = MessageReceipt{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.ExitCode (exitcode.ExitCode) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.ExitCode = exitcode.ExitCode(extraI) + } + // t.Return ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Return: byte 
array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Return = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Return[:]); err != nil { + return err + } + // t.GasUsed (int64) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.GasUsed = int64(extraI) + } + return nil +} + +var lengthBufBlockMsg = []byte{131} + +func (t *BlockMsg) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufBlockMsg); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Header (types.BlockHeader) (struct) + if err := t.Header.MarshalCBOR(w); err != nil { + return err + } + + // t.BlsMessages ([]cid.Cid) (slice) + if len(t.BlsMessages) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.BlsMessages was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.BlsMessages))); err != nil { + return err + } + for _, v := range t.BlsMessages { + if err := cbg.WriteCidBuf(scratch, w, v); err != nil { + return xerrors.Errorf("failed writing cid field t.BlsMessages: %w", err) + } + } + + // t.SecpkMessages ([]cid.Cid) (slice) + if len(t.SecpkMessages) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.SecpkMessages was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.SecpkMessages))); err != nil { + return err + } + for _, v := range t.SecpkMessages { + if err := 
cbg.WriteCidBuf(scratch, w, v); err != nil { + return xerrors.Errorf("failed writing cid field t.SecpkMessages: %w", err) + } + } + return nil +} + +func (t *BlockMsg) UnmarshalCBOR(r io.Reader) error { + *t = BlockMsg{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Header (types.BlockHeader) (struct) + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.Header = new(BlockHeader) + if err := t.Header.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Header pointer: %w", err) + } + } + + } + // t.BlsMessages ([]cid.Cid) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.BlsMessages: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.BlsMessages = make([]cid.Cid, extra) + } + + for i := 0; i < int(extra); i++ { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("reading cid field t.BlsMessages failed: %w", err) + } + t.BlsMessages[i] = c + } + + // t.SecpkMessages ([]cid.Cid) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.SecpkMessages: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.SecpkMessages = make([]cid.Cid, extra) + } + + for i := 0; i < int(extra); i++ { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("reading cid field t.SecpkMessages 
failed: %w", err) + } + t.SecpkMessages[i] = c + } + + return nil +} + +var lengthBufExpTipSet = []byte{131} + +func (t *ExpTipSet) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufExpTipSet); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Cids ([]cid.Cid) (slice) + if len(t.Cids) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Cids was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Cids))); err != nil { + return err + } + for _, v := range t.Cids { + if err := cbg.WriteCidBuf(scratch, w, v); err != nil { + return xerrors.Errorf("failed writing cid field t.Cids: %w", err) + } + } + + // t.Blocks ([]*types.BlockHeader) (slice) + if len(t.Blocks) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Blocks was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Blocks))); err != nil { + return err + } + for _, v := range t.Blocks { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + // t.Height (abi.ChainEpoch) (int64) + if t.Height >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Height)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Height-1)); err != nil { + return err + } + } + return nil +} + +func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) error { + *t = ExpTipSet{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Cids ([]cid.Cid) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } 
+ + if extra > cbg.MaxLength { + return fmt.Errorf("t.Cids: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Cids = make([]cid.Cid, extra) + } + + for i := 0; i < int(extra); i++ { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("reading cid field t.Cids failed: %w", err) + } + t.Cids[i] = c + } + + // t.Blocks ([]*types.BlockHeader) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Blocks: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Blocks = make([]*BlockHeader, extra) + } + + for i := 0; i < int(extra); i++ { + + var v BlockHeader + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Blocks[i] = &v + } + + // t.Height (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Height = abi.ChainEpoch(extraI) + } + return nil +} + +var lengthBufBeaconEntry = []byte{130} + +func (t *BeaconEntry) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufBeaconEntry); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Round (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Round)); err != nil { + return err + } + + // t.Data ([]uint8) (slice) + if len(t.Data) > cbg.ByteArrayMaxLen { + 
return xerrors.Errorf("Byte array in field t.Data was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Data))); err != nil { + return err + } + + if _, err := w.Write(t.Data[:]); err != nil { + return err + } + return nil +} + +func (t *BeaconEntry) UnmarshalCBOR(r io.Reader) error { + *t = BeaconEntry{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Round (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Round = uint64(extra) + + } + // t.Data ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Data: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Data = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Data[:]); err != nil { + return err + } + return nil +} + +var lengthBufStateRoot = []byte{131} + +func (t *StateRoot) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufStateRoot); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Version (types.StateTreeVersion) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Version)); err != nil { + return err + } + + // t.Actors (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Actors); err != nil { + return xerrors.Errorf("failed to write cid field t.Actors: %w", err) 
+ } + + // t.Info (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Info); err != nil { + return xerrors.Errorf("failed to write cid field t.Info: %w", err) + } + + return nil +} + +func (t *StateRoot) UnmarshalCBOR(r io.Reader) error { + *t = StateRoot{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Version (types.StateTreeVersion) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Version = StateTreeVersion(extra) + + } + // t.Actors (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Actors: %w", err) + } + + t.Actors = c + + } + // t.Info (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Info: %w", err) + } + + t.Info = c + + } + return nil +} + +var lengthBufStateInfo0 = []byte{128} + +func (t *StateInfo0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufStateInfo0); err != nil { + return err + } + + return nil +} + +func (t *StateInfo0) UnmarshalCBOR(r io.Reader) error { + *t = StateInfo0{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 0 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + return nil +} diff --git 
a/vendor/github.com/filecoin-project/lotus/chain/types/execresult.go b/vendor/github.com/filecoin-project/lotus/chain/types/execresult.go new file mode 100644 index 0000000000..6fc93fac6d --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/execresult.go @@ -0,0 +1,103 @@ +package types + +import ( + "encoding/json" + "fmt" + "runtime" + "strings" + "time" +) + +type ExecutionTrace struct { + Msg *Message + MsgRct *MessageReceipt + Error string + Duration time.Duration + GasCharges []*GasTrace + + Subcalls []ExecutionTrace +} + +type GasTrace struct { + Name string + + Location []Loc `json:"loc"` + TotalGas int64 `json:"tg"` + ComputeGas int64 `json:"cg"` + StorageGas int64 `json:"sg"` + TotalVirtualGas int64 `json:"vtg"` + VirtualComputeGas int64 `json:"vcg"` + VirtualStorageGas int64 `json:"vsg"` + + TimeTaken time.Duration `json:"tt"` + Extra interface{} `json:"ex,omitempty"` + + Callers []uintptr `json:"-"` +} + +type Loc struct { + File string + Line int + Function string +} + +func (l Loc) Show() bool { + ignorePrefix := []string{ + "reflect.", + "github.com/filecoin-project/lotus/chain/vm.(*Invoker).transform", + "github.com/filecoin-project/go-amt-ipld/", + } + for _, pre := range ignorePrefix { + if strings.HasPrefix(l.Function, pre) { + return false + } + } + return true +} +func (l Loc) String() string { + file := strings.Split(l.File, "/") + + fn := strings.Split(l.Function, "/") + var fnpkg string + if len(fn) > 2 { + fnpkg = strings.Join(fn[len(fn)-2:], "/") + } else { + fnpkg = l.Function + } + + return fmt.Sprintf("%s@%s:%d", fnpkg, file[len(file)-1], l.Line) +} + +func (l Loc) Important() bool { + if strings.HasPrefix(l.Function, "github.com/filecoin-project/specs-actors/actors/builtin") { + return true + } + return false +} + +func (gt *GasTrace) MarshalJSON() ([]byte, error) { + type GasTraceCopy GasTrace + if len(gt.Location) == 0 { + if len(gt.Callers) != 0 { + frames := runtime.CallersFrames(gt.Callers) + for { + frame, 
more := frames.Next() + if frame.Function == "github.com/filecoin-project/lotus/chain/vm.(*VM).ApplyMessage" { + break + } + l := Loc{ + File: frame.File, + Line: frame.Line, + Function: frame.Function, + } + gt.Location = append(gt.Location, l) + if !more { + break + } + } + } + } + + cpy := (*GasTraceCopy)(gt) + return json.Marshal(cpy) +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/fil.go b/vendor/github.com/filecoin-project/lotus/chain/types/fil.go new file mode 100644 index 0000000000..0ea77660c3 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/fil.go @@ -0,0 +1,94 @@ +package types + +import ( + "encoding" + "fmt" + "math/big" + "strings" + + "github.com/filecoin-project/lotus/build" +) + +type FIL BigInt + +func (f FIL) String() string { + return f.Unitless() + " FIL" +} + +func (f FIL) Unitless() string { + r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(build.FilecoinPrecision))) + if r.Sign() == 0 { + return "0" + } + return strings.TrimRight(strings.TrimRight(r.FloatString(18), "0"), ".") +} + +func (f FIL) Format(s fmt.State, ch rune) { + switch ch { + case 's', 'v': + fmt.Fprint(s, f.String()) + default: + f.Int.Format(s, ch) + } +} + +func (f FIL) MarshalText() (text []byte, err error) { + return []byte(f.String()), nil +} + +func (f FIL) UnmarshalText(text []byte) error { + p, err := ParseFIL(string(text)) + if err != nil { + return err + } + + f.Int.Set(p.Int) + return nil +} + +func ParseFIL(s string) (FIL, error) { + suffix := strings.TrimLeft(s, ".1234567890") + s = s[:len(s)-len(suffix)] + var attofil bool + if suffix != "" { + norm := strings.ToLower(strings.TrimSpace(suffix)) + switch norm { + case "", "fil": + case "attofil", "afil": + attofil = true + default: + return FIL{}, fmt.Errorf("unrecognized suffix: %q", suffix) + } + } + + r, ok := new(big.Rat).SetString(s) + if !ok { + return FIL{}, fmt.Errorf("failed to parse %q as a decimal number", s) + } + + if !attofil { + r = r.Mul(r, 
big.NewRat(int64(build.FilecoinPrecision), 1)) + } + + if !r.IsInt() { + var pref string + if attofil { + pref = "atto" + } + return FIL{}, fmt.Errorf("invalid %sFIL value: %q", pref, s) + } + + return FIL{r.Num()}, nil +} + +func MustParseFIL(s string) FIL { + n, err := ParseFIL(s) + if err != nil { + panic(err) + } + + return n +} + +var _ encoding.TextMarshaler = (*FIL)(nil) +var _ encoding.TextUnmarshaler = (*FIL)(nil) diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/fullblock.go b/vendor/github.com/filecoin-project/lotus/chain/types/fullblock.go new file mode 100644 index 0000000000..5511cea8c9 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/fullblock.go @@ -0,0 +1,13 @@ +package types + +import "github.com/ipfs/go-cid" + +type FullBlock struct { + Header *BlockHeader + BlsMessages []*Message + SecpkMessages []*SignedMessage +} + +func (fb *FullBlock) Cid() cid.Cid { + return fb.Header.Cid() +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/keystore.go b/vendor/github.com/filecoin-project/lotus/chain/types/keystore.go new file mode 100644 index 0000000000..107c1fbe3a --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/keystore.go @@ -0,0 +1,72 @@ +package types + +import ( + "encoding/json" + "fmt" + + "github.com/filecoin-project/go-state-types/crypto" +) + +var ( + ErrKeyInfoNotFound = fmt.Errorf("key info not found") + ErrKeyExists = fmt.Errorf("key already exists") +) + +// KeyType defines a type of a key +type KeyType string + +func (kt *KeyType) UnmarshalJSON(bb []byte) error { + { + // first option, try unmarshaling as string + var s string + err := json.Unmarshal(bb, &s) + if err == nil { + *kt = KeyType(s) + return nil + } + } + + { + var b byte + err := json.Unmarshal(bb, &b) + if err != nil { + return fmt.Errorf("could not unmarshal KeyType either as string nor integer: %w", err) + } + bst := crypto.SigType(b) + + switch bst { + case crypto.SigTypeBLS: + *kt = KTBLS + 
case crypto.SigTypeSecp256k1: + *kt = KTSecp256k1 + default: + return fmt.Errorf("unknown sigtype: %d", bst) + } + log.Warnf("deprecation: integer style 'KeyType' is deprecated, switch to string style") + return nil + } +} + +const ( + KTBLS KeyType = "bls" + KTSecp256k1 KeyType = "secp256k1" + KTSecp256k1Ledger KeyType = "secp256k1-ledger" +) + +// KeyInfo is used for storing keys in KeyStore +type KeyInfo struct { + Type KeyType + PrivateKey []byte +} + +// KeyStore is used for storing secret keys +type KeyStore interface { + // List lists all the keys stored in the KeyStore + List() ([]string, error) + // Get gets a key out of keystore and returns KeyInfo corresponding to named key + Get(string) (KeyInfo, error) + // Put saves a key info under given name + Put(string, KeyInfo) error + // Delete removes a key from keystore + Delete(string) error +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/logs.go b/vendor/github.com/filecoin-project/lotus/chain/types/logs.go new file mode 100644 index 0000000000..e9a69e9d53 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/logs.go @@ -0,0 +1,17 @@ +package types + +import ( + "github.com/ipfs/go-cid" + "go.uber.org/zap/zapcore" +) + +type LogCids []cid.Cid + +var _ zapcore.ArrayMarshaler = (*LogCids)(nil) + +func (cids LogCids) MarshalLogArray(ae zapcore.ArrayEncoder) error { + for _, c := range cids { + ae.AppendString(c.String()) + } + return nil +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/message.go b/vendor/github.com/filecoin-project/lotus/chain/types/message.go new file mode 100644 index 0000000000..c53ecc7c16 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/message.go @@ -0,0 +1,204 @@ +package types + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/build" + block 
"github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + xerrors "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" +) + +const MessageVersion = 0 + +type ChainMsg interface { + Cid() cid.Cid + VMMessage() *Message + ToStorageBlock() (block.Block, error) + // FIXME: This is the *message* length, this name is misleading. + ChainLength() int +} + +type Message struct { + Version uint64 + + To address.Address + From address.Address + + Nonce uint64 + + Value abi.TokenAmount + + GasLimit int64 + GasFeeCap abi.TokenAmount + GasPremium abi.TokenAmount + + Method abi.MethodNum + Params []byte +} + +func (m *Message) Caller() address.Address { + return m.From +} + +func (m *Message) Receiver() address.Address { + return m.To +} + +func (m *Message) ValueReceived() abi.TokenAmount { + return m.Value +} + +func DecodeMessage(b []byte) (*Message, error) { + var msg Message + if err := msg.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, err + } + + if msg.Version != MessageVersion { + return nil, fmt.Errorf("decoded message had incorrect version (%d)", msg.Version) + } + + return &msg, nil +} + +func (m *Message) Serialize() ([]byte, error) { + buf := new(bytes.Buffer) + if err := m.MarshalCBOR(buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (m *Message) ChainLength() int { + ser, err := m.Serialize() + if err != nil { + panic(err) + } + return len(ser) +} + +func (m *Message) ToStorageBlock() (block.Block, error) { + data, err := m.Serialize() + if err != nil { + return nil, err + } + + c, err := abi.CidBuilder.Sum(data) + if err != nil { + return nil, err + } + + return block.NewBlockWithCid(data, c) +} + +func (m *Message) Cid() cid.Cid { + b, err := m.ToStorageBlock() + if err != nil { + panic(fmt.Sprintf("failed to marshal message: %s", err)) // I think this is maybe sketchy, what happens if we try to serialize a message with an undefined address in it? 
+ } + + return b.Cid() +} + +type mCid struct { + *RawMessage + CID cid.Cid +} + +type RawMessage Message + +func (m *Message) MarshalJSON() ([]byte, error) { + return json.Marshal(&mCid{ + RawMessage: (*RawMessage)(m), + CID: m.Cid(), + }) +} + +func (m *Message) RequiredFunds() BigInt { + return BigMul(m.GasFeeCap, NewInt(uint64(m.GasLimit))) +} + +func (m *Message) VMMessage() *Message { + return m +} + +func (m *Message) Equals(o *Message) bool { + return m.Cid() == o.Cid() +} + +func (m *Message) EqualCall(o *Message) bool { + m1 := *m + m2 := *o + + m1.GasLimit, m2.GasLimit = 0, 0 + m1.GasFeeCap, m2.GasFeeCap = big.Zero(), big.Zero() + m1.GasPremium, m2.GasPremium = big.Zero(), big.Zero() + + return (&m1).Equals(&m2) +} + +func (m *Message) ValidForBlockInclusion(minGas int64) error { + if m.Version != 0 { + return xerrors.New("'Version' unsupported") + } + + if m.To == address.Undef { + return xerrors.New("'To' address cannot be empty") + } + + if m.From == address.Undef { + return xerrors.New("'From' address cannot be empty") + } + + if m.Value.Int == nil { + return xerrors.New("'Value' cannot be nil") + } + + if m.Value.LessThan(big.Zero()) { + return xerrors.New("'Value' field cannot be negative") + } + + if m.Value.GreaterThan(TotalFilecoinInt) { + return xerrors.New("'Value' field cannot be greater than total filecoin supply") + } + + if m.GasFeeCap.Int == nil { + return xerrors.New("'GasFeeCap' cannot be nil") + } + + if m.GasFeeCap.LessThan(big.Zero()) { + return xerrors.New("'GasFeeCap' field cannot be negative") + } + + if m.GasPremium.Int == nil { + return xerrors.New("'GasPremium' cannot be nil") + } + + if m.GasPremium.LessThan(big.Zero()) { + return xerrors.New("'GasPremium' field cannot be negative") + } + + if m.GasPremium.GreaterThan(m.GasFeeCap) { + return xerrors.New("'GasFeeCap' less than 'GasPremium'") + } + + if m.GasLimit > build.BlockGasLimit { + return xerrors.New("'GasLimit' field cannot be greater than a block's gas limit") + } + + 
// since prices might vary with time, this is technically semantic validation + if m.GasLimit < minGas { + return xerrors.Errorf("'GasLimit' field cannot be less than the cost of storing a message on chain %d < %d", m.GasLimit, minGas) + } + + return nil +} + +const TestGasLimit = 100e6 diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/message_fuzz.go b/vendor/github.com/filecoin-project/lotus/chain/types/message_fuzz.go new file mode 100644 index 0000000000..4ef5f6ba22 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/message_fuzz.go @@ -0,0 +1,30 @@ +//+build gofuzz + +package types + +import "bytes" + +func FuzzMessage(data []byte) int { + var msg Message + err := msg.UnmarshalCBOR(bytes.NewReader(data)) + if err != nil { + return 0 + } + reData, err := msg.Serialize() + if err != nil { + panic(err) // ok + } + var msg2 Message + err = msg2.UnmarshalCBOR(bytes.NewReader(data)) + if err != nil { + panic(err) // ok + } + reData2, err := msg.Serialize() + if err != nil { + panic(err) // ok + } + if !bytes.Equal(reData, reData2) { + panic("reencoding not equal") // ok + } + return 1 +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/message_receipt.go b/vendor/github.com/filecoin-project/lotus/chain/types/message_receipt.go new file mode 100644 index 0000000000..57761680d2 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/message_receipt.go @@ -0,0 +1,17 @@ +package types + +import ( + "bytes" + + "github.com/filecoin-project/go-state-types/exitcode" +) + +type MessageReceipt struct { + ExitCode exitcode.ExitCode + Return []byte + GasUsed int64 +} + +func (mr *MessageReceipt) Equals(o *MessageReceipt) bool { + return mr.ExitCode == o.ExitCode && bytes.Equal(mr.Return, o.Return) && mr.GasUsed == o.GasUsed +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/signedmessage.go b/vendor/github.com/filecoin-project/lotus/chain/types/signedmessage.go new file mode 100644 index 
0000000000..c539ac2402 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/signedmessage.go @@ -0,0 +1,107 @@ +package types + +import ( + "bytes" + "encoding/json" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + block "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +func (sm *SignedMessage) ToStorageBlock() (block.Block, error) { + if sm.Signature.Type == crypto.SigTypeBLS { + return sm.Message.ToStorageBlock() + } + + data, err := sm.Serialize() + if err != nil { + return nil, err + } + + c, err := abi.CidBuilder.Sum(data) + if err != nil { + return nil, err + } + + return block.NewBlockWithCid(data, c) +} + +func (sm *SignedMessage) Cid() cid.Cid { + if sm.Signature.Type == crypto.SigTypeBLS { + return sm.Message.Cid() + } + + sb, err := sm.ToStorageBlock() + if err != nil { + panic(err) + } + + return sb.Cid() +} + +type SignedMessage struct { + Message Message + Signature crypto.Signature +} + +func DecodeSignedMessage(data []byte) (*SignedMessage, error) { + var msg SignedMessage + if err := msg.UnmarshalCBOR(bytes.NewReader(data)); err != nil { + return nil, err + } + + return &msg, nil +} + +func (sm *SignedMessage) Serialize() ([]byte, error) { + buf := new(bytes.Buffer) + if err := sm.MarshalCBOR(buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +type smCid struct { + *RawSignedMessage + CID cid.Cid +} + +type RawSignedMessage SignedMessage + +func (sm *SignedMessage) MarshalJSON() ([]byte, error) { + return json.Marshal(&smCid{ + RawSignedMessage: (*RawSignedMessage)(sm), + CID: sm.Cid(), + }) +} + +func (sm *SignedMessage) ChainLength() int { + var ser []byte + var err error + if sm.Signature.Type == crypto.SigTypeBLS { + // BLS chain message length doesn't include signature + ser, err = sm.Message.Serialize() + } else { + ser, err = sm.Serialize() + } + if err != nil { + panic(err) + } + return len(ser) +} + +func (sm 
*SignedMessage) Size() int { + serdata, err := sm.Serialize() + if err != nil { + log.Errorf("serializing message failed: %s", err) + return 0 + } + + return len(serdata) +} + +func (sm *SignedMessage) VMMessage() *Message { + return &sm.Message +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/tipset.go b/vendor/github.com/filecoin-project/lotus/chain/types/tipset.go new file mode 100644 index 0000000000..07eff37345 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/tipset.go @@ -0,0 +1,248 @@ +package types + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "sort" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + "github.com/minio/blake2b-simd" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +var log = logging.Logger("types") + +type TipSet struct { + cids []cid.Cid + blks []*BlockHeader + height abi.ChainEpoch +} + +type ExpTipSet struct { + Cids []cid.Cid + Blocks []*BlockHeader + Height abi.ChainEpoch +} + +func (ts *TipSet) MarshalJSON() ([]byte, error) { + // why didnt i just export the fields? 
Because the struct has methods with the + // same names already + return json.Marshal(ExpTipSet{ + Cids: ts.cids, + Blocks: ts.blks, + Height: ts.height, + }) +} + +func (ts *TipSet) UnmarshalJSON(b []byte) error { + var ets ExpTipSet + if err := json.Unmarshal(b, &ets); err != nil { + return err + } + + ots, err := NewTipSet(ets.Blocks) + if err != nil { + return err + } + + *ts = *ots + + return nil +} + +func (ts *TipSet) MarshalCBOR(w io.Writer) error { + if ts == nil { + _, err := w.Write(cbg.CborNull) + return err + } + return (&ExpTipSet{ + Cids: ts.cids, + Blocks: ts.blks, + Height: ts.height, + }).MarshalCBOR(w) +} + +func (ts *TipSet) UnmarshalCBOR(r io.Reader) error { + var ets ExpTipSet + if err := ets.UnmarshalCBOR(r); err != nil { + return err + } + + ots, err := NewTipSet(ets.Blocks) + if err != nil { + return err + } + + *ts = *ots + + return nil +} + +func tipsetSortFunc(blks []*BlockHeader) func(i, j int) bool { + return func(i, j int) bool { + ti := blks[i].LastTicket() + tj := blks[j].LastTicket() + + if ti.Equals(tj) { + log.Warnf("blocks have same ticket (%s %s)", blks[i].Miner, blks[j].Miner) + return bytes.Compare(blks[i].Cid().Bytes(), blks[j].Cid().Bytes()) < 0 + } + + return ti.Less(tj) + } +} + +// Checks: +// * A tipset is composed of at least one block. (Because of our variable +// number of blocks per tipset, determined by randomness, we do not impose +// an upper limit.) +// * All blocks have the same height. +// * All blocks have the same parents (same number of them and matching CIDs). 
+func NewTipSet(blks []*BlockHeader) (*TipSet, error) { + if len(blks) == 0 { + return nil, xerrors.Errorf("NewTipSet called with zero length array of blocks") + } + + sort.Slice(blks, tipsetSortFunc(blks)) + + var ts TipSet + ts.cids = []cid.Cid{blks[0].Cid()} + ts.blks = blks + for _, b := range blks[1:] { + if b.Height != blks[0].Height { + return nil, fmt.Errorf("cannot create tipset with mismatching heights") + } + + if len(blks[0].Parents) != len(b.Parents) { + return nil, fmt.Errorf("cannot create tipset with mismatching number of parents") + } + + for i, cid := range b.Parents { + if cid != blks[0].Parents[i] { + return nil, fmt.Errorf("cannot create tipset with mismatching parents") + } + } + + ts.cids = append(ts.cids, b.Cid()) + + } + ts.height = blks[0].Height + + return &ts, nil +} + +func (ts *TipSet) Cids() []cid.Cid { + return ts.cids +} + +func (ts *TipSet) Key() TipSetKey { + if ts == nil { + return EmptyTSK + } + return NewTipSetKey(ts.cids...) +} + +func (ts *TipSet) Height() abi.ChainEpoch { + return ts.height +} + +func (ts *TipSet) Parents() TipSetKey { + return NewTipSetKey(ts.blks[0].Parents...) 
+} + +func (ts *TipSet) Blocks() []*BlockHeader { + return ts.blks +} + +func (ts *TipSet) Equals(ots *TipSet) bool { + if ts == nil && ots == nil { + return true + } + if ts == nil || ots == nil { + return false + } + + if ts.height != ots.height { + return false + } + + if len(ts.cids) != len(ots.cids) { + return false + } + + for i, cid := range ts.cids { + if cid != ots.cids[i] { + return false + } + } + + return true +} + +func (t *Ticket) Less(o *Ticket) bool { + tDigest := blake2b.Sum256(t.VRFProof) + oDigest := blake2b.Sum256(o.VRFProof) + return bytes.Compare(tDigest[:], oDigest[:]) < 0 +} + +func (ts *TipSet) MinTicket() *Ticket { + return ts.MinTicketBlock().Ticket +} + +func (ts *TipSet) MinTimestamp() uint64 { + minTs := ts.Blocks()[0].Timestamp + for _, bh := range ts.Blocks()[1:] { + if bh.Timestamp < minTs { + minTs = bh.Timestamp + } + } + return minTs +} + +func (ts *TipSet) MinTicketBlock() *BlockHeader { + blks := ts.Blocks() + + min := blks[0] + + for _, b := range blks[1:] { + if b.LastTicket().Less(min.LastTicket()) { + min = b + } + } + + return min +} + +func (ts *TipSet) ParentState() cid.Cid { + return ts.blks[0].ParentStateRoot +} + +func (ts *TipSet) ParentWeight() BigInt { + return ts.blks[0].ParentWeight +} + +func (ts *TipSet) Contains(oc cid.Cid) bool { + for _, c := range ts.cids { + if c == oc { + return true + } + } + return false +} + +func (ts *TipSet) IsChildOf(parent *TipSet) bool { + return CidArrsEqual(ts.Parents().Cids(), parent.Cids()) && + // FIXME: The height check might go beyond what is meant by + // "parent", but many parts of the code rely on the tipset's + // height for their processing logic at the moment to obviate it. 
+ ts.height > parent.height +} + +func (ts *TipSet) String() string { + return fmt.Sprintf("%v", ts.cids) +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/tipset_key.go b/vendor/github.com/filecoin-project/lotus/chain/types/tipset_key.go new file mode 100644 index 0000000000..e5bc7750de --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/tipset_key.go @@ -0,0 +1,125 @@ +package types + +import ( + "bytes" + "encoding/json" + "strings" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" +) + +var EmptyTSK = TipSetKey{} + +// The length of a block header CID in bytes. +var blockHeaderCIDLen int + +func init() { + // hash a large string of zeros so we don't estimate based on inlined CIDs. + var buf [256]byte + c, err := abi.CidBuilder.Sum(buf[:]) + if err != nil { + panic(err) + } + blockHeaderCIDLen = len(c.Bytes()) +} + +// A TipSetKey is an immutable collection of CIDs forming a unique key for a tipset. +// The CIDs are assumed to be distinct and in canonical order. Two keys with the same +// CIDs in a different order are not considered equal. +// TipSetKey is a lightweight value type, and may be compared for equality with ==. +type TipSetKey struct { + // The internal representation is a concatenation of the bytes of the CIDs, which are + // self-describing, wrapped as a string. + // These gymnastics make the a TipSetKey usable as a map key. + // The empty key has value "". + value string +} + +// NewTipSetKey builds a new key from a slice of CIDs. +// The CIDs are assumed to be ordered correctly. +func NewTipSetKey(cids ...cid.Cid) TipSetKey { + encoded := encodeKey(cids) + return TipSetKey{string(encoded)} +} + +// TipSetKeyFromBytes wraps an encoded key, validating correct decoding. 
+func TipSetKeyFromBytes(encoded []byte) (TipSetKey, error) { + _, err := decodeKey(encoded) + if err != nil { + return TipSetKey{}, err + } + return TipSetKey{string(encoded)}, nil +} + +// Cids returns a slice of the CIDs comprising this key. +func (k TipSetKey) Cids() []cid.Cid { + cids, err := decodeKey([]byte(k.value)) + if err != nil { + panic("invalid tipset key: " + err.Error()) + } + return cids +} + +// String() returns a human-readable representation of the key. +func (k TipSetKey) String() string { + b := strings.Builder{} + b.WriteString("{") + cids := k.Cids() + for i, c := range cids { + b.WriteString(c.String()) + if i < len(cids)-1 { + b.WriteString(",") + } + } + b.WriteString("}") + return b.String() +} + +// Bytes() returns a binary representation of the key. +func (k TipSetKey) Bytes() []byte { + return []byte(k.value) +} + +func (k TipSetKey) MarshalJSON() ([]byte, error) { + return json.Marshal(k.Cids()) +} + +func (k *TipSetKey) UnmarshalJSON(b []byte) error { + var cids []cid.Cid + if err := json.Unmarshal(b, &cids); err != nil { + return err + } + k.value = string(encodeKey(cids)) + return nil +} + +func (k TipSetKey) IsEmpty() bool { + return len(k.value) == 0 +} + +func encodeKey(cids []cid.Cid) []byte { + buffer := new(bytes.Buffer) + for _, c := range cids { + // bytes.Buffer.Write() err is documented to be always nil. + _, _ = buffer.Write(c.Bytes()) + } + return buffer.Bytes() +} + +func decodeKey(encoded []byte) ([]cid.Cid, error) { + // To avoid reallocation of the underlying array, estimate the number of CIDs to be extracted + // by dividing the encoded length by the expected CID length. 
+ estimatedCount := len(encoded) / blockHeaderCIDLen + cids := make([]cid.Cid, 0, estimatedCount) + nextIdx := 0 + for nextIdx < len(encoded) { + nr, c, err := cid.CidFromBytes(encoded[nextIdx:]) + if err != nil { + return nil, err + } + cids = append(cids, c) + nextIdx += nr + } + return cids, nil +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/vmcontext.go b/vendor/github.com/filecoin-project/lotus/chain/types/vmcontext.go new file mode 100644 index 0000000000..d0ce42c0f2 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/vmcontext.go @@ -0,0 +1,47 @@ +package types + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/actors/aerrors" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" +) + +type Storage interface { + Put(cbg.CBORMarshaler) (cid.Cid, aerrors.ActorError) + Get(cid.Cid, cbg.CBORUnmarshaler) aerrors.ActorError + + GetHead() cid.Cid + + // Commit sets the new head of the actors state as long as the current + // state matches 'oldh' + Commit(oldh cid.Cid, newh cid.Cid) aerrors.ActorError +} + +type StateTree interface { + SetActor(addr address.Address, act *Actor) error + // GetActor returns the actor from any type of `addr` provided. 
+ GetActor(addr address.Address) (*Actor, error) +} + +type storageWrapper struct { + s Storage +} + +func (sw *storageWrapper) Put(i cbg.CBORMarshaler) (cid.Cid, error) { + c, err := sw.s.Put(i) + if err != nil { + return cid.Undef, err + } + + return c, nil +} + +func (sw *storageWrapper) Get(c cid.Cid, out cbg.CBORUnmarshaler) error { + if err := sw.s.Get(c, out); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/filecoin-project/lotus/lib/addrutil/parse.go b/vendor/github.com/filecoin-project/lotus/lib/addrutil/parse.go new file mode 100644 index 0000000000..f9ee04c3f7 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/lib/addrutil/parse.go @@ -0,0 +1,89 @@ +package addrutil + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/libp2p/go-libp2p-core/peer" + ma "github.com/multiformats/go-multiaddr" + madns "github.com/multiformats/go-multiaddr-dns" +) + +// ParseAddresses is a function that takes in a slice of string peer addresses +// (multiaddr + peerid) and returns a slice of properly constructed peers +func ParseAddresses(ctx context.Context, addrs []string) ([]peer.AddrInfo, error) { + // resolve addresses + maddrs, err := resolveAddresses(ctx, addrs) + if err != nil { + return nil, err + } + + return peer.AddrInfosFromP2pAddrs(maddrs...) 
+} + +const ( + dnsResolveTimeout = 10 * time.Second +) + +// resolveAddresses resolves addresses parallelly +func resolveAddresses(ctx context.Context, addrs []string) ([]ma.Multiaddr, error) { + ctx, cancel := context.WithTimeout(ctx, dnsResolveTimeout) + defer cancel() + + var maddrs []ma.Multiaddr + var wg sync.WaitGroup + resolveErrC := make(chan error, len(addrs)) + + maddrC := make(chan ma.Multiaddr) + + for _, addr := range addrs { + maddr, err := ma.NewMultiaddr(addr) + if err != nil { + return nil, err + } + + // check whether address ends in `ipfs/Qm...` + if _, last := ma.SplitLast(maddr); last.Protocol().Code == ma.P_IPFS { + maddrs = append(maddrs, maddr) + continue + } + wg.Add(1) + go func(maddr ma.Multiaddr) { + defer wg.Done() + raddrs, err := madns.Resolve(ctx, maddr) + if err != nil { + resolveErrC <- err + return + } + // filter out addresses that still doesn't end in `ipfs/Qm...` + found := 0 + for _, raddr := range raddrs { + if _, last := ma.SplitLast(raddr); last != nil && last.Protocol().Code == ma.P_IPFS { + maddrC <- raddr + found++ + } + } + if found == 0 { + resolveErrC <- fmt.Errorf("found no ipfs peers at %s", maddr) + } + }(maddr) + } + go func() { + wg.Wait() + close(maddrC) + }() + + for maddr := range maddrC { + maddrs = append(maddrs, maddr) + } + + select { + case err := <-resolveErrC: + return nil, err + default: + } + + return maddrs, nil +} diff --git a/vendor/github.com/filecoin-project/lotus/lib/sigs/doc.go b/vendor/github.com/filecoin-project/lotus/lib/sigs/doc.go new file mode 100644 index 0000000000..ca3093f395 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/lib/sigs/doc.go @@ -0,0 +1,9 @@ +// Package sigs allows for signing, verifying signatures and key generation +// using key types selected by package user. 
+// +// For support of secp256k1 import: +// _ "github.com/filecoin-project/lotus/lib/sigs/secp" +// +// For support of Filecoin BLS import: +// _ "github.com/filecoin-project/lotus/lib/sigs/bls" +package sigs diff --git a/vendor/github.com/filecoin-project/lotus/lib/sigs/secp/init.go b/vendor/github.com/filecoin-project/lotus/lib/sigs/secp/init.go new file mode 100644 index 0000000000..674bbbb28c --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/lib/sigs/secp/init.go @@ -0,0 +1,59 @@ +package secp + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-crypto" + crypto2 "github.com/filecoin-project/go-state-types/crypto" + "github.com/minio/blake2b-simd" + + "github.com/filecoin-project/lotus/lib/sigs" +) + +type secpSigner struct{} + +func (secpSigner) GenPrivate() ([]byte, error) { + priv, err := crypto.GenerateKey() + if err != nil { + return nil, err + } + return priv, nil +} + +func (secpSigner) ToPublic(pk []byte) ([]byte, error) { + return crypto.PublicKey(pk), nil +} + +func (secpSigner) Sign(pk []byte, msg []byte) ([]byte, error) { + b2sum := blake2b.Sum256(msg) + sig, err := crypto.Sign(pk, b2sum[:]) + if err != nil { + return nil, err + } + + return sig, nil +} + +func (secpSigner) Verify(sig []byte, a address.Address, msg []byte) error { + b2sum := blake2b.Sum256(msg) + pubk, err := crypto.EcRecover(b2sum[:], sig) + if err != nil { + return err + } + + maybeaddr, err := address.NewSecp256k1Address(pubk) + if err != nil { + return err + } + + if a != maybeaddr { + return fmt.Errorf("signature did not match") + } + + return nil +} + +func init() { + sigs.RegisterSignature(crypto2.SigTypeSecp256k1, secpSigner{}) +} diff --git a/vendor/github.com/filecoin-project/lotus/lib/sigs/sigs.go b/vendor/github.com/filecoin-project/lotus/lib/sigs/sigs.go new file mode 100644 index 0000000000..1f56846a82 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/lib/sigs/sigs.go @@ -0,0 +1,112 @@ +package sigs + 
+import ( + "context" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/crypto" + "go.opencensus.io/trace" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/types" +) + +// Sign takes in signature type, private key and message. Returns a signature for that message. +// Valid sigTypes are: "secp256k1" and "bls" +func Sign(sigType crypto.SigType, privkey []byte, msg []byte) (*crypto.Signature, error) { + sv, ok := sigs[sigType] + if !ok { + return nil, fmt.Errorf("cannot sign message with signature of unsupported type: %v", sigType) + } + + sb, err := sv.Sign(privkey, msg) + if err != nil { + return nil, err + } + return &crypto.Signature{ + Type: sigType, + Data: sb, + }, nil +} + +// Verify verifies signatures +func Verify(sig *crypto.Signature, addr address.Address, msg []byte) error { + if sig == nil { + return xerrors.Errorf("signature is nil") + } + + if addr.Protocol() == address.ID { + return fmt.Errorf("must resolve ID addresses before using them to verify a signature") + } + + sv, ok := sigs[sig.Type] + if !ok { + return fmt.Errorf("cannot verify signature of unsupported type: %v", sig.Type) + } + + return sv.Verify(sig.Data, addr, msg) +} + +// Generate generates private key of given type +func Generate(sigType crypto.SigType) ([]byte, error) { + sv, ok := sigs[sigType] + if !ok { + return nil, fmt.Errorf("cannot generate private key of unsupported type: %v", sigType) + } + + return sv.GenPrivate() +} + +// ToPublic converts private key to public key +func ToPublic(sigType crypto.SigType, pk []byte) ([]byte, error) { + sv, ok := sigs[sigType] + if !ok { + return nil, fmt.Errorf("cannot generate public key of unsupported type: %v", sigType) + } + + return sv.ToPublic(pk) +} + +func CheckBlockSignature(ctx context.Context, blk *types.BlockHeader, worker address.Address) error { + _, span := trace.StartSpan(ctx, "checkBlockSignature") + defer span.End() + + if blk.IsValidated() { + return 
nil + } + + if blk.BlockSig == nil { + return xerrors.New("block signature not present") + } + + sigb, err := blk.SigningBytes() + if err != nil { + return xerrors.Errorf("failed to get block signing bytes: %w", err) + } + + err = Verify(blk.BlockSig, worker, sigb) + if err == nil { + blk.SetValidated() + } + + return err +} + +// SigShim is used for introducing signature functions +type SigShim interface { + GenPrivate() ([]byte, error) + ToPublic(pk []byte) ([]byte, error) + Sign(pk []byte, msg []byte) ([]byte, error) + Verify(sig []byte, a address.Address, msg []byte) error +} + +var sigs map[crypto.SigType]SigShim + +// RegisterSignature should be only used during init +func RegisterSignature(typ crypto.SigType, vs SigShim) { + if sigs == nil { + sigs = make(map[crypto.SigType]SigShim) + } + sigs[typ] = vs +} diff --git a/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/api.go b/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/api.go new file mode 100644 index 0000000000..d57b05cfac --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/api.go @@ -0,0 +1,10 @@ +package dtypes + +import ( + "github.com/gbrlsnchs/jwt/v3" + "github.com/multiformats/go-multiaddr" +) + +type APIAlg jwt.HMACSHA + +type APIEndpoint multiaddr.Multiaddr diff --git a/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/beacon.go b/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/beacon.go new file mode 100644 index 0000000000..28bbdf281d --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/beacon.go @@ -0,0 +1,16 @@ +package dtypes + +import "github.com/filecoin-project/go-state-types/abi" + +type DrandSchedule []DrandPoint + +type DrandPoint struct { + Start abi.ChainEpoch + Config DrandConfig +} + +type DrandConfig struct { + Servers []string + Relays []string + ChainInfoJSON string +} diff --git a/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/bootstrap.go 
b/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/bootstrap.go new file mode 100644 index 0000000000..96cd2f673e --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/bootstrap.go @@ -0,0 +1,8 @@ +package dtypes + +import "github.com/libp2p/go-libp2p-core/peer" + +type BootstrapPeers []peer.AddrInfo +type DrandBootstrap []peer.AddrInfo + +type Bootstrapper bool diff --git a/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/chain.go b/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/chain.go new file mode 100644 index 0000000000..0c924c12ab --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/chain.go @@ -0,0 +1,4 @@ +package dtypes + +type NetworkName string +type AfterGenesisSet struct{} diff --git a/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/miner.go b/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/miner.go new file mode 100644 index 0000000000..1ef157b7ed --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/miner.go @@ -0,0 +1,76 @@ +package dtypes + +import ( + "context" + "time" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" +) + +type MinerAddress address.Address +type MinerID abi.ActorID + +// ConsiderOnlineStorageDealsConfigFunc is a function which reads from miner +// config to determine if the user has disabled storage deals (or not). +type ConsiderOnlineStorageDealsConfigFunc func() (bool, error) + +// SetConsiderOnlineStorageDealsConfigFunc is a function which is used to +// disable or enable storage deal acceptance. 
+type SetConsiderOnlineStorageDealsConfigFunc func(bool) error + +// ConsiderOnlineRetrievalDealsConfigFunc is a function which reads from miner +// config to determine if the user has disabled retrieval acceptance (or not). +type ConsiderOnlineRetrievalDealsConfigFunc func() (bool, error) + +// SetConsiderOnlineRetrievalDealsConfigFunc is a function which is used to +// disable or enable retrieval deal acceptance. +type SetConsiderOnlineRetrievalDealsConfigFunc func(bool) error + +// StorageDealPieceCidBlocklistConfigFunc is a function which reads from miner +// config to obtain a list of CIDs for which the miner will not accept +// storage proposals. +type StorageDealPieceCidBlocklistConfigFunc func() ([]cid.Cid, error) + +// SetStorageDealPieceCidBlocklistConfigFunc is a function which is used to set a +// list of CIDs for which the miner will reject deal proposals. +type SetStorageDealPieceCidBlocklistConfigFunc func([]cid.Cid) error + +// ConsiderOfflineStorageDealsConfigFunc is a function which reads from miner +// config to determine if the user has disabled storage deals (or not). +type ConsiderOfflineStorageDealsConfigFunc func() (bool, error) + +// SetConsiderOfflineStorageDealsConfigFunc is a function which is used to +// disable or enable storage deal acceptance. +type SetConsiderOfflineStorageDealsConfigFunc func(bool) error + +// ConsiderOfflineRetrievalDealsConfigFunc is a function which reads from miner +// config to determine if the user has disabled retrieval acceptance (or not). +type ConsiderOfflineRetrievalDealsConfigFunc func() (bool, error) + +// SetConsiderOfflineRetrievalDealsConfigFunc is a function which is used to +// disable or enable retrieval deal acceptance. +type SetConsiderOfflineRetrievalDealsConfigFunc func(bool) error + +// SetSealingDelay sets how long a sector waits for more deals before sealing begins. 
+type SetSealingConfigFunc func(sealiface.Config) error + +// GetSealingDelay returns how long a sector waits for more deals before sealing begins. +type GetSealingConfigFunc func() (sealiface.Config, error) + +// SetExpectedSealDurationFunc is a function which is used to set how long sealing is expected to take. +// Deals that would need to start earlier than this duration will be rejected. +type SetExpectedSealDurationFunc func(time.Duration) error + +// GetExpectedSealDurationFunc is a function which reads from miner +// too determine how long sealing is expected to take +type GetExpectedSealDurationFunc func() (time.Duration, error) + +type StorageDealFilter func(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error) +type RetrievalDealFilter func(ctx context.Context, deal retrievalmarket.ProviderDealState) (bool, string, error) diff --git a/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/scorekeeper.go b/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/scorekeeper.go new file mode 100644 index 0000000000..7999d19e56 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/scorekeeper.go @@ -0,0 +1,25 @@ +package dtypes + +import ( + "sync" + + peer "github.com/libp2p/go-libp2p-core/peer" + pubsub "github.com/libp2p/go-libp2p-pubsub" +) + +type ScoreKeeper struct { + lk sync.Mutex + scores map[peer.ID]*pubsub.PeerScoreSnapshot +} + +func (sk *ScoreKeeper) Update(scores map[peer.ID]*pubsub.PeerScoreSnapshot) { + sk.lk.Lock() + sk.scores = scores + sk.lk.Unlock() +} + +func (sk *ScoreKeeper) Get() map[peer.ID]*pubsub.PeerScoreSnapshot { + sk.lk.Lock() + defer sk.lk.Unlock() + return sk.scores +} diff --git a/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/shutdown.go b/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/shutdown.go new file mode 100644 index 0000000000..d87c2129aa --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/shutdown.go @@ 
-0,0 +1,5 @@ +package dtypes + +// ShutdownChan is a channel to which you send a value if you intend to shut +// down the daemon (or miner), including the node and RPC server. +type ShutdownChan chan struct{} diff --git a/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/storage.go b/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/storage.go new file mode 100644 index 0000000000..13defda8de --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/node/modules/dtypes/storage.go @@ -0,0 +1,56 @@ +package dtypes + +import ( + bserv "github.com/ipfs/go-blockservice" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-graphsync" + exchange "github.com/ipfs/go-ipfs-exchange-interface" + format "github.com/ipfs/go-ipld-format" + + "github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation" + "github.com/filecoin-project/go-multistore" + + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-fil-markets/piecestore" + "github.com/filecoin-project/go-statestore" + + "github.com/filecoin-project/lotus/lib/blockstore" + "github.com/filecoin-project/lotus/node/repo/importmgr" + "github.com/filecoin-project/lotus/node/repo/retrievalstoremgr" +) + +// MetadataDS stores metadata +// dy default it's namespaced under /metadata in main repo datastore +type MetadataDS datastore.Batching + +type ChainBlockstore blockstore.Blockstore + +type ChainGCLocker blockstore.GCLocker +type ChainGCBlockstore blockstore.GCBlockstore +type ChainBitswap exchange.Interface +type ChainBlockService bserv.BlockService + +type ClientMultiDstore *multistore.MultiStore +type ClientImportMgr *importmgr.Mgr +type ClientBlockstore blockstore.Blockstore +type ClientDealStore *statestore.StateStore +type ClientRequestValidator *requestvalidation.UnifiedRequestValidator +type ClientDatastore datastore.Batching +type ClientRetrievalStoreManager retrievalstoremgr.RetrievalStoreManager + +type Graphsync 
graphsync.GraphExchange + +// ClientDataTransfer is a data transfer manager for the client +type ClientDataTransfer datatransfer.Manager + +type ProviderDealStore *statestore.StateStore +type ProviderPieceStore piecestore.PieceStore +type ProviderRequestValidator *requestvalidation.UnifiedRequestValidator + +// ProviderDataTransfer is a data transfer manager for the provider +type ProviderDataTransfer datatransfer.Manager + +type StagingDAG format.DAGService +type StagingBlockstore blockstore.Blockstore +type StagingGraphsync graphsync.GraphExchange +type StagingMultiDstore *multistore.MultiStore diff --git a/vendor/github.com/filecoin-project/specs-actors/COPYRIGHT b/vendor/github.com/filecoin-project/specs-actors/COPYRIGHT new file mode 100644 index 0000000000..6aa4b36128 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/COPYRIGHT @@ -0,0 +1,3 @@ +Copyright 2020. Protocol Labs, Inc. + +This library is dual-licensed under Apache 2.0 and MIT terms. diff --git a/vendor/github.com/filecoin-project/specs-actors/LICENSE-APACHE b/vendor/github.com/filecoin-project/specs-actors/LICENSE-APACHE new file mode 100644 index 0000000000..22608cf836 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/LICENSE-APACHE @@ -0,0 +1,13 @@ +Copyright 2020. Protocol Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/github.com/filecoin-project/specs-actors/LICENSE-MIT b/vendor/github.com/filecoin-project/specs-actors/LICENSE-MIT new file mode 100644 index 0000000000..c6134ad88a --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/LICENSE-MIT @@ -0,0 +1,19 @@ +Copyright 2020. Protocol Labs, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/cbor_gen.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/cbor_gen.go new file mode 100644 index 0000000000..3f21b4d5e1 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/cbor_gen.go @@ -0,0 +1,205 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package builtin + +import ( + "fmt" + "io" + + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +var lengthBufMinerAddrs = []byte{131} + +func (t *MinerAddrs) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufMinerAddrs); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Owner (address.Address) (struct) + if err := t.Owner.MarshalCBOR(w); err != nil { + return err + } + + // t.Worker (address.Address) (struct) + if err := t.Worker.MarshalCBOR(w); err != nil { + return err + } + + // t.ControlAddrs ([]address.Address) (slice) + if len(t.ControlAddrs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.ControlAddrs was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.ControlAddrs))); err != nil { + return err + } + for _, v := range t.ControlAddrs { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *MinerAddrs) UnmarshalCBOR(r io.Reader) error { + *t = MinerAddrs{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Owner (address.Address) (struct) + + { + + if err := t.Owner.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Owner: %w", err) + } + + } + // t.Worker (address.Address) (struct) + + { + + if err := t.Worker.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Worker: %w", err) + } + + } + // t.ControlAddrs ([]address.Address) (slice) + + maj, extra, err = 
cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.ControlAddrs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.ControlAddrs = make([]address.Address, extra) + } + + for i := 0; i < int(extra); i++ { + + var v address.Address + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.ControlAddrs[i] = v + } + + return nil +} + +var lengthBufConfirmSectorProofsParams = []byte{129} + +func (t *ConfirmSectorProofsParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufConfirmSectorProofsParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Sectors ([]abi.SectorNumber) (slice) + if len(t.Sectors) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Sectors was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Sectors))); err != nil { + return err + } + for _, v := range t.Sectors { + if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { + return err + } + } + return nil +} + +func (t *ConfirmSectorProofsParams) UnmarshalCBOR(r io.Reader) error { + *t = ConfirmSectorProofsParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Sectors ([]abi.SectorNumber) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Sectors: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra 
> 0 { + t.Sectors = make([]abi.SectorNumber, extra) + } + + for i := 0; i < int(extra); i++ { + + maj, val, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return xerrors.Errorf("failed to read uint64 for t.Sectors slice: %w", err) + } + + if maj != cbg.MajUnsignedInt { + return xerrors.Errorf("value read for array t.Sectors was not a uint, instead got %d", maj) + } + + t.Sectors[i] = abi.SectorNumber(val) + } + + return nil +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/codes.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/codes.go new file mode 100644 index 0000000000..b148a2c018 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/codes.go @@ -0,0 +1,97 @@ +package builtin + +import ( + "sort" + + "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" +) + +// The built-in actor code IDs +var ( + SystemActorCodeID cid.Cid + InitActorCodeID cid.Cid + CronActorCodeID cid.Cid + AccountActorCodeID cid.Cid + StoragePowerActorCodeID cid.Cid + StorageMinerActorCodeID cid.Cid + StorageMarketActorCodeID cid.Cid + PaymentChannelActorCodeID cid.Cid + MultisigActorCodeID cid.Cid + RewardActorCodeID cid.Cid + VerifiedRegistryActorCodeID cid.Cid + CallerTypesSignable []cid.Cid +) + +var builtinActors map[cid.Cid]*actorInfo + +type actorInfo struct { + name string + signer bool +} + +func init() { + builder := cid.V1Builder{Codec: cid.Raw, MhType: mh.IDENTITY} + builtinActors = make(map[cid.Cid]*actorInfo) + + for id, info := range map[*cid.Cid]*actorInfo{ //nolint:nomaprange + &SystemActorCodeID: {name: "fil/1/system"}, + &InitActorCodeID: {name: "fil/1/init"}, + &CronActorCodeID: {name: "fil/1/cron"}, + &StoragePowerActorCodeID: {name: "fil/1/storagepower"}, + &StorageMinerActorCodeID: {name: "fil/1/storageminer"}, + &StorageMarketActorCodeID: {name: "fil/1/storagemarket"}, + &PaymentChannelActorCodeID: {name: "fil/1/paymentchannel"}, + &RewardActorCodeID: {name: 
"fil/1/reward"}, + &VerifiedRegistryActorCodeID: {name: "fil/1/verifiedregistry"}, + &AccountActorCodeID: {name: "fil/1/account", signer: true}, + &MultisigActorCodeID: {name: "fil/1/multisig", signer: true}, + } { + c, err := builder.Sum([]byte(info.name)) + if err != nil { + panic(err) + } + *id = c + builtinActors[c] = info + } + + // Set of actor code types that can represent external signing parties. + for id, info := range builtinActors { //nolint:nomaprange + if info.signer { + CallerTypesSignable = append(CallerTypesSignable, id) + } + } + sort.Slice(CallerTypesSignable, func(i, j int) bool { + return CallerTypesSignable[i].KeyString() < CallerTypesSignable[j].KeyString() + }) + +} + +// IsBuiltinActor returns true if the code belongs to an actor defined in this repo. +func IsBuiltinActor(code cid.Cid) bool { + _, isBuiltin := builtinActors[code] + return isBuiltin +} + +// ActorNameByCode returns the (string) name of the actor given a cid code. +func ActorNameByCode(code cid.Cid) string { + if !code.Defined() { + return "" + } + + info, ok := builtinActors[code] + if !ok { + return "" + } + return info.name +} + +// Tests whether a code CID represents an actor that can be an external principal: i.e. an account or multisig. +// We could do something more sophisticated here: https://github.com/filecoin-project/specs-actors/issues/178 +func IsPrincipal(code cid.Cid) bool { + info, ok := builtinActors[code] + if !ok { + return false + } + return info.signer +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/init/cbor_gen.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/init/cbor_gen.go new file mode 100644 index 0000000000..9e38340321 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/init/cbor_gen.go @@ -0,0 +1,318 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package init + +import ( + "fmt" + "io" + + abi "github.com/filecoin-project/go-state-types/abi" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +var lengthBufState = []byte{131} + +func (t *State) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufState); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.AddressMap (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.AddressMap); err != nil { + return xerrors.Errorf("failed to write cid field t.AddressMap: %w", err) + } + + // t.NextID (abi.ActorID) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.NextID)); err != nil { + return err + } + + // t.NetworkName (string) (string) + if len(t.NetworkName) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.NetworkName was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.NetworkName))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.NetworkName)); err != nil { + return err + } + return nil +} + +func (t *State) UnmarshalCBOR(r io.Reader) error { + *t = State{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.AddressMap (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddressMap: %w", err) + } + + t.AddressMap = c + + } + // t.NextID (abi.ActorID) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 
field") + } + t.NextID = abi.ActorID(extra) + + } + // t.NetworkName (string) (string) + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.NetworkName = string(sval) + } + return nil +} + +var lengthBufConstructorParams = []byte{129} + +func (t *ConstructorParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufConstructorParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.NetworkName (string) (string) + if len(t.NetworkName) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.NetworkName was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.NetworkName))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.NetworkName)); err != nil { + return err + } + return nil +} + +func (t *ConstructorParams) UnmarshalCBOR(r io.Reader) error { + *t = ConstructorParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.NetworkName (string) (string) + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.NetworkName = string(sval) + } + return nil +} + +var lengthBufExecParams = []byte{130} + +func (t *ExecParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufExecParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.CodeCID (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.CodeCID); err != nil { + return xerrors.Errorf("failed to write cid field t.CodeCID: %w", err) + } + + // t.ConstructorParams ([]uint8) (slice) + 
if len(t.ConstructorParams) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.ConstructorParams was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.ConstructorParams))); err != nil { + return err + } + + if _, err := w.Write(t.ConstructorParams[:]); err != nil { + return err + } + return nil +} + +func (t *ExecParams) UnmarshalCBOR(r io.Reader) error { + *t = ExecParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.CodeCID (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.CodeCID: %w", err) + } + + t.CodeCID = c + + } + // t.ConstructorParams ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.ConstructorParams: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.ConstructorParams = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.ConstructorParams[:]); err != nil { + return err + } + return nil +} + +var lengthBufExecReturn = []byte{130} + +func (t *ExecReturn) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufExecReturn); err != nil { + return err + } + + // t.IDAddress (address.Address) (struct) + if err := t.IDAddress.MarshalCBOR(w); err != nil { + return err + } + + // t.RobustAddress (address.Address) (struct) + if err := t.RobustAddress.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *ExecReturn) 
UnmarshalCBOR(r io.Reader) error { + *t = ExecReturn{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.IDAddress (address.Address) (struct) + + { + + if err := t.IDAddress.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.IDAddress: %w", err) + } + + } + // t.RobustAddress (address.Address) (struct) + + { + + if err := t.RobustAddress.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.RobustAddress: %w", err) + } + + } + return nil +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/init/init_actor.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/init/init_actor.go new file mode 100644 index 0000000000..c796d34e6a --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/init/init_actor.go @@ -0,0 +1,109 @@ +package init + +import ( + addr "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/exitcode" + cid "github.com/ipfs/go-cid" + + "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/specs-actors/actors/runtime" + autil "github.com/filecoin-project/specs-actors/actors/util" + "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +// The init actor uniquely has the power to create new actors. +// It maintains a table resolving pubkey and temporary actor addresses to the canonical ID-addresses. 
+type Actor struct{} + +func (a Actor) Exports() []interface{} { + return []interface{}{ + builtin.MethodConstructor: a.Constructor, + 2: a.Exec, + } +} + +func (a Actor) Code() cid.Cid { + return builtin.InitActorCodeID +} + +func (a Actor) IsSingleton() bool { + return true +} + +func (a Actor) State() cbor.Er { return new(State) } + +var _ runtime.VMActor = Actor{} + +type ConstructorParams struct { + NetworkName string +} + +func (a Actor) Constructor(rt runtime.Runtime, params *ConstructorParams) *abi.EmptyValue { + rt.ValidateImmediateCallerIs(builtin.SystemActorAddr) + emptyMap, err := adt.MakeEmptyMap(adt.AsStore(rt)).Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to construct state") + + st := ConstructState(emptyMap, params.NetworkName) + rt.StateCreate(st) + return nil +} + +type ExecParams struct { + CodeCID cid.Cid `checked:"true"` // invalid CIDs won't get committed to the state tree + ConstructorParams []byte +} + +type ExecReturn struct { + IDAddress addr.Address // The canonical ID-based address for the actor. + RobustAddress addr.Address // A more expensive but re-org-safe address for the newly created actor. +} + +func (a Actor) Exec(rt runtime.Runtime, params *ExecParams) *ExecReturn { + rt.ValidateImmediateCallerAcceptAny() + callerCodeCID, ok := rt.GetActorCodeCID(rt.Caller()) + autil.AssertMsg(ok, "no code for actor at %s", rt.Caller()) + if !canExec(callerCodeCID, params.CodeCID) { + rt.Abortf(exitcode.ErrForbidden, "caller type %v cannot exec actor type %v", callerCodeCID, params.CodeCID) + } + + // Compute a re-org-stable address. + // This address exists for use by messages coming from outside the system, in order to + // stably address the newly created actor even if a chain re-org causes it to end up with + // a different ID. + uniqueAddress := rt.NewActorAddress() + + // Allocate an ID for this actor. 
+ // Store mapping of pubkey or actor address to actor ID + var st State + var idAddr addr.Address + rt.StateTransaction(&st, func() { + var err error + idAddr, err = st.MapAddressToNewID(adt.AsStore(rt), uniqueAddress) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to allocate ID address") + }) + + // Create an empty actor. + rt.CreateActor(params.CodeCID, idAddr) + + // Invoke constructor. + code := rt.Send(idAddr, builtin.MethodConstructor, runtime.CBORBytes(params.ConstructorParams), rt.ValueReceived(), &builtin.Discard{}) + builtin.RequireSuccess(rt, code, "constructor failed") + + return &ExecReturn{idAddr, uniqueAddress} +} + +func canExec(callerCodeID cid.Cid, execCodeID cid.Cid) bool { + switch execCodeID { + case builtin.StorageMinerActorCodeID: + if callerCodeID == builtin.StoragePowerActorCodeID { + return true + } + return false + case builtin.PaymentChannelActorCodeID, builtin.MultisigActorCodeID: + return true + default: + return false + } +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/init/init_actor_state.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/init/init_actor_state.go new file mode 100644 index 0000000000..c02dfcad75 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/init/init_actor_state.go @@ -0,0 +1,87 @@ +package init + +import ( + addr "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" + + builtin "github.com/filecoin-project/specs-actors/actors/builtin" + autil "github.com/filecoin-project/specs-actors/actors/util" + adt "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +type State struct { + AddressMap cid.Cid // HAMT[addr.Address]abi.ActorID + NextID abi.ActorID + NetworkName string +} + +func ConstructState(addressMapRoot cid.Cid, networkName string) *State { + return 
&State{ + AddressMap: addressMapRoot, + NextID: abi.ActorID(builtin.FirstNonSingletonActorId), + NetworkName: networkName, + } +} + +// ResolveAddress resolves an address to an ID-address, if possible. +// If the provided address is an ID address, it is returned as-is. +// This means that mapped ID-addresses (which should only appear as values, not keys) and +// singleton actor addresses (which are not in the map) pass through unchanged. +// +// Returns an ID-address and `true` if the address was already an ID-address or was resolved in the mapping. +// Returns an undefined address and `false` if the address was not an ID-address and not found in the mapping. +// Returns an error only if state was inconsistent. +func (s *State) ResolveAddress(store adt.Store, address addr.Address) (addr.Address, bool, error) { + // Short-circuit ID address resolution. + if address.Protocol() == addr.ID { + return address, true, nil + } + + // Lookup address. + m, err := adt.AsMap(store, s.AddressMap) + if err != nil { + return addr.Undef, false, xerrors.Errorf("failed to load address map: %w", err) + } + + var actorID cbg.CborInt + found, err := m.Get(abi.AddrKey(address), &actorID) + if err != nil { + return addr.Undef, false, xerrors.Errorf("failed to get from address map: %w", err) + } + if found { + // Reconstruct address from the ActorID. + idAddr, err2 := addr.NewIDAddress(uint64(actorID)) + autil.Assert(err2 == nil) + return idAddr, true, nil + } else { + return addr.Undef, false, nil + } +} + +// Allocates a new ID address and stores a mapping of the argument address to it. +// Returns the newly-allocated address. 
+func (s *State) MapAddressToNewID(store adt.Store, address addr.Address) (addr.Address, error) { + actorID := cbg.CborInt(s.NextID) + s.NextID++ + + m, err := adt.AsMap(store, s.AddressMap) + if err != nil { + return addr.Undef, xerrors.Errorf("failed to load address map: %w", err) + } + err = m.Put(abi.AddrKey(address), &actorID) + if err != nil { + return addr.Undef, xerrors.Errorf("map address failed to store entry: %w", err) + } + amr, err := m.Root() + if err != nil { + return addr.Undef, xerrors.Errorf("failed to get address map root: %w", err) + } + s.AddressMap = amr + + idAddr, err := addr.NewIDAddress(uint64(actorID)) + autil.Assert(err == nil) + return idAddr, nil +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/cbor_gen.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/cbor_gen.go new file mode 100644 index 0000000000..7085efc6f4 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/cbor_gen.go @@ -0,0 +1,1520 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package market + +import ( + "fmt" + "io" + + abi "github.com/filecoin-project/go-state-types/abi" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +var lengthBufState = []byte{139} + +func (t *State) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufState); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Proposals (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Proposals); err != nil { + return xerrors.Errorf("failed to write cid field t.Proposals: %w", err) + } + + // t.States (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.States); err != nil { + return xerrors.Errorf("failed to write cid field t.States: %w", err) + } + + // t.PendingProposals (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.PendingProposals); err != nil { + return xerrors.Errorf("failed to write cid field t.PendingProposals: %w", err) + } + + // t.EscrowTable (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.EscrowTable); err != nil { + return xerrors.Errorf("failed to write cid field t.EscrowTable: %w", err) + } + + // t.LockedTable (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.LockedTable); err != nil { + return xerrors.Errorf("failed to write cid field t.LockedTable: %w", err) + } + + // t.NextID (abi.DealID) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.NextID)); err != nil { + return err + } + + // t.DealOpsByEpoch (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.DealOpsByEpoch); err != nil { + return xerrors.Errorf("failed to write cid field t.DealOpsByEpoch: %w", err) + } + + // t.LastCron (abi.ChainEpoch) (int64) + if t.LastCron >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.LastCron)); err != nil { + return err + } + } else { + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.LastCron-1)); err != nil { + return err + } + } + + // t.TotalClientLockedCollateral (big.Int) (struct) + if err := t.TotalClientLockedCollateral.MarshalCBOR(w); err != nil { + return err + } + + // t.TotalProviderLockedCollateral (big.Int) (struct) + if err := t.TotalProviderLockedCollateral.MarshalCBOR(w); err != nil { + return err + } + + // t.TotalClientStorageFee (big.Int) (struct) + if err := t.TotalClientStorageFee.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *State) UnmarshalCBOR(r io.Reader) error { + *t = State{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 11 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Proposals (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Proposals: %w", err) + } + + t.Proposals = c + + } + // t.States (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.States: %w", err) + } + + t.States = c + + } + // t.PendingProposals (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PendingProposals: %w", err) + } + + t.PendingProposals = c + + } + // t.EscrowTable (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.EscrowTable: %w", err) + } + + t.EscrowTable = c + + } + // t.LockedTable (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.LockedTable: %w", err) + } + + t.LockedTable = c + + } + // t.NextID (abi.DealID) (uint64) + + { + + maj, extra, 
err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.NextID = abi.DealID(extra) + + } + // t.DealOpsByEpoch (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.DealOpsByEpoch: %w", err) + } + + t.DealOpsByEpoch = c + + } + // t.LastCron (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.LastCron = abi.ChainEpoch(extraI) + } + // t.TotalClientLockedCollateral (big.Int) (struct) + + { + + if err := t.TotalClientLockedCollateral.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.TotalClientLockedCollateral: %w", err) + } + + } + // t.TotalProviderLockedCollateral (big.Int) (struct) + + { + + if err := t.TotalProviderLockedCollateral.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.TotalProviderLockedCollateral: %w", err) + } + + } + // t.TotalClientStorageFee (big.Int) (struct) + + { + + if err := t.TotalClientStorageFee.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.TotalClientStorageFee: %w", err) + } + + } + return nil +} + +var lengthBufWithdrawBalanceParams = []byte{130} + +func (t *WithdrawBalanceParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufWithdrawBalanceParams); err != nil { + return err + } + + // t.ProviderOrClientAddress (address.Address) (struct) + if err := 
t.ProviderOrClientAddress.MarshalCBOR(w); err != nil { + return err + } + + // t.Amount (big.Int) (struct) + if err := t.Amount.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *WithdrawBalanceParams) UnmarshalCBOR(r io.Reader) error { + *t = WithdrawBalanceParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.ProviderOrClientAddress (address.Address) (struct) + + { + + if err := t.ProviderOrClientAddress.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ProviderOrClientAddress: %w", err) + } + + } + // t.Amount (big.Int) (struct) + + { + + if err := t.Amount.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Amount: %w", err) + } + + } + return nil +} + +var lengthBufPublishStorageDealsParams = []byte{129} + +func (t *PublishStorageDealsParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufPublishStorageDealsParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Deals ([]market.ClientDealProposal) (slice) + if len(t.Deals) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Deals was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Deals))); err != nil { + return err + } + for _, v := range t.Deals { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *PublishStorageDealsParams) UnmarshalCBOR(r io.Reader) error { + *t = PublishStorageDealsParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != 
cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Deals ([]market.ClientDealProposal) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Deals: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Deals = make([]ClientDealProposal, extra) + } + + for i := 0; i < int(extra); i++ { + + var v ClientDealProposal + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Deals[i] = v + } + + return nil +} + +var lengthBufActivateDealsParams = []byte{130} + +func (t *ActivateDealsParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufActivateDealsParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.DealIDs ([]abi.DealID) (slice) + if len(t.DealIDs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.DealIDs was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.DealIDs))); err != nil { + return err + } + for _, v := range t.DealIDs { + if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { + return err + } + } + + // t.SectorExpiry (abi.ChainEpoch) (int64) + if t.SectorExpiry >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorExpiry)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SectorExpiry-1)); err != nil { + return err + } + } + return nil +} + +func (t *ActivateDealsParams) UnmarshalCBOR(r io.Reader) error { + *t = ActivateDealsParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + 
if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.DealIDs ([]abi.DealID) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.DealIDs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.DealIDs = make([]abi.DealID, extra) + } + + for i := 0; i < int(extra); i++ { + + maj, val, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) + } + + if maj != cbg.MajUnsignedInt { + return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) + } + + t.DealIDs[i] = abi.DealID(val) + } + + // t.SectorExpiry (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SectorExpiry = abi.ChainEpoch(extraI) + } + return nil +} + +var lengthBufVerifyDealsForActivationParams = []byte{131} + +func (t *VerifyDealsForActivationParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufVerifyDealsForActivationParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.DealIDs ([]abi.DealID) (slice) + if len(t.DealIDs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.DealIDs was too long") + 
} + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.DealIDs))); err != nil { + return err + } + for _, v := range t.DealIDs { + if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { + return err + } + } + + // t.SectorExpiry (abi.ChainEpoch) (int64) + if t.SectorExpiry >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorExpiry)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SectorExpiry-1)); err != nil { + return err + } + } + + // t.SectorStart (abi.ChainEpoch) (int64) + if t.SectorStart >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorStart)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SectorStart-1)); err != nil { + return err + } + } + return nil +} + +func (t *VerifyDealsForActivationParams) UnmarshalCBOR(r io.Reader) error { + *t = VerifyDealsForActivationParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.DealIDs ([]abi.DealID) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.DealIDs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.DealIDs = make([]abi.DealID, extra) + } + + for i := 0; i < int(extra); i++ { + + maj, val, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) + } + + if maj != cbg.MajUnsignedInt 
{ + return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) + } + + t.DealIDs[i] = abi.DealID(val) + } + + // t.SectorExpiry (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SectorExpiry = abi.ChainEpoch(extraI) + } + // t.SectorStart (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SectorStart = abi.ChainEpoch(extraI) + } + return nil +} + +var lengthBufVerifyDealsForActivationReturn = []byte{130} + +func (t *VerifyDealsForActivationReturn) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufVerifyDealsForActivationReturn); err != nil { + return err + } + + // t.DealWeight (big.Int) (struct) + if err := t.DealWeight.MarshalCBOR(w); err != nil { + return err + } + + // t.VerifiedDealWeight (big.Int) (struct) + if err := t.VerifiedDealWeight.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *VerifyDealsForActivationReturn) UnmarshalCBOR(r io.Reader) error { + *t = VerifyDealsForActivationReturn{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + 
+ maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.DealWeight (big.Int) (struct) + + { + + if err := t.DealWeight.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.DealWeight: %w", err) + } + + } + // t.VerifiedDealWeight (big.Int) (struct) + + { + + if err := t.VerifiedDealWeight.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.VerifiedDealWeight: %w", err) + } + + } + return nil +} + +var lengthBufComputeDataCommitmentParams = []byte{130} + +func (t *ComputeDataCommitmentParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufComputeDataCommitmentParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.DealIDs ([]abi.DealID) (slice) + if len(t.DealIDs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.DealIDs was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.DealIDs))); err != nil { + return err + } + for _, v := range t.DealIDs { + if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { + return err + } + } + + // t.SectorType (abi.RegisteredSealProof) (int64) + if t.SectorType >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorType)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SectorType-1)); err != nil { + return err + } + } + return nil +} + +func (t *ComputeDataCommitmentParams) UnmarshalCBOR(r io.Reader) error { + *t = ComputeDataCommitmentParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + 
return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.DealIDs ([]abi.DealID) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.DealIDs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.DealIDs = make([]abi.DealID, extra) + } + + for i := 0; i < int(extra); i++ { + + maj, val, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) + } + + if maj != cbg.MajUnsignedInt { + return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) + } + + t.DealIDs[i] = abi.DealID(val) + } + + // t.SectorType (abi.RegisteredSealProof) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SectorType = abi.RegisteredSealProof(extraI) + } + return nil +} + +var lengthBufOnMinerSectorsTerminateParams = []byte{130} + +func (t *OnMinerSectorsTerminateParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufOnMinerSectorsTerminateParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Epoch (abi.ChainEpoch) (int64) + if t.Epoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Epoch)); err != 
nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil { + return err + } + } + + // t.DealIDs ([]abi.DealID) (slice) + if len(t.DealIDs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.DealIDs was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.DealIDs))); err != nil { + return err + } + for _, v := range t.DealIDs { + if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { + return err + } + } + return nil +} + +func (t *OnMinerSectorsTerminateParams) UnmarshalCBOR(r io.Reader) error { + *t = OnMinerSectorsTerminateParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Epoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Epoch = abi.ChainEpoch(extraI) + } + // t.DealIDs ([]abi.DealID) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.DealIDs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.DealIDs = make([]abi.DealID, extra) + } + + for i := 0; i < int(extra); i++ { + + maj, val, err := 
cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) + } + + if maj != cbg.MajUnsignedInt { + return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) + } + + t.DealIDs[i] = abi.DealID(val) + } + + return nil +} + +var lengthBufPublishStorageDealsReturn = []byte{129} + +func (t *PublishStorageDealsReturn) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufPublishStorageDealsReturn); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.IDs ([]abi.DealID) (slice) + if len(t.IDs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.IDs was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.IDs))); err != nil { + return err + } + for _, v := range t.IDs { + if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { + return err + } + } + return nil +} + +func (t *PublishStorageDealsReturn) UnmarshalCBOR(r io.Reader) error { + *t = PublishStorageDealsReturn{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.IDs ([]abi.DealID) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.IDs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.IDs = make([]abi.DealID, extra) + } + + for i := 0; i < int(extra); i++ { + + maj, val, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return xerrors.Errorf("failed to read uint64 for 
t.IDs slice: %w", err) + } + + if maj != cbg.MajUnsignedInt { + return xerrors.Errorf("value read for array t.IDs was not a uint, instead got %d", maj) + } + + t.IDs[i] = abi.DealID(val) + } + + return nil +} + +var lengthBufDealProposal = []byte{139} + +func (t *DealProposal) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufDealProposal); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.PieceCID (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + + // t.PieceSize (abi.PaddedPieceSize) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PieceSize)); err != nil { + return err + } + + // t.VerifiedDeal (bool) (bool) + if err := cbg.WriteBool(w, t.VerifiedDeal); err != nil { + return err + } + + // t.Client (address.Address) (struct) + if err := t.Client.MarshalCBOR(w); err != nil { + return err + } + + // t.Provider (address.Address) (struct) + if err := t.Provider.MarshalCBOR(w); err != nil { + return err + } + + // t.Label (string) (string) + if len(t.Label) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Label was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Label))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Label)); err != nil { + return err + } + + // t.StartEpoch (abi.ChainEpoch) (int64) + if t.StartEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil { + return err + } + } + + // t.EndEpoch (abi.ChainEpoch) (int64) + if t.EndEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, 
uint64(t.EndEpoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil { + return err + } + } + + // t.StoragePricePerEpoch (big.Int) (struct) + if err := t.StoragePricePerEpoch.MarshalCBOR(w); err != nil { + return err + } + + // t.ProviderCollateral (big.Int) (struct) + if err := t.ProviderCollateral.MarshalCBOR(w); err != nil { + return err + } + + // t.ClientCollateral (big.Int) (struct) + if err := t.ClientCollateral.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *DealProposal) UnmarshalCBOR(r io.Reader) error { + *t = DealProposal{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 11 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.PieceCID (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = c + + } + // t.PieceSize (abi.PaddedPieceSize) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PieceSize = abi.PaddedPieceSize(extra) + + } + // t.VerifiedDeal (bool) (bool) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.VerifiedDeal = false + case 21: + t.VerifiedDeal = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.Client (address.Address) (struct) + + { + + if err := t.Client.UnmarshalCBOR(br); err != nil { + return 
xerrors.Errorf("unmarshaling t.Client: %w", err) + } + + } + // t.Provider (address.Address) (struct) + + { + + if err := t.Provider.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Provider: %w", err) + } + + } + // t.Label (string) (string) + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Label = string(sval) + } + // t.StartEpoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.StartEpoch = abi.ChainEpoch(extraI) + } + // t.EndEpoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.EndEpoch = abi.ChainEpoch(extraI) + } + // t.StoragePricePerEpoch (big.Int) (struct) + + { + + if err := t.StoragePricePerEpoch.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.StoragePricePerEpoch: %w", err) + } + + } + // t.ProviderCollateral (big.Int) (struct) + + { + + if err := t.ProviderCollateral.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ProviderCollateral: %w", err) + } + + } + // t.ClientCollateral (big.Int) (struct) + + { + + if err := t.ClientCollateral.UnmarshalCBOR(br); err != 
nil { + return xerrors.Errorf("unmarshaling t.ClientCollateral: %w", err) + } + + } + return nil +} + +var lengthBufClientDealProposal = []byte{130} + +func (t *ClientDealProposal) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufClientDealProposal); err != nil { + return err + } + + // t.Proposal (market.DealProposal) (struct) + if err := t.Proposal.MarshalCBOR(w); err != nil { + return err + } + + // t.ClientSignature (crypto.Signature) (struct) + if err := t.ClientSignature.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *ClientDealProposal) UnmarshalCBOR(r io.Reader) error { + *t = ClientDealProposal{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Proposal (market.DealProposal) (struct) + + { + + if err := t.Proposal.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Proposal: %w", err) + } + + } + // t.ClientSignature (crypto.Signature) (struct) + + { + + if err := t.ClientSignature.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ClientSignature: %w", err) + } + + } + return nil +} + +var lengthBufDealState = []byte{131} + +func (t *DealState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufDealState); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.SectorStartEpoch (abi.ChainEpoch) (int64) + if t.SectorStartEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorStartEpoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, 
cbg.MajNegativeInt, uint64(-t.SectorStartEpoch-1)); err != nil { + return err + } + } + + // t.LastUpdatedEpoch (abi.ChainEpoch) (int64) + if t.LastUpdatedEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.LastUpdatedEpoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.LastUpdatedEpoch-1)); err != nil { + return err + } + } + + // t.SlashEpoch (abi.ChainEpoch) (int64) + if t.SlashEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SlashEpoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SlashEpoch-1)); err != nil { + return err + } + } + return nil +} + +func (t *DealState) UnmarshalCBOR(r io.Reader) error { + *t = DealState{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.SectorStartEpoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SectorStartEpoch = abi.ChainEpoch(extraI) + } + // t.LastUpdatedEpoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) 
+ if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.LastUpdatedEpoch = abi.ChainEpoch(extraI) + } + // t.SlashEpoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SlashEpoch = abi.ChainEpoch(extraI) + } + return nil +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/deal.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/deal.go new file mode 100644 index 0000000000..1958d0c1db --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/deal.go @@ -0,0 +1,85 @@ +package market + +import ( + "bytes" + + addr "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + acrypto "github.com/filecoin-project/go-state-types/crypto" + cid "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" +) + +var PieceCIDPrefix = cid.Prefix{ + Version: 1, + Codec: cid.FilCommitmentUnsealed, + MhType: mh.SHA2_256_TRUNC254_PADDED, + MhLength: 32, +} + +// Note: Deal Collateral is only released and returned to clients and miners +// when the storage deal stops counting towards power. 
In the current iteration, +// it will be released when the sector containing the storage deals expires, +// even though some storage deals can expire earlier than the sector does. +// Collaterals are denominated in PerEpoch to incur a cost for self dealing or +// minimal deals that last for a long time. +// Note: ClientCollateralPerEpoch may not be needed and removed pending future confirmation. +// There will be a Minimum value for both client and provider deal collateral. +type DealProposal struct { + PieceCID cid.Cid `checked:"true"` // Checked in validateDeal, CommP + PieceSize abi.PaddedPieceSize + VerifiedDeal bool + Client addr.Address + Provider addr.Address + + // Label is an arbitrary client chosen label to apply to the deal + Label string + + // Nominal start epoch. Deal payment is linear between StartEpoch and EndEpoch, + // with total amount StoragePricePerEpoch * (EndEpoch - StartEpoch). + // Storage deal must appear in a sealed (proven) sector no later than StartEpoch, + // otherwise it is invalid. 
+ StartEpoch abi.ChainEpoch + EndEpoch abi.ChainEpoch + StoragePricePerEpoch abi.TokenAmount + + ProviderCollateral abi.TokenAmount + ClientCollateral abi.TokenAmount +} + +// ClientDealProposal is a DealProposal signed by a client +type ClientDealProposal struct { + Proposal DealProposal + ClientSignature acrypto.Signature +} + +func (p *DealProposal) Duration() abi.ChainEpoch { + return p.EndEpoch - p.StartEpoch +} + +func (p *DealProposal) TotalStorageFee() abi.TokenAmount { + return big.Mul(p.StoragePricePerEpoch, big.NewInt(int64(p.Duration()))) +} + +func (p *DealProposal) ClientBalanceRequirement() abi.TokenAmount { + return big.Add(p.ClientCollateral, p.TotalStorageFee()) +} + +func (p *DealProposal) ProviderBalanceRequirement() abi.TokenAmount { + return p.ProviderCollateral +} + +func (p *DealProposal) Cid() (cid.Cid, error) { + buf := new(bytes.Buffer) + if err := p.MarshalCBOR(buf); err != nil { + return cid.Undef, err + } + return abi.CidBuilder.Sum(buf.Bytes()) +} + +type DealState struct { + SectorStartEpoch abi.ChainEpoch // -1 if not yet included in proven sector + LastUpdatedEpoch abi.ChainEpoch // -1 if deal state never updated + SlashEpoch abi.ChainEpoch // -1 if deal never slashed +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/market_actor.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/market_actor.go new file mode 100644 index 0000000000..02a59634a7 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/market_actor.go @@ -0,0 +1,780 @@ +package market + +import ( + "bytes" + "encoding/binary" + "sort" + + addr "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/exitcode" + rtt 
"github.com/filecoin-project/go-state-types/rt" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/specs-actors/actors/builtin/power" + "github.com/filecoin-project/specs-actors/actors/builtin/reward" + "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + "github.com/filecoin-project/specs-actors/actors/runtime" + . "github.com/filecoin-project/specs-actors/actors/util" + "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +type Actor struct{} + +type Runtime = runtime.Runtime + +func (a Actor) Exports() []interface{} { + return []interface{}{ + builtin.MethodConstructor: a.Constructor, + 2: a.AddBalance, + 3: a.WithdrawBalance, + 4: a.PublishStorageDeals, + 5: a.VerifyDealsForActivation, + 6: a.ActivateDeals, + 7: a.OnMinerSectorsTerminate, + 8: a.ComputeDataCommitment, + 9: a.CronTick, + } +} + +func (a Actor) Code() cid.Cid { + return builtin.StorageMarketActorCodeID +} + +func (a Actor) IsSingleton() bool { + return true +} + +func (a Actor) State() cbor.Er { + return new(State) +} + +var _ runtime.VMActor = Actor{} + +//////////////////////////////////////////////////////////////////////////////// +// Actor methods +//////////////////////////////////////////////////////////////////////////////// + +func (a Actor) Constructor(rt Runtime, _ *abi.EmptyValue) *abi.EmptyValue { + rt.ValidateImmediateCallerIs(builtin.SystemActorAddr) + + emptyArray, err := adt.MakeEmptyArray(adt.AsStore(rt)).Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to create state") + + emptyMap, err := adt.MakeEmptyMap(adt.AsStore(rt)).Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to create state") + + emptyMSet, err := MakeEmptySetMultimap(adt.AsStore(rt)).Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to create state") + + st := 
ConstructState(emptyArray, emptyMap, emptyMSet) + rt.StateCreate(st) + return nil +} + +type WithdrawBalanceParams struct { + ProviderOrClientAddress addr.Address + Amount abi.TokenAmount +} + +// Attempt to withdraw the specified amount from the balance held in escrow. +// If less than the specified amount is available, yields the entire available balance. +func (a Actor) WithdrawBalance(rt Runtime, params *WithdrawBalanceParams) *abi.EmptyValue { + if params.Amount.LessThan(big.Zero()) { + rt.Abortf(exitcode.ErrIllegalArgument, "negative amount %v", params.Amount) + } + // withdrawal can ONLY be done by a signing party. + rt.ValidateImmediateCallerType(builtin.CallerTypesSignable...) + + nominal, recipient, approvedCallers := escrowAddress(rt, params.ProviderOrClientAddress) + // for providers -> only corresponding owner or worker can withdraw + // for clients -> only the client i.e the recipient can withdraw + rt.ValidateImmediateCallerIs(approvedCallers...) + + amountExtracted := abi.NewTokenAmount(0) + var st State + rt.StateTransaction(&st, func() { + msm, err := st.mutator(adt.AsStore(rt)).withEscrowTable(WritePermission). 
+ withLockedTable(WritePermission).build() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load state") + + // The withdrawable amount might be slightly less than nominal + // depending on whether or not all relevant entries have been processed + // by cron + minBalance, err := msm.lockedTable.Get(nominal) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get locked balance") + + ex, err := msm.escrowTable.SubtractWithMinimum(nominal, params.Amount, minBalance) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to subtract from escrow table") + + err = msm.commitState() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush state") + + amountExtracted = ex + }) + + code := rt.Send(recipient, builtin.MethodSend, nil, amountExtracted, &builtin.Discard{}) + builtin.RequireSuccess(rt, code, "failed to send funds") + return nil +} + +// Deposits the received value into the balance held in escrow. +func (a Actor) AddBalance(rt Runtime, providerOrClientAddress *addr.Address) *abi.EmptyValue { + msgValue := rt.ValueReceived() + builtin.RequireParam(rt, msgValue.GreaterThan(big.Zero()), "balance to add must be greater than zero") + + // only signing parties can add balance for client AND provider. + rt.ValidateImmediateCallerType(builtin.CallerTypesSignable...) + + nominal, _, _ := escrowAddress(rt, *providerOrClientAddress) + + var st State + rt.StateTransaction(&st, func() { + msm, err := st.mutator(adt.AsStore(rt)).withEscrowTable(WritePermission). 
+ withLockedTable(WritePermission).build() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load state") + + err = msm.escrowTable.Add(nominal, msgValue) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to add balance to escrow table") + + err = msm.commitState() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush state") + }) + return nil +} + +type PublishStorageDealsParams struct { + Deals []ClientDealProposal +} + +type PublishStorageDealsReturn struct { + IDs []abi.DealID +} + +// Publish a new set of storage deals (not yet included in a sector). +func (a Actor) PublishStorageDeals(rt Runtime, params *PublishStorageDealsParams) *PublishStorageDealsReturn { + + // Deal message must have a From field identical to the provider of all the deals. + // This allows us to retain and verify only the client's signature in each deal proposal itself. + rt.ValidateImmediateCallerType(builtin.CallerTypesSignable...) + if len(params.Deals) == 0 { + rt.Abortf(exitcode.ErrIllegalArgument, "empty deals parameter") + } + + // All deals should have the same provider so get worker once + providerRaw := params.Deals[0].Proposal.Provider + provider, ok := rt.ResolveAddress(providerRaw) + if !ok { + rt.Abortf(exitcode.ErrNotFound, "failed to resolve provider address %v", providerRaw) + } + + codeID, ok := rt.GetActorCodeCID(provider) + builtin.RequireParam(rt, ok, "no codeId for address %v", provider) + if !codeID.Equals(builtin.StorageMinerActorCodeID) { + rt.Abortf(exitcode.ErrIllegalArgument, "deal provider is not a StorageMinerActor") + } + + _, worker, _ := builtin.RequestMinerControlAddrs(rt, provider) + if worker != rt.Caller() { + rt.Abortf(exitcode.ErrForbidden, "caller is not provider %v", provider) + } + + resolvedAddrs := make(map[addr.Address]addr.Address, len(params.Deals)) + baselinePower := requestCurrentBaselinePower(rt) + networkRawPower, networkQAPower := requestCurrentNetworkPower(rt) + + var newDealIds 
[]abi.DealID + var st State + rt.StateTransaction(&st, func() { + msm, err := st.mutator(adt.AsStore(rt)).withPendingProposals(WritePermission). + withDealProposals(WritePermission).withDealsByEpoch(WritePermission).withEscrowTable(WritePermission). + withLockedTable(WritePermission).build() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load state") + + // All storage dealProposals will be added in an atomic transaction; this operation will be unrolled if any of them fails. + for di, deal := range params.Deals { + validateDeal(rt, deal, baselinePower, networkRawPower, networkQAPower) + + if deal.Proposal.Provider != provider && deal.Proposal.Provider != providerRaw { + rt.Abortf(exitcode.ErrIllegalArgument, "cannot publish deals from different providers at the same time") + } + + client, ok := rt.ResolveAddress(deal.Proposal.Client) + if !ok { + rt.Abortf(exitcode.ErrNotFound, "failed to resolve client address %v", deal.Proposal.Client) + } + // Normalise provider and client addresses in the proposal stored on chain (after signature verification). 
+ deal.Proposal.Provider = provider + resolvedAddrs[deal.Proposal.Client] = client + deal.Proposal.Client = client + + err, code := msm.lockClientAndProviderBalances(&deal.Proposal) + builtin.RequireNoErr(rt, err, code, "failed to lock balance") + + id := msm.generateStorageDealID() + + pcid, err := deal.Proposal.Cid() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "failed to take cid of proposal %d", di) + + has, err := msm.pendingDeals.Get(abi.CidKey(pcid), nil) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to check for existence of deal proposal") + if has { + rt.Abortf(exitcode.ErrIllegalArgument, "cannot publish duplicate deals") + } + + err = msm.pendingDeals.Put(abi.CidKey(pcid), &deal.Proposal) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to set pending deal") + + err = msm.dealProposals.Set(id, &deal.Proposal) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to set deal") + + // We should randomize the first epoch for when the deal will be processed so an attacker isn't able to + // schedule too many deals for the same tick. + processEpoch, err := genRandNextEpoch(rt.CurrEpoch(), &deal.Proposal, rt.GetRandomnessFromBeacon) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to generate random process epoch") + + err = msm.dealsByEpoch.Put(processEpoch, id) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to set deal ops by epoch") + + newDealIds = append(newDealIds, id) + } + + err = msm.commitState() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush state") + }) + + for _, deal := range params.Deals { + // Check VerifiedClient allowed cap and deduct PieceSize from cap. + // Either the DealSize is within the available DataCap of the VerifiedClient + // or this message will fail. We do not allow a deal that is partially verified. 
+ if deal.Proposal.VerifiedDeal { + resolvedClient, ok := resolvedAddrs[deal.Proposal.Client] + builtin.RequireParam(rt, ok, "could not get resolvedClient client address") + + code := rt.Send( + builtin.VerifiedRegistryActorAddr, + builtin.MethodsVerifiedRegistry.UseBytes, + &verifreg.UseBytesParams{ + Address: resolvedClient, + DealSize: big.NewIntUnsigned(uint64(deal.Proposal.PieceSize)), + }, + abi.NewTokenAmount(0), + &builtin.Discard{}, + ) + builtin.RequireSuccess(rt, code, "failed to add verified deal for client: %v", deal.Proposal.Client) + } + } + + return &PublishStorageDealsReturn{newDealIds} +} + +type VerifyDealsForActivationParams struct { + DealIDs []abi.DealID + SectorExpiry abi.ChainEpoch + SectorStart abi.ChainEpoch +} + +type VerifyDealsForActivationReturn struct { + DealWeight abi.DealWeight + VerifiedDealWeight abi.DealWeight +} + +// Verify that a given set of storage deals is valid for a sector currently being PreCommitted +// and return DealWeight of the set of storage deals given. +// The weight is defined as the sum, over all deals in the set, of the product of deal size and duration. 
+func (A Actor) VerifyDealsForActivation(rt Runtime, params *VerifyDealsForActivationParams) *VerifyDealsForActivationReturn { + rt.ValidateImmediateCallerType(builtin.StorageMinerActorCodeID) + minerAddr := rt.Caller() + + var st State + rt.StateReadonly(&st) + store := adt.AsStore(rt) + + dealWeight, verifiedWeight, err := ValidateDealsForActivation(&st, store, params.DealIDs, minerAddr, params.SectorExpiry, params.SectorStart) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to validate dealProposals for activation") + + return &VerifyDealsForActivationReturn{ + DealWeight: dealWeight, + VerifiedDealWeight: verifiedWeight, + } +} + +type ActivateDealsParams struct { + DealIDs []abi.DealID + SectorExpiry abi.ChainEpoch +} + +// Verify that a given set of storage deals is valid for a sector currently being ProveCommitted, +// update the market's internal state accordingly. +func (a Actor) ActivateDeals(rt Runtime, params *ActivateDealsParams) *abi.EmptyValue { + rt.ValidateImmediateCallerType(builtin.StorageMinerActorCodeID) + minerAddr := rt.Caller() + currEpoch := rt.CurrEpoch() + + var st State + store := adt.AsStore(rt) + + // Update deal dealStates. + rt.StateTransaction(&st, func() { + _, _, err := ValidateDealsForActivation(&st, store, params.DealIDs, minerAddr, params.SectorExpiry, currEpoch) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to validate dealProposals for activation") + + msm, err := st.mutator(adt.AsStore(rt)).withDealStates(WritePermission). + withPendingProposals(ReadOnlyPermission).withDealProposals(ReadOnlyPermission).build() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load state") + + for _, dealID := range params.DealIDs { + // This construction could be replaced with a single "update deal state" state method, possibly batched + // over all deal ids at once. 
+ _, found, err := msm.dealStates.Get(dealID) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get state for dealId %d", dealID) + if found { + rt.Abortf(exitcode.ErrIllegalArgument, "deal %d already included in another sector", dealID) + } + + proposal, err := getDealProposal(msm.dealProposals, dealID) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get dealId %d", dealID) + + propc, err := proposal.Cid() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to calculate proposal CID") + + has, err := msm.pendingDeals.Get(abi.CidKey(propc), nil) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get pending proposal %v", propc) + + if !has { + rt.Abortf(exitcode.ErrIllegalState, "tried to activate deal that was not in the pending set (%s)", propc) + } + + err = msm.dealStates.Set(dealID, &DealState{ + SectorStartEpoch: currEpoch, + LastUpdatedEpoch: epochUndefined, + SlashEpoch: epochUndefined, + }) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to set deal state %d", dealID) + } + + err = msm.commitState() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush state") + }) + + return nil +} + +type ComputeDataCommitmentParams struct { + DealIDs []abi.DealID + SectorType abi.RegisteredSealProof +} + +func (a Actor) ComputeDataCommitment(rt Runtime, params *ComputeDataCommitmentParams) *cbg.CborCid { + rt.ValidateImmediateCallerType(builtin.StorageMinerActorCodeID) + + var st State + rt.StateReadonly(&st) + proposals, err := AsDealProposalArray(adt.AsStore(rt), st.Proposals) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deal dealProposals") + + pieces := make([]abi.PieceInfo, 0) + for _, dealID := range params.DealIDs { + deal, err := getDealProposal(proposals, dealID) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get dealId %d", dealID) + + pieces = append(pieces, abi.PieceInfo{ + PieceCID: deal.PieceCID, 
+ Size: deal.PieceSize, + }) + } + + commd, err := rt.ComputeUnsealedSectorCID(params.SectorType, pieces) + if err != nil { + rt.Abortf(exitcode.ErrIllegalArgument, "failed to compute unsealed sector CID: %s", err) + } + + return (*cbg.CborCid)(&commd) +} + +type OnMinerSectorsTerminateParams struct { + Epoch abi.ChainEpoch + DealIDs []abi.DealID +} + +// Terminate a set of deals in response to their containing sector being terminated. +// Slash provider collateral, refund client collateral, and refund partial unpaid escrow +// amount to client. +func (a Actor) OnMinerSectorsTerminate(rt Runtime, params *OnMinerSectorsTerminateParams) *abi.EmptyValue { + rt.ValidateImmediateCallerType(builtin.StorageMinerActorCodeID) + minerAddr := rt.Caller() + + var st State + rt.StateTransaction(&st, func() { + msm, err := st.mutator(adt.AsStore(rt)).withDealStates(WritePermission). + withDealProposals(ReadOnlyPermission).build() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deal state") + + for _, dealID := range params.DealIDs { + deal, found, err := msm.dealProposals.Get(dealID) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get deal proposal %v", dealID) + // deal could have terminated and hence deleted before the sector is terminated. + // we should simply continue instead of aborting execution here if a deal is not found. + if !found { + continue + } + + AssertMsg(deal.Provider == minerAddr, "caller is not the provider of the deal") + + // do not slash expired deals + if deal.EndEpoch <= params.Epoch { + continue + } + + state, found, err := msm.dealStates.Get(dealID) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get deal state %v", dealID) + if !found { + rt.Abortf(exitcode.ErrIllegalArgument, "no state for deal %v", dealID) + } + + // if a deal is already slashed, we don't need to do anything here. + if state.SlashEpoch != epochUndefined { + continue + } + + // mark the deal for slashing here. 
+ // actual releasing of locked funds for the client and slashing of provider collateral happens in CronTick. + state.SlashEpoch = params.Epoch + + err = msm.dealStates.Set(dealID, state) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to set deal state %v", dealID) + } + + err = msm.commitState() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush state") + }) + return nil +} + +func (a Actor) CronTick(rt Runtime, _ *abi.EmptyValue) *abi.EmptyValue { + rt.ValidateImmediateCallerIs(builtin.CronActorAddr) + amountSlashed := big.Zero() + + var timedOutVerifiedDeals []*DealProposal + + var st State + rt.StateTransaction(&st, func() { + updatesNeeded := make(map[abi.ChainEpoch][]abi.DealID) + + msm, err := st.mutator(adt.AsStore(rt)).withDealStates(WritePermission). + withLockedTable(WritePermission).withEscrowTable(WritePermission).withDealsByEpoch(WritePermission). + withDealProposals(WritePermission).withPendingProposals(WritePermission).build() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load state") + + for i := st.LastCron + 1; i <= rt.CurrEpoch(); i++ { + err = msm.dealsByEpoch.ForEach(i, func(dealID abi.DealID) error { + deal, err := getDealProposal(msm.dealProposals, dealID) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get dealId %d", dealID) + + dcid, err := deal.Cid() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to calculate CID for proposal %v", dealID) + + state, found, err := msm.dealStates.Get(dealID) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get deal state") + + // deal has been published but not activated yet -> terminate it as it has timed out + if !found { + // Not yet appeared in proven sector; check for timeout. 
+ AssertMsg(rt.CurrEpoch() >= deal.StartEpoch, "if sector start is not set, we must be in a timed out state") + + slashed := msm.processDealInitTimedOut(rt, deal) + if !slashed.IsZero() { + amountSlashed = big.Add(amountSlashed, slashed) + } + if deal.VerifiedDeal { + timedOutVerifiedDeals = append(timedOutVerifiedDeals, deal) + } + + // we should not attempt to delete the DealState because it does NOT exist + if err := deleteDealProposalAndState(dealID, msm.dealStates, msm.dealProposals, true, false); err != nil { + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to delete deal") + } + + pdErr := msm.pendingDeals.Delete(abi.CidKey(dcid)) + builtin.RequireNoErr(rt, pdErr, exitcode.ErrIllegalState, "failed to delete pending proposal") + + return nil + } + + // if this is the first cron tick for the deal, it should be in the pending state. + if state.LastUpdatedEpoch == epochUndefined { + pdErr := msm.pendingDeals.Delete(abi.CidKey(dcid)) + builtin.RequireNoErr(rt, pdErr, exitcode.ErrIllegalState, "failed to delete pending proposal") + } + + slashAmount, nextEpoch, removeDeal := msm.updatePendingDealState(rt, state, deal, rt.CurrEpoch()) + Assert(slashAmount.GreaterThanEqual(big.Zero())) + + if removeDeal { + AssertMsg(nextEpoch == epochUndefined, "next scheduled epoch should be undefined as deal has been removed") + + amountSlashed = big.Add(amountSlashed, slashAmount) + err := deleteDealProposalAndState(dealID, msm.dealStates, msm.dealProposals, true, true) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to delete deal proposal and states") + } else { + AssertMsg(nextEpoch > rt.CurrEpoch() && slashAmount.IsZero(), "deal should not be slashed and should have a schedule for next cron tick"+ + " as it has not been removed") + + // Update deal's LastUpdatedEpoch in DealStates + state.LastUpdatedEpoch = rt.CurrEpoch() + err = msm.dealStates.Set(dealID, state) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to set deal 
state") + + updatesNeeded[nextEpoch] = append(updatesNeeded[nextEpoch], dealID) + } + + return nil + }) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to iterate deal ops") + + err = msm.dealsByEpoch.RemoveAll(i) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to delete deal ops for epoch %v", i) + } + + // Iterate changes in sorted order to ensure that loads/stores + // are deterministic. Otherwise, we could end up charging an + // inconsistent amount of gas. + changedEpochs := make([]abi.ChainEpoch, 0, len(updatesNeeded)) + for epoch := range updatesNeeded { //nolint:nomaprange + changedEpochs = append(changedEpochs, epoch) + } + + sort.Slice(changedEpochs, func(i, j int) bool { return changedEpochs[i] < changedEpochs[j] }) + + for _, epoch := range changedEpochs { + err = msm.dealsByEpoch.PutMany(epoch, updatesNeeded[epoch]) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to reinsert deal IDs for epoch %v", epoch) + } + + st.LastCron = rt.CurrEpoch() + + err = msm.commitState() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush state") + }) + + for _, d := range timedOutVerifiedDeals { + code := rt.Send( + builtin.VerifiedRegistryActorAddr, + builtin.MethodsVerifiedRegistry.RestoreBytes, + &verifreg.RestoreBytesParams{ + Address: d.Client, + DealSize: big.NewIntUnsigned(uint64(d.PieceSize)), + }, + abi.NewTokenAmount(0), + &builtin.Discard{}, + ) + + if !code.IsSuccess() { + rt.Log(rtt.ERROR, "failed to send RestoreBytes call to the VerifReg actor for timed-out verified deal, client: %s, dealSize: %v, "+ + "provider: %v, got code %v", d.Client, d.PieceSize, d.Provider, code) + } + } + + if !amountSlashed.IsZero() { + e := rt.Send(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, amountSlashed, &builtin.Discard{}) + builtin.RequireSuccess(rt, e, "expected send to burnt funds actor to succeed") + } + + return nil +} + +func genRandNextEpoch(currEpoch abi.ChainEpoch, deal 
*DealProposal, rbF func(crypto.DomainSeparationTag, abi.ChainEpoch, []byte) abi.Randomness) (abi.ChainEpoch, error) { + buf := bytes.Buffer{} + if err := deal.MarshalCBOR(&buf); err != nil { + return epochUndefined, xerrors.Errorf("failed to marshal proposal: %w", err) + } + + rb := rbF(crypto.DomainSeparationTag_MarketDealCronSeed, currEpoch-1, buf.Bytes()) + + // generate a random epoch in [baseEpoch, baseEpoch + DealUpdatesInterval) + offset := binary.BigEndian.Uint64(rb) + + return deal.StartEpoch + abi.ChainEpoch(offset%uint64(DealUpdatesInterval)), nil +} + +func deleteDealProposalAndState(dealId abi.DealID, states *DealMetaArray, proposals *DealArray, removeProposal bool, + removeState bool) error { + if removeProposal { + if err := proposals.Delete(uint64(dealId)); err != nil { + return xerrors.Errorf("failed to delete deal proposal: %w", err) + } + } + + if removeState { + if err := states.Delete(dealId); err != nil { + return xerrors.Errorf("failed to delete deal state: %w", err) + } + } + + return nil +} + +// +// Exported functions +// + +// Validates a collection of deal dealProposals for activation, and returns their combined weight, +// split into regular deal weight and verified deal weight. 
+func ValidateDealsForActivation(st *State, store adt.Store, dealIDs []abi.DealID, minerAddr addr.Address, + sectorExpiry, currEpoch abi.ChainEpoch) (big.Int, big.Int, error) { + + proposals, err := AsDealProposalArray(store, st.Proposals) + if err != nil { + return big.Int{}, big.Int{}, xerrors.Errorf("failed to load dealProposals: %w", err) + } + + totalDealSpaceTime := big.Zero() + totalVerifiedSpaceTime := big.Zero() + for _, dealID := range dealIDs { + proposal, found, err := proposals.Get(dealID) + if err != nil { + return big.Int{}, big.Int{}, xerrors.Errorf("failed to load deal %d: %w", dealID, err) + } + if !found { + return big.Int{}, big.Int{}, exitcode.ErrNotFound.Wrapf("no such deal %d", dealID) + } + if err = validateDealCanActivate(proposal, minerAddr, sectorExpiry, currEpoch); err != nil { + return big.Int{}, big.Int{}, xerrors.Errorf("cannot activate deal %d: %w", dealID, err) + } + + // Compute deal weight + dealSpaceTime := DealWeight(proposal) + if proposal.VerifiedDeal { + totalVerifiedSpaceTime = big.Add(totalVerifiedSpaceTime, dealSpaceTime) + } else { + totalDealSpaceTime = big.Add(totalDealSpaceTime, dealSpaceTime) + } + } + return totalDealSpaceTime, totalVerifiedSpaceTime, nil +} + +//////////////////////////////////////////////////////////////////////////////// +// Checks +//////////////////////////////////////////////////////////////////////////////// + +func validateDealCanActivate(proposal *DealProposal, minerAddr addr.Address, sectorExpiration, currEpoch abi.ChainEpoch) error { + if proposal.Provider != minerAddr { + return exitcode.ErrForbidden.Wrapf("proposal has provider %v, must be %v", proposal.Provider, minerAddr) + } + if currEpoch > proposal.StartEpoch { + return exitcode.ErrIllegalArgument.Wrapf("proposal start epoch %d has already elapsed at %d", proposal.StartEpoch, currEpoch) + } + if proposal.EndEpoch > sectorExpiration { + return exitcode.ErrIllegalArgument.Wrapf("proposal expiration %d exceeds sector expiration %d", 
proposal.EndEpoch, sectorExpiration) + } + return nil +} + +func validateDeal(rt Runtime, deal ClientDealProposal, baselinePower, networkRawPower, networkQAPower abi.StoragePower) { + if err := dealProposalIsInternallyValid(rt, deal); err != nil { + rt.Abortf(exitcode.ErrIllegalArgument, "Invalid deal proposal: %s", err) + } + + proposal := deal.Proposal + + if err := proposal.PieceSize.Validate(); err != nil { + rt.Abortf(exitcode.ErrIllegalArgument, "proposal piece size is invalid: %v", err) + } + + if !proposal.PieceCID.Defined() { + rt.Abortf(exitcode.ErrIllegalArgument, "proposal PieceCID undefined") + } + + if proposal.PieceCID.Prefix() != PieceCIDPrefix { + rt.Abortf(exitcode.ErrIllegalArgument, "proposal PieceCID had wrong prefix") + } + + if proposal.EndEpoch <= proposal.StartEpoch { + rt.Abortf(exitcode.ErrIllegalArgument, "proposal end before proposal start") + } + + if rt.CurrEpoch() > proposal.StartEpoch { + rt.Abortf(exitcode.ErrIllegalArgument, "Deal start epoch has already elapsed.") + } + + minDuration, maxDuration := dealDurationBounds(proposal.PieceSize) + if proposal.Duration() < minDuration || proposal.Duration() > maxDuration { + rt.Abortf(exitcode.ErrIllegalArgument, "Deal duration out of bounds.") + } + + minPrice, maxPrice := dealPricePerEpochBounds(proposal.PieceSize, proposal.Duration()) + if proposal.StoragePricePerEpoch.LessThan(minPrice) || proposal.StoragePricePerEpoch.GreaterThan(maxPrice) { + rt.Abortf(exitcode.ErrIllegalArgument, "Storage price out of bounds.") + } + + minProviderCollateral, maxProviderCollateral := DealProviderCollateralBounds(proposal.PieceSize, proposal.VerifiedDeal, networkRawPower, networkQAPower, baselinePower, rt.TotalFilCircSupply(), rt.NetworkVersion()) + if proposal.ProviderCollateral.LessThan(minProviderCollateral) || proposal.ProviderCollateral.GreaterThan(maxProviderCollateral) { + rt.Abortf(exitcode.ErrIllegalArgument, "Provider collateral out of bounds.") + } + + minClientCollateral, 
maxClientCollateral := DealClientCollateralBounds(proposal.PieceSize, proposal.Duration()) + if proposal.ClientCollateral.LessThan(minClientCollateral) || proposal.ClientCollateral.GreaterThan(maxClientCollateral) { + rt.Abortf(exitcode.ErrIllegalArgument, "Client collateral out of bounds.") + } +} + +// +// Helpers +// + +// Resolves a provider or client address to the canonical form against which a balance should be held, and +// the designated recipient address of withdrawals (which is the same, for simple account parties). +func escrowAddress(rt Runtime, address addr.Address) (nominal addr.Address, recipient addr.Address, approved []addr.Address) { + // Resolve the provided address to the canonical form against which the balance is held. + nominal, ok := rt.ResolveAddress(address) + if !ok { + rt.Abortf(exitcode.ErrIllegalArgument, "failed to resolve address %v", address) + } + + codeID, ok := rt.GetActorCodeCID(nominal) + if !ok { + rt.Abortf(exitcode.ErrIllegalArgument, "no code for address %v", nominal) + } + + if codeID.Equals(builtin.StorageMinerActorCodeID) { + // Storage miner actor entry; implied funds recipient is the associated owner address. + ownerAddr, workerAddr, _ := builtin.RequestMinerControlAddrs(rt, nominal) + return nominal, ownerAddr, []addr.Address{ownerAddr, workerAddr} + } + + return nominal, nominal, []addr.Address{nominal} +} + +func getDealProposal(proposals *DealArray, dealID abi.DealID) (*DealProposal, error) { + proposal, found, err := proposals.Get(dealID) + if err != nil { + return nil, xerrors.Errorf("failed to load proposal: %w", err) + } + if !found { + return nil, exitcode.ErrNotFound.Wrapf("no such deal %d", dealID) + } + + return proposal, nil +} + +// Requests the current epoch target block reward from the reward actor. 
+func requestCurrentBaselinePower(rt Runtime) abi.StoragePower { + var ret reward.ThisEpochRewardReturn + code := rt.Send(builtin.RewardActorAddr, builtin.MethodsReward.ThisEpochReward, nil, big.Zero(), &ret) + builtin.RequireSuccess(rt, code, "failed to check epoch baseline power") + return ret.ThisEpochBaselinePower +} + +// Requests the current network total power and pledge from the power actor. +func requestCurrentNetworkPower(rt Runtime) (rawPower, qaPower abi.StoragePower) { + var pwr power.CurrentTotalPowerReturn + code := rt.Send(builtin.StoragePowerActorAddr, builtin.MethodsPower.CurrentTotalPower, nil, big.Zero(), &pwr) + builtin.RequireSuccess(rt, code, "failed to check current power") + return pwr.RawBytePower, pwr.QualityAdjPower +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/market_state.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/market_state.go new file mode 100644 index 0000000000..dc076e29e4 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/market_state.go @@ -0,0 +1,410 @@ +package market + +import ( + "bytes" + + abi "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + exitcode "github.com/filecoin-project/go-state-types/exitcode" + "github.com/ipfs/go-cid" + xerrors "golang.org/x/xerrors" + + . "github.com/filecoin-project/specs-actors/actors/util" + "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +const epochUndefined = abi.ChainEpoch(-1) + +// Market mutations +// add / rm balance +// pub deal (always provider) +// activate deal (miner) +// end deal (miner terminate, expire(no activation)) + +// BalanceLockingReason is the reason behind locking an amount. 
+type BalanceLockingReason int + +const ( + ClientCollateral BalanceLockingReason = iota + ClientStorageFee + ProviderCollateral +) + +type State struct { + Proposals cid.Cid // AMT[DealID]DealProposal + States cid.Cid // AMT[DealID]DealState + + // PendingProposals tracks dealProposals that have not yet reached their deal start date. + // We track them here to ensure that miners can't publish the same deal proposal twice + PendingProposals cid.Cid // HAMT[DealCid]DealProposal + + // Total amount held in escrow, indexed by actor address (including both locked and unlocked amounts). + EscrowTable cid.Cid // BalanceTable + + // Amount locked, indexed by actor address. + // Note: the amounts in this table do not affect the overall amount in escrow: + // only the _portion_ of the total escrow amount that is locked. + LockedTable cid.Cid // BalanceTable + + NextID abi.DealID + + // Metadata cached for efficient iteration over deals. + DealOpsByEpoch cid.Cid // SetMultimap, HAMT[epoch]Set + LastCron abi.ChainEpoch + + // Total Client Collateral that is locked -> unlocked when deal is terminated + TotalClientLockedCollateral abi.TokenAmount + // Total Provider Collateral that is locked -> unlocked when deal is terminated + TotalProviderLockedCollateral abi.TokenAmount + // Total storage fee that is locked in escrow -> unlocked when payments are made + TotalClientStorageFee abi.TokenAmount +} + +func ConstructState(emptyArrayCid, emptyMapCid, emptyMSetCid cid.Cid) *State { + return &State{ + Proposals: emptyArrayCid, + States: emptyArrayCid, + PendingProposals: emptyMapCid, + EscrowTable: emptyMapCid, + LockedTable: emptyMapCid, + NextID: abi.DealID(0), + DealOpsByEpoch: emptyMSetCid, + LastCron: abi.ChainEpoch(-1), + + TotalClientLockedCollateral: abi.NewTokenAmount(0), + TotalProviderLockedCollateral: abi.NewTokenAmount(0), + TotalClientStorageFee: abi.NewTokenAmount(0), + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Deal 
state operations +//////////////////////////////////////////////////////////////////////////////// + +func (m *marketStateMutation) updatePendingDealState(rt Runtime, state *DealState, deal *DealProposal, epoch abi.ChainEpoch) (amountSlashed abi.TokenAmount, nextEpoch abi.ChainEpoch, removeDeal bool) { + amountSlashed = abi.NewTokenAmount(0) + + everUpdated := state.LastUpdatedEpoch != epochUndefined + everSlashed := state.SlashEpoch != epochUndefined + + Assert(!everUpdated || (state.LastUpdatedEpoch <= epoch)) // if the deal was ever updated, make sure it didn't happen in the future + + // This would be the case that the first callback somehow triggers before it is scheduled to + // This is expected not to be able to happen + if deal.StartEpoch > epoch { + return amountSlashed, epochUndefined, false + } + + paymentEndEpoch := deal.EndEpoch + if everSlashed { + AssertMsg(epoch >= state.SlashEpoch, "current epoch less than slash epoch") + Assert(state.SlashEpoch <= deal.EndEpoch) + paymentEndEpoch = state.SlashEpoch + } else if epoch < paymentEndEpoch { + paymentEndEpoch = epoch + } + + paymentStartEpoch := deal.StartEpoch + if everUpdated && state.LastUpdatedEpoch > paymentStartEpoch { + paymentStartEpoch = state.LastUpdatedEpoch + } + + numEpochsElapsed := paymentEndEpoch - paymentStartEpoch + + { + // Process deal payment for the elapsed epochs. + totalPayment := big.Mul(big.NewInt(int64(numEpochsElapsed)), deal.StoragePricePerEpoch) + + // the transfer amount can be less than or equal to zero if a deal is slashed before or at the deal's start epoch. 
+ if totalPayment.GreaterThan(big.Zero()) { + m.transferBalance(rt, deal.Client, deal.Provider, totalPayment) + } + } + + if everSlashed { + // unlock client collateral and locked storage fee + paymentRemaining := dealGetPaymentRemaining(deal, state.SlashEpoch) + + // unlock remaining storage fee + if err := m.unlockBalance(deal.Client, paymentRemaining, ClientStorageFee); err != nil { + rt.Abortf(exitcode.ErrIllegalState, "failed to unlock remaining client storage fee: %s", err) + } + // unlock client collateral + if err := m.unlockBalance(deal.Client, deal.ClientCollateral, ClientCollateral); err != nil { + rt.Abortf(exitcode.ErrIllegalState, "failed to unlock client collateral: %s", err) + } + + // slash provider collateral + amountSlashed = deal.ProviderCollateral + if err := m.slashBalance(deal.Provider, amountSlashed, ProviderCollateral); err != nil { + rt.Abortf(exitcode.ErrIllegalState, "slashing balance: %s", err) + } + + return amountSlashed, epochUndefined, true + } + + if epoch >= deal.EndEpoch { + m.processDealExpired(rt, deal, state) + return amountSlashed, epochUndefined, true + } + + // We're explicitly not inspecting the end epoch and may process a deal's expiration late, in order to prevent an outsider + // from loading a cron tick by activating too many deals with the same end epoch. + nextEpoch = epoch + DealUpdatesInterval + + return amountSlashed, nextEpoch, false +} + +// Deal start deadline elapsed without appearing in a proven sector. +// Slash a portion of provider's collateral, and unlock remaining collaterals +// for both provider and client. 
+func (m *marketStateMutation) processDealInitTimedOut(rt Runtime, deal *DealProposal) abi.TokenAmount { + if err := m.unlockBalance(deal.Client, deal.TotalStorageFee(), ClientStorageFee); err != nil { + rt.Abortf(exitcode.ErrIllegalState, "failure unlocking client storage fee: %s", err) + } + if err := m.unlockBalance(deal.Client, deal.ClientCollateral, ClientCollateral); err != nil { + rt.Abortf(exitcode.ErrIllegalState, "failure unlocking client collateral: %s", err) + } + + amountSlashed := collateralPenaltyForDealActivationMissed(deal.ProviderCollateral) + amountRemaining := big.Sub(deal.ProviderBalanceRequirement(), amountSlashed) + + if err := m.slashBalance(deal.Provider, amountSlashed, ProviderCollateral); err != nil { + rt.Abortf(exitcode.ErrIllegalState, "failed to slash balance: %s", err) + } + + if err := m.unlockBalance(deal.Provider, amountRemaining, ProviderCollateral); err != nil { + rt.Abortf(exitcode.ErrIllegalState, "failed to unlock deal provider balance: %s", err) + } + + return amountSlashed +} + +// Normal expiration. Unlock collaterals for both provider and client. 
+func (m *marketStateMutation) processDealExpired(rt Runtime, deal *DealProposal, state *DealState) { + Assert(state.SectorStartEpoch != epochUndefined) + + // Note: payment has already been completed at this point (_rtProcessDealPaymentEpochsElapsed) + if err := m.unlockBalance(deal.Provider, deal.ProviderCollateral, ProviderCollateral); err != nil { + rt.Abortf(exitcode.ErrIllegalState, "failed unlocking deal provider balance: %s", err) + } + + if err := m.unlockBalance(deal.Client, deal.ClientCollateral, ClientCollateral); err != nil { + rt.Abortf(exitcode.ErrIllegalState, "failed unlocking deal client balance: %s", err) + } +} + +func (m *marketStateMutation) generateStorageDealID() abi.DealID { + ret := m.nextDealId + m.nextDealId = m.nextDealId + abi.DealID(1) + return ret +} + +//////////////////////////////////////////////////////////////////////////////// +// State utility functions +//////////////////////////////////////////////////////////////////////////////// + +func dealProposalIsInternallyValid(rt Runtime, proposal ClientDealProposal) error { + // Note: we do not verify the provider signature here, since this is implicit in the + // authenticity of the on-chain message publishing the deal. + buf := bytes.Buffer{} + err := proposal.Proposal.MarshalCBOR(&buf) + if err != nil { + return xerrors.Errorf("proposal signature verification failed to marshal proposal: %w", err) + } + err = rt.VerifySignature(proposal.ClientSignature, proposal.Proposal.Client, buf.Bytes()) + if err != nil { + return xerrors.Errorf("signature proposal invalid: %w", err) + } + return nil +} + +func dealGetPaymentRemaining(deal *DealProposal, slashEpoch abi.ChainEpoch) abi.TokenAmount { + Assert(slashEpoch <= deal.EndEpoch) + + // Payments are always for start -> end epoch irrespective of when the deal is slashed. 
+ if slashEpoch < deal.StartEpoch { + slashEpoch = deal.StartEpoch + } + + durationRemaining := deal.EndEpoch - slashEpoch + Assert(durationRemaining >= 0) + + return big.Mul(big.NewInt(int64(durationRemaining)), deal.StoragePricePerEpoch) +} + +// MarketStateMutationPermission is the mutation permission on a state field +type MarketStateMutationPermission int + +const ( + // Invalid means NO permission + Invalid MarketStateMutationPermission = iota + // ReadOnlyPermission allows reading but not mutating the field + ReadOnlyPermission + // WritePermission allows mutating the field + WritePermission +) + +type marketStateMutation struct { + st *State + store adt.Store + + proposalPermit MarketStateMutationPermission + dealProposals *DealArray + + statePermit MarketStateMutationPermission + dealStates *DealMetaArray + + escrowPermit MarketStateMutationPermission + escrowTable *adt.BalanceTable + + pendingPermit MarketStateMutationPermission + pendingDeals *adt.Map + + dpePermit MarketStateMutationPermission + dealsByEpoch *SetMultimap + + lockedPermit MarketStateMutationPermission + lockedTable *adt.BalanceTable + totalClientLockedCollateral abi.TokenAmount + totalProviderLockedCollateral abi.TokenAmount + totalClientStorageFee abi.TokenAmount + + nextDealId abi.DealID +} + +func (s *State) mutator(store adt.Store) *marketStateMutation { + return &marketStateMutation{st: s, store: store} +} + +func (m *marketStateMutation) build() (*marketStateMutation, error) { + if m.proposalPermit != Invalid { + proposals, err := AsDealProposalArray(m.store, m.st.Proposals) + if err != nil { + return nil, xerrors.Errorf("failed to load deal proposals: %w", err) + } + m.dealProposals = proposals + } + + if m.statePermit != Invalid { + states, err := AsDealStateArray(m.store, m.st.States) + if err != nil { + return nil, xerrors.Errorf("failed to load deal state: %w", err) + } + m.dealStates = states + } + + if m.lockedPermit != Invalid { + lt, err := adt.AsBalanceTable(m.store, 
m.st.LockedTable) + if err != nil { + return nil, xerrors.Errorf("failed to load locked table: %w", err) + } + m.lockedTable = lt + m.totalClientLockedCollateral = m.st.TotalClientLockedCollateral.Copy() + m.totalClientStorageFee = m.st.TotalClientStorageFee.Copy() + m.totalProviderLockedCollateral = m.st.TotalProviderLockedCollateral.Copy() + } + + if m.escrowPermit != Invalid { + et, err := adt.AsBalanceTable(m.store, m.st.EscrowTable) + if err != nil { + return nil, xerrors.Errorf("failed to load escrow table: %w", err) + } + m.escrowTable = et + } + + if m.pendingPermit != Invalid { + pending, err := adt.AsMap(m.store, m.st.PendingProposals) + if err != nil { + return nil, xerrors.Errorf("failed to load pending proposals: %w", err) + } + m.pendingDeals = pending + } + + if m.dpePermit != Invalid { + dbe, err := AsSetMultimap(m.store, m.st.DealOpsByEpoch) + if err != nil { + return nil, xerrors.Errorf("failed to load deals by epoch: %w", err) + } + m.dealsByEpoch = dbe + } + + m.nextDealId = m.st.NextID + + return m, nil +} + +func (m *marketStateMutation) withDealProposals(permit MarketStateMutationPermission) *marketStateMutation { + m.proposalPermit = permit + return m +} + +func (m *marketStateMutation) withDealStates(permit MarketStateMutationPermission) *marketStateMutation { + m.statePermit = permit + return m +} + +func (m *marketStateMutation) withEscrowTable(permit MarketStateMutationPermission) *marketStateMutation { + m.escrowPermit = permit + return m +} + +func (m *marketStateMutation) withLockedTable(permit MarketStateMutationPermission) *marketStateMutation { + m.lockedPermit = permit + return m +} + +func (m *marketStateMutation) withPendingProposals(permit MarketStateMutationPermission) *marketStateMutation { + m.pendingPermit = permit + return m +} + +func (m *marketStateMutation) withDealsByEpoch(permit MarketStateMutationPermission) *marketStateMutation { + m.dpePermit = permit + return m +} + +func (m *marketStateMutation) commitState() 
error { + var err error + if m.proposalPermit == WritePermission { + if m.st.Proposals, err = m.dealProposals.Root(); err != nil { + return xerrors.Errorf("failed to flush deal dealProposals: %w", err) + } + } + + if m.statePermit == WritePermission { + if m.st.States, err = m.dealStates.Root(); err != nil { + return xerrors.Errorf("failed to flush deal states: %w", err) + } + } + + if m.lockedPermit == WritePermission { + if m.st.LockedTable, err = m.lockedTable.Root(); err != nil { + return xerrors.Errorf("failed to flush locked table: %w", err) + } + m.st.TotalClientLockedCollateral = m.totalClientLockedCollateral.Copy() + m.st.TotalProviderLockedCollateral = m.totalProviderLockedCollateral.Copy() + m.st.TotalClientStorageFee = m.totalClientStorageFee.Copy() + } + + if m.escrowPermit == WritePermission { + if m.st.EscrowTable, err = m.escrowTable.Root(); err != nil { + return xerrors.Errorf("failed to flush escrow table: %w", err) + } + } + + if m.pendingPermit == WritePermission { + if m.st.PendingProposals, err = m.pendingDeals.Root(); err != nil { + return xerrors.Errorf("failed to flush pending deals: %w", err) + } + } + + if m.dpePermit == WritePermission { + if m.st.DealOpsByEpoch, err = m.dealsByEpoch.Root(); err != nil { + return xerrors.Errorf("failed to flush deals by epoch: %w", err) + } + } + + m.st.NextID = m.nextDealId + return nil +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/policy.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/policy.go new file mode 100644 index 0000000000..c6216a0286 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/policy.go @@ -0,0 +1,84 @@ +package market + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/specs-actors/actors/builtin" +) + +// DealUpdatesInterval is the 
number of blocks between payouts for deals +const DealUpdatesInterval = builtin.EpochsInDay + +// ProvCollateralPercentSupplyNum is the numerator of the percentage of normalized cirulating +// supply that must be covered by provider collateral +var ProvCollateralPercentSupplyNumV0 = big.NewInt(5) +var ProvCollateralPercentSupplyNumV1 = big.NewInt(1) + +// ProvCollateralPercentSupplyDenom is the denominator of the percentage of normalized cirulating +// supply that must be covered by provider collateral +var ProvCollateralPercentSupplyDenom = big.NewInt(100) + +// Bounds (inclusive) on deal duration +func dealDurationBounds(size abi.PaddedPieceSize) (min abi.ChainEpoch, max abi.ChainEpoch) { + return abi.ChainEpoch(180 * builtin.EpochsInDay), abi.ChainEpoch(540 * builtin.EpochsInDay) // PARAM_FINISH +} + +func dealPricePerEpochBounds(size abi.PaddedPieceSize, duration abi.ChainEpoch) (min abi.TokenAmount, max abi.TokenAmount) { + return abi.NewTokenAmount(0), builtin.TotalFilecoin // PARAM_FINISH +} + +func DealProviderCollateralBounds(pieceSize abi.PaddedPieceSize, verified bool, networkRawPower, networkQAPower, baselinePower abi.StoragePower, + networkCirculatingSupply abi.TokenAmount, networkVersion network.Version) (min, max abi.TokenAmount) { + // minimumProviderCollateral = (ProvCollateralPercentSupplyNum / ProvCollateralPercentSupplyDenom) * normalizedCirculatingSupply + // normalizedCirculatingSupply = FILCirculatingSupply * dealPowerShare + // dealPowerShare = dealQAPower / max(BaselinePower(t), NetworkQAPower(t), dealQAPower) + + lockTargetNum := big.Mul(ProvCollateralPercentSupplyNumV0, networkCirculatingSupply) + powerShareNum := dealQAPower(pieceSize, verified) + powerShareDenom := big.Max(big.Max(networkQAPower, baselinePower), powerShareNum) + + if networkVersion >= network.Version1 { + lockTargetNum = big.Mul(ProvCollateralPercentSupplyNumV1, networkCirculatingSupply) + powerShareNum = big.NewIntUnsigned(uint64(pieceSize)) + powerShareDenom = 
big.Max(big.Max(networkRawPower, baselinePower), powerShareNum) + } + + lockTargetDenom := ProvCollateralPercentSupplyDenom + + num := big.Mul(lockTargetNum, powerShareNum) + denom := big.Mul(lockTargetDenom, powerShareDenom) + minCollateral := big.Div(num, denom) + return minCollateral, builtin.TotalFilecoin // PARAM_FINISH +} + +func DealClientCollateralBounds(pieceSize abi.PaddedPieceSize, duration abi.ChainEpoch) (min abi.TokenAmount, max abi.TokenAmount) { + return abi.NewTokenAmount(0), builtin.TotalFilecoin // PARAM_FINISH +} + +// Penalty to provider deal collateral if the deadline expires before sector commitment. +func collateralPenaltyForDealActivationMissed(providerCollateral abi.TokenAmount) abi.TokenAmount { + return providerCollateral // PARAM_FINISH +} + +// Computes the weight for a deal proposal, which is a function of its size and duration. +func DealWeight(proposal *DealProposal) abi.DealWeight { + dealDuration := big.NewInt(int64(proposal.Duration())) + dealSize := big.NewIntUnsigned(uint64(proposal.PieceSize)) + dealSpaceTime := big.Mul(dealDuration, dealSize) + return dealSpaceTime +} + +func dealQAPower(dealSize abi.PaddedPieceSize, verified bool) abi.StoragePower { + scaledUpQuality := big.Zero() // nolint:ineffassign + if verified { + scaledUpQuality = big.Lsh(builtin.VerifiedDealWeightMultiplier, builtin.SectorQualityPrecision) + scaledUpQuality = big.Div(scaledUpQuality, builtin.QualityBaseMultiplier) + } else { + scaledUpQuality = big.Lsh(builtin.DealWeightMultiplier, builtin.SectorQualityPrecision) + scaledUpQuality = big.Div(scaledUpQuality, builtin.QualityBaseMultiplier) + } + scaledUpQAPower := big.Mul(scaledUpQuality, big.NewIntUnsigned(uint64(dealSize))) + return big.Rsh(scaledUpQAPower, builtin.SectorQualityPrecision) +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/set_multimap.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/set_multimap.go new file mode 100644 
index 0000000000..e6e7b55da6 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/set_multimap.go @@ -0,0 +1,159 @@ +package market + +import ( + "reflect" + + "github.com/filecoin-project/go-state-types/abi" + cid "github.com/ipfs/go-cid" + "github.com/filecoin-project/go-hamt-ipld" + errors "github.com/pkg/errors" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" + + "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +type SetMultimap struct { + mp *adt.Map + store adt.Store +} + +// Interprets a store as a HAMT-based map of HAMT-based sets with root `r`. +func AsSetMultimap(s adt.Store, r cid.Cid) (*SetMultimap, error) { + m, err := adt.AsMap(s, r) + if err != nil { + return nil, err + } + return &SetMultimap{mp: m, store: s}, nil +} + +// Creates a new map backed by an empty HAMT and flushes it to the store. +func MakeEmptySetMultimap(s adt.Store) *SetMultimap { + m := adt.MakeEmptyMap(s) + return &SetMultimap{m, s} +} + +// Returns the root cid of the underlying HAMT. +func (mm *SetMultimap) Root() (cid.Cid, error) { + return mm.mp.Root() +} + +func (mm *SetMultimap) Put(epoch abi.ChainEpoch, v abi.DealID) error { + // Load the hamt under key, or initialize a new empty one if not found. + k := abi.UIntKey(uint64(epoch)) + set, found, err := mm.get(k) + if err != nil { + return err + } + if !found { + set = adt.MakeEmptySet(mm.store) + } + + // Add to the set. + if err = set.Put(dealKey(v)); err != nil { + return errors.Wrapf(err, "failed to add key to set %v", epoch) + } + + src, err := set.Root() + if err != nil { + return xerrors.Errorf("failed to flush set root: %w", err) + } + // Store the new set root under key. 
+ newSetRoot := cbg.CborCid(src) + err = mm.mp.Put(k, &newSetRoot) + if err != nil { + return errors.Wrapf(err, "failed to store set") + } + return nil +} + +func (mm *SetMultimap) PutMany(epoch abi.ChainEpoch, vs []abi.DealID) error { + // Load the hamt under key, or initialize a new empty one if not found. + k := abi.UIntKey(uint64(epoch)) + set, found, err := mm.get(k) + if err != nil { + return err + } + if !found { + set = adt.MakeEmptySet(mm.store) + } + + // Add to the set. + for _, v := range vs { + if err = set.Put(dealKey(v)); err != nil { + return errors.Wrapf(err, "failed to add key to set %v", epoch) + } + } + + src, err := set.Root() + if err != nil { + return xerrors.Errorf("failed to flush set root: %w", err) + } + // Store the new set root under key. + newSetRoot := cbg.CborCid(src) + err = mm.mp.Put(k, &newSetRoot) + if err != nil { + return errors.Wrapf(err, "failed to store set") + } + return nil +} + +// Removes all values for a key. +func (mm *SetMultimap) RemoveAll(key abi.ChainEpoch) error { + err := mm.mp.Delete(abi.UIntKey(uint64(key))) + if err != nil && !xerrors.Is(err, hamt.ErrNotFound) { + return xerrors.Errorf("failed to delete set key %v: %w", key, err) + } + return nil +} + +// Iterates all entries for a key, iteration halts if the function returns an error. 
+func (mm *SetMultimap) ForEach(epoch abi.ChainEpoch, fn func(id abi.DealID) error) error { + set, found, err := mm.get(abi.UIntKey(uint64(epoch))) + if err != nil { + return err + } + if found { + return set.ForEach(func(k string) error { + v, err := parseDealKey(k) + if err != nil { + return err + } + return fn(v) + }) + } + return nil +} + +func (mm *SetMultimap) get(key abi.Keyer) (*adt.Set, bool, error) { + var setRoot cbg.CborCid + found, err := mm.mp.Get(key, &setRoot) + if err != nil { + return nil, false, errors.Wrapf(err, "failed to load set key %v", key) + } + var set *adt.Set + if found { + set, err = adt.AsSet(mm.store, cid.Cid(setRoot)) + if err != nil { + return nil, false, err + } + } + return set, found, nil +} + +func dealKey(e abi.DealID) abi.Keyer { + return abi.UIntKey(uint64(e)) +} + +func parseDealKey(s string) (abi.DealID, error) { + key, err := abi.ParseUIntKey(s) + return abi.DealID(key), err +} + +func init() { + // Check that DealID is indeed an unsigned integer to confirm that dealKey is making the right interpretation. + var e abi.DealID + if reflect.TypeOf(e).Kind() != reflect.Uint64 { + panic("incorrect sector number encoding") + } +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/types.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/types.go new file mode 100644 index 0000000000..ee920bebdd --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/market/types.go @@ -0,0 +1,89 @@ +package market + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + . "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +// A specialization of a array to deals. +// It is an error to query for a key that doesn't exist. +type DealArray struct { + *Array +} + +// Interprets a store as balance table with root `r`. 
+func AsDealProposalArray(s Store, r cid.Cid) (*DealArray, error) { + a, err := AsArray(s, r) + if err != nil { + return nil, err + } + return &DealArray{a}, nil +} + +// Returns the root cid of underlying AMT. +func (t *DealArray) Root() (cid.Cid, error) { + return t.Array.Root() +} + +// Gets the deal for a key. The entry must have been previously initialized. +func (t *DealArray) Get(id abi.DealID) (*DealProposal, bool, error) { + var value DealProposal + found, err := t.Array.Get(uint64(id), &value) + return &value, found, err +} + +func (t *DealArray) Set(k abi.DealID, value *DealProposal) error { + return t.Array.Set(uint64(k), value) +} + +func (t *DealArray) Delete(key uint64) error { + return t.Array.Delete(key) +} + +// A specialization of a array to deals. +// It is an error to query for a key that doesn't exist. +type DealMetaArray struct { + *Array +} + +// Interprets a store as balance table with root `r`. +func AsDealStateArray(s Store, r cid.Cid) (*DealMetaArray, error) { + dsa, err := AsArray(s, r) + if err != nil { + return nil, err + } + + return &DealMetaArray{dsa}, nil +} + +// Returns the root cid of underlying AMT. +func (t *DealMetaArray) Root() (cid.Cid, error) { + return t.Array.Root() +} + +// Gets the deal for a key. The entry must have been previously initialized. +func (t *DealMetaArray) Get(id abi.DealID) (*DealState, bool, error) { + var value DealState + found, err := t.Array.Get(uint64(id), &value) + if err != nil { + return nil, false, err // The errors from Map carry good information, no need to wrap here. 
+ } + if !found { + return &DealState{ + SectorStartEpoch: epochUndefined, + LastUpdatedEpoch: epochUndefined, + SlashEpoch: epochUndefined, + }, false, nil + } + return &value, true, nil +} + +func (t *DealMetaArray) Set(k abi.DealID, value *DealState) error { + return t.Array.Set(uint64(k), value) +} + +func (t *DealMetaArray) Delete(id abi.DealID) error { + return t.Array.Delete(uint64(id)) +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/methods.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/methods.go new file mode 100644 index 0000000000..49ce24eda0 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/methods.go @@ -0,0 +1,107 @@ +package builtin + +import ( + abi "github.com/filecoin-project/go-state-types/abi" +) + +const ( + MethodSend = abi.MethodNum(0) + MethodConstructor = abi.MethodNum(1) +) + +var MethodsAccount = struct { + Constructor abi.MethodNum + PubkeyAddress abi.MethodNum +}{MethodConstructor, 2} + +var MethodsInit = struct { + Constructor abi.MethodNum + Exec abi.MethodNum +}{MethodConstructor, 2} + +var MethodsCron = struct { + Constructor abi.MethodNum + EpochTick abi.MethodNum +}{MethodConstructor, 2} + +var MethodsReward = struct { + Constructor abi.MethodNum + AwardBlockReward abi.MethodNum + ThisEpochReward abi.MethodNum + UpdateNetworkKPI abi.MethodNum +}{MethodConstructor, 2, 3, 4} + +var MethodsMultisig = struct { + Constructor abi.MethodNum + Propose abi.MethodNum + Approve abi.MethodNum + Cancel abi.MethodNum + AddSigner abi.MethodNum + RemoveSigner abi.MethodNum + SwapSigner abi.MethodNum + ChangeNumApprovalsThreshold abi.MethodNum + LockBalance abi.MethodNum +}{MethodConstructor, 2, 3, 4, 5, 6, 7, 8, 9} + +var MethodsPaych = struct { + Constructor abi.MethodNum + UpdateChannelState abi.MethodNum + Settle abi.MethodNum + Collect abi.MethodNum +}{MethodConstructor, 2, 3, 4} + +var MethodsMarket = struct { + Constructor abi.MethodNum + AddBalance 
abi.MethodNum + WithdrawBalance abi.MethodNum + PublishStorageDeals abi.MethodNum + VerifyDealsForActivation abi.MethodNum + ActivateDeals abi.MethodNum + OnMinerSectorsTerminate abi.MethodNum + ComputeDataCommitment abi.MethodNum + CronTick abi.MethodNum +}{MethodConstructor, 2, 3, 4, 5, 6, 7, 8, 9} + +var MethodsPower = struct { + Constructor abi.MethodNum + CreateMiner abi.MethodNum + UpdateClaimedPower abi.MethodNum + EnrollCronEvent abi.MethodNum + OnEpochTickEnd abi.MethodNum + UpdatePledgeTotal abi.MethodNum + OnConsensusFault abi.MethodNum + SubmitPoRepForBulkVerify abi.MethodNum + CurrentTotalPower abi.MethodNum +}{MethodConstructor, 2, 3, 4, 5, 6, 7, 8, 9} + +var MethodsMiner = struct { + Constructor abi.MethodNum + ControlAddresses abi.MethodNum + ChangeWorkerAddress abi.MethodNum + ChangePeerID abi.MethodNum + SubmitWindowedPoSt abi.MethodNum + PreCommitSector abi.MethodNum + ProveCommitSector abi.MethodNum + ExtendSectorExpiration abi.MethodNum + TerminateSectors abi.MethodNum + DeclareFaults abi.MethodNum + DeclareFaultsRecovered abi.MethodNum + OnDeferredCronEvent abi.MethodNum + CheckSectorProven abi.MethodNum + AddLockedFund abi.MethodNum + ReportConsensusFault abi.MethodNum + WithdrawBalance abi.MethodNum + ConfirmSectorProofsValid abi.MethodNum + ChangeMultiaddrs abi.MethodNum + CompactPartitions abi.MethodNum + CompactSectorNumbers abi.MethodNum +}{MethodConstructor, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20} + +var MethodsVerifiedRegistry = struct { + Constructor abi.MethodNum + AddVerifier abi.MethodNum + RemoveVerifier abi.MethodNum + AddVerifiedClient abi.MethodNum + UseBytes abi.MethodNum + RestoreBytes abi.MethodNum +}{MethodConstructor, 2, 3, 4, 5, 6} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/miner/cbor_gen.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/miner/cbor_gen.go new file mode 100644 index 0000000000..70e391c624 --- /dev/null +++ 
b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/miner/cbor_gen.go @@ -0,0 +1,4110 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package miner + +import ( + "fmt" + "io" + + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + proof "github.com/filecoin-project/specs-actors/actors/runtime/proof" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +var lengthBufState = []byte{141} + +func (t *State) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufState); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Info (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Info); err != nil { + return xerrors.Errorf("failed to write cid field t.Info: %w", err) + } + + // t.PreCommitDeposits (big.Int) (struct) + if err := t.PreCommitDeposits.MarshalCBOR(w); err != nil { + return err + } + + // t.LockedFunds (big.Int) (struct) + if err := t.LockedFunds.MarshalCBOR(w); err != nil { + return err + } + + // t.VestingFunds (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.VestingFunds); err != nil { + return xerrors.Errorf("failed to write cid field t.VestingFunds: %w", err) + } + + // t.InitialPledgeRequirement (big.Int) (struct) + if err := t.InitialPledgeRequirement.MarshalCBOR(w); err != nil { + return err + } + + // t.PreCommittedSectors (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.PreCommittedSectors); err != nil { + return xerrors.Errorf("failed to write cid field t.PreCommittedSectors: %w", err) + } + + // t.PreCommittedSectorsExpiry (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.PreCommittedSectorsExpiry); err != nil { + return xerrors.Errorf("failed to write cid field t.PreCommittedSectorsExpiry: %w", err) + } + + // t.AllocatedSectors 
(cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.AllocatedSectors); err != nil { + return xerrors.Errorf("failed to write cid field t.AllocatedSectors: %w", err) + } + + // t.Sectors (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Sectors); err != nil { + return xerrors.Errorf("failed to write cid field t.Sectors: %w", err) + } + + // t.ProvingPeriodStart (abi.ChainEpoch) (int64) + if t.ProvingPeriodStart >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ProvingPeriodStart)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.ProvingPeriodStart-1)); err != nil { + return err + } + } + + // t.CurrentDeadline (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.CurrentDeadline)); err != nil { + return err + } + + // t.Deadlines (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Deadlines); err != nil { + return xerrors.Errorf("failed to write cid field t.Deadlines: %w", err) + } + + // t.EarlyTerminations (bitfield.BitField) (struct) + if err := t.EarlyTerminations.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *State) UnmarshalCBOR(r io.Reader) error { + *t = State{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 13 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Info (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Info: %w", err) + } + + t.Info = c + + } + // t.PreCommitDeposits (big.Int) (struct) + + { + + if err := t.PreCommitDeposits.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.PreCommitDeposits: %w", err) + 
} + + } + // t.LockedFunds (big.Int) (struct) + + { + + if err := t.LockedFunds.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.LockedFunds: %w", err) + } + + } + // t.VestingFunds (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.VestingFunds: %w", err) + } + + t.VestingFunds = c + + } + // t.InitialPledgeRequirement (big.Int) (struct) + + { + + if err := t.InitialPledgeRequirement.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.InitialPledgeRequirement: %w", err) + } + + } + // t.PreCommittedSectors (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PreCommittedSectors: %w", err) + } + + t.PreCommittedSectors = c + + } + // t.PreCommittedSectorsExpiry (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PreCommittedSectorsExpiry: %w", err) + } + + t.PreCommittedSectorsExpiry = c + + } + // t.AllocatedSectors (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AllocatedSectors: %w", err) + } + + t.AllocatedSectors = c + + } + // t.Sectors (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Sectors: %w", err) + } + + t.Sectors = c + + } + // t.ProvingPeriodStart (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + 
t.ProvingPeriodStart = abi.ChainEpoch(extraI) + } + // t.CurrentDeadline (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.CurrentDeadline = uint64(extra) + + } + // t.Deadlines (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Deadlines: %w", err) + } + + t.Deadlines = c + + } + // t.EarlyTerminations (bitfield.BitField) (struct) + + { + + if err := t.EarlyTerminations.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.EarlyTerminations: %w", err) + } + + } + return nil +} + +var lengthBufMinerInfo = []byte{137} + +func (t *MinerInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufMinerInfo); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Owner (address.Address) (struct) + if err := t.Owner.MarshalCBOR(w); err != nil { + return err + } + + // t.Worker (address.Address) (struct) + if err := t.Worker.MarshalCBOR(w); err != nil { + return err + } + + // t.ControlAddresses ([]address.Address) (slice) + if len(t.ControlAddresses) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.ControlAddresses was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.ControlAddresses))); err != nil { + return err + } + for _, v := range t.ControlAddresses { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + // t.PendingWorkerKey (miner.WorkerKeyChange) (struct) + if err := t.PendingWorkerKey.MarshalCBOR(w); err != nil { + return err + } + + // t.PeerId ([]uint8) (slice) + if len(t.PeerId) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.PeerId was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, 
uint64(len(t.PeerId))); err != nil { + return err + } + + if _, err := w.Write(t.PeerId[:]); err != nil { + return err + } + + // t.Multiaddrs ([][]uint8) (slice) + if len(t.Multiaddrs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Multiaddrs was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Multiaddrs))); err != nil { + return err + } + for _, v := range t.Multiaddrs { + if len(v) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field v was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(v))); err != nil { + return err + } + + if _, err := w.Write(v[:]); err != nil { + return err + } + } + + // t.SealProofType (abi.RegisteredSealProof) (int64) + if t.SealProofType >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealProofType)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealProofType-1)); err != nil { + return err + } + } + + // t.SectorSize (abi.SectorSize) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorSize)); err != nil { + return err + } + + // t.WindowPoStPartitionSectors (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.WindowPoStPartitionSectors)); err != nil { + return err + } + + return nil +} + +func (t *MinerInfo) UnmarshalCBOR(r io.Reader) error { + *t = MinerInfo{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 9 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Owner (address.Address) (struct) + + { + + if err := t.Owner.UnmarshalCBOR(br); err != nil { + return 
xerrors.Errorf("unmarshaling t.Owner: %w", err) + } + + } + // t.Worker (address.Address) (struct) + + { + + if err := t.Worker.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Worker: %w", err) + } + + } + // t.ControlAddresses ([]address.Address) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.ControlAddresses: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.ControlAddresses = make([]address.Address, extra) + } + + for i := 0; i < int(extra); i++ { + + var v address.Address + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.ControlAddresses[i] = v + } + + // t.PendingWorkerKey (miner.WorkerKeyChange) (struct) + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.PendingWorkerKey = new(WorkerKeyChange) + if err := t.PendingWorkerKey.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.PendingWorkerKey pointer: %w", err) + } + } + + } + // t.PeerId ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.PeerId: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.PeerId = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.PeerId[:]); err != nil { + return err + } + // t.Multiaddrs ([][]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Multiaddrs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Multiaddrs = 
make([][]uint8, extra) + } + + for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Multiaddrs[i]: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Multiaddrs[i] = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Multiaddrs[i][:]); err != nil { + return err + } + } + } + + // t.SealProofType (abi.RegisteredSealProof) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SealProofType = abi.RegisteredSealProof(extraI) + } + // t.SectorSize (abi.SectorSize) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorSize = abi.SectorSize(extra) + + } + // t.WindowPoStPartitionSectors (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.WindowPoStPartitionSectors = uint64(extra) + + } + return nil +} + +var lengthBufDeadlines = []byte{129} + +func (t *Deadlines) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufDeadlines); err != nil { + return err + } + + scratch := make([]byte, 9) + + // 
t.Due ([48]cid.Cid) (array) + if len(t.Due) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Due was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Due))); err != nil { + return err + } + for _, v := range t.Due { + if err := cbg.WriteCidBuf(scratch, w, v); err != nil { + return xerrors.Errorf("failed writing cid field t.Due: %w", err) + } + } + return nil +} + +func (t *Deadlines) UnmarshalCBOR(r io.Reader) error { + *t = Deadlines{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Due ([48]cid.Cid) (array) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Due: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra != 48 { + return fmt.Errorf("expected array to have 48 elements") + } + + t.Due = [48]cid.Cid{} + + for i := 0; i < int(extra); i++ { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("reading cid field t.Due failed: %w", err) + } + t.Due[i] = c + } + + return nil +} + +var lengthBufDeadline = []byte{135} + +func (t *Deadline) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufDeadline); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Partitions (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Partitions); err != nil { + return xerrors.Errorf("failed to write cid field t.Partitions: %w", err) + } + + // t.ExpirationsEpochs (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.ExpirationsEpochs); err != 
nil { + return xerrors.Errorf("failed to write cid field t.ExpirationsEpochs: %w", err) + } + + // t.PostSubmissions (bitfield.BitField) (struct) + if err := t.PostSubmissions.MarshalCBOR(w); err != nil { + return err + } + + // t.EarlyTerminations (bitfield.BitField) (struct) + if err := t.EarlyTerminations.MarshalCBOR(w); err != nil { + return err + } + + // t.LiveSectors (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.LiveSectors)); err != nil { + return err + } + + // t.TotalSectors (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TotalSectors)); err != nil { + return err + } + + // t.FaultyPower (miner.PowerPair) (struct) + if err := t.FaultyPower.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *Deadline) UnmarshalCBOR(r io.Reader) error { + *t = Deadline{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 7 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Partitions (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Partitions: %w", err) + } + + t.Partitions = c + + } + // t.ExpirationsEpochs (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ExpirationsEpochs: %w", err) + } + + t.ExpirationsEpochs = c + + } + // t.PostSubmissions (bitfield.BitField) (struct) + + { + + if err := t.PostSubmissions.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.PostSubmissions: %w", err) + } + + } + // t.EarlyTerminations (bitfield.BitField) (struct) + + { + + if err := t.EarlyTerminations.UnmarshalCBOR(br); err != nil { + return 
xerrors.Errorf("unmarshaling t.EarlyTerminations: %w", err) + } + + } + // t.LiveSectors (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.LiveSectors = uint64(extra) + + } + // t.TotalSectors (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TotalSectors = uint64(extra) + + } + // t.FaultyPower (miner.PowerPair) (struct) + + { + + if err := t.FaultyPower.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.FaultyPower: %w", err) + } + + } + return nil +} + +var lengthBufPartition = []byte{137} + +func (t *Partition) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufPartition); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Sectors (bitfield.BitField) (struct) + if err := t.Sectors.MarshalCBOR(w); err != nil { + return err + } + + // t.Faults (bitfield.BitField) (struct) + if err := t.Faults.MarshalCBOR(w); err != nil { + return err + } + + // t.Recoveries (bitfield.BitField) (struct) + if err := t.Recoveries.MarshalCBOR(w); err != nil { + return err + } + + // t.Terminated (bitfield.BitField) (struct) + if err := t.Terminated.MarshalCBOR(w); err != nil { + return err + } + + // t.ExpirationsEpochs (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.ExpirationsEpochs); err != nil { + return xerrors.Errorf("failed to write cid field t.ExpirationsEpochs: %w", err) + } + + // t.EarlyTerminated (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.EarlyTerminated); err != nil { + return xerrors.Errorf("failed to write cid field t.EarlyTerminated: %w", err) + } + + // t.LivePower (miner.PowerPair) (struct) + if err := 
t.LivePower.MarshalCBOR(w); err != nil { + return err + } + + // t.FaultyPower (miner.PowerPair) (struct) + if err := t.FaultyPower.MarshalCBOR(w); err != nil { + return err + } + + // t.RecoveringPower (miner.PowerPair) (struct) + if err := t.RecoveringPower.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *Partition) UnmarshalCBOR(r io.Reader) error { + *t = Partition{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 9 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Sectors (bitfield.BitField) (struct) + + { + + if err := t.Sectors.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Sectors: %w", err) + } + + } + // t.Faults (bitfield.BitField) (struct) + + { + + if err := t.Faults.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Faults: %w", err) + } + + } + // t.Recoveries (bitfield.BitField) (struct) + + { + + if err := t.Recoveries.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Recoveries: %w", err) + } + + } + // t.Terminated (bitfield.BitField) (struct) + + { + + if err := t.Terminated.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Terminated: %w", err) + } + + } + // t.ExpirationsEpochs (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ExpirationsEpochs: %w", err) + } + + t.ExpirationsEpochs = c + + } + // t.EarlyTerminated (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.EarlyTerminated: %w", err) + } + + t.EarlyTerminated = c + + } + // t.LivePower (miner.PowerPair) (struct) + + { + + if err := t.LivePower.UnmarshalCBOR(br); err != nil { + return 
xerrors.Errorf("unmarshaling t.LivePower: %w", err) + } + + } + // t.FaultyPower (miner.PowerPair) (struct) + + { + + if err := t.FaultyPower.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.FaultyPower: %w", err) + } + + } + // t.RecoveringPower (miner.PowerPair) (struct) + + { + + if err := t.RecoveringPower.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.RecoveringPower: %w", err) + } + + } + return nil +} + +var lengthBufExpirationSet = []byte{133} + +func (t *ExpirationSet) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufExpirationSet); err != nil { + return err + } + + // t.OnTimeSectors (bitfield.BitField) (struct) + if err := t.OnTimeSectors.MarshalCBOR(w); err != nil { + return err + } + + // t.EarlySectors (bitfield.BitField) (struct) + if err := t.EarlySectors.MarshalCBOR(w); err != nil { + return err + } + + // t.OnTimePledge (big.Int) (struct) + if err := t.OnTimePledge.MarshalCBOR(w); err != nil { + return err + } + + // t.ActivePower (miner.PowerPair) (struct) + if err := t.ActivePower.MarshalCBOR(w); err != nil { + return err + } + + // t.FaultyPower (miner.PowerPair) (struct) + if err := t.FaultyPower.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *ExpirationSet) UnmarshalCBOR(r io.Reader) error { + *t = ExpirationSet{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 5 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.OnTimeSectors (bitfield.BitField) (struct) + + { + + if err := t.OnTimeSectors.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.OnTimeSectors: %w", err) + } + + } + // t.EarlySectors (bitfield.BitField) (struct) + + { + + if err 
:= t.EarlySectors.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.EarlySectors: %w", err) + } + + } + // t.OnTimePledge (big.Int) (struct) + + { + + if err := t.OnTimePledge.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.OnTimePledge: %w", err) + } + + } + // t.ActivePower (miner.PowerPair) (struct) + + { + + if err := t.ActivePower.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ActivePower: %w", err) + } + + } + // t.FaultyPower (miner.PowerPair) (struct) + + { + + if err := t.FaultyPower.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.FaultyPower: %w", err) + } + + } + return nil +} + +var lengthBufPowerPair = []byte{130} + +func (t *PowerPair) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufPowerPair); err != nil { + return err + } + + // t.Raw (big.Int) (struct) + if err := t.Raw.MarshalCBOR(w); err != nil { + return err + } + + // t.QA (big.Int) (struct) + if err := t.QA.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *PowerPair) UnmarshalCBOR(r io.Reader) error { + *t = PowerPair{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Raw (big.Int) (struct) + + { + + if err := t.Raw.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Raw: %w", err) + } + + } + // t.QA (big.Int) (struct) + + { + + if err := t.QA.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.QA: %w", err) + } + + } + return nil +} + +var lengthBufSectorPreCommitOnChainInfo = []byte{133} + +func (t *SectorPreCommitOnChainInfo) MarshalCBOR(w io.Writer) error { + if t == nil { 
+ _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufSectorPreCommitOnChainInfo); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Info (miner.SectorPreCommitInfo) (struct) + if err := t.Info.MarshalCBOR(w); err != nil { + return err + } + + // t.PreCommitDeposit (big.Int) (struct) + if err := t.PreCommitDeposit.MarshalCBOR(w); err != nil { + return err + } + + // t.PreCommitEpoch (abi.ChainEpoch) (int64) + if t.PreCommitEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PreCommitEpoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.PreCommitEpoch-1)); err != nil { + return err + } + } + + // t.DealWeight (big.Int) (struct) + if err := t.DealWeight.MarshalCBOR(w); err != nil { + return err + } + + // t.VerifiedDealWeight (big.Int) (struct) + if err := t.VerifiedDealWeight.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *SectorPreCommitOnChainInfo) UnmarshalCBOR(r io.Reader) error { + *t = SectorPreCommitOnChainInfo{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 5 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Info (miner.SectorPreCommitInfo) (struct) + + { + + if err := t.Info.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Info: %w", err) + } + + } + // t.PreCommitDeposit (big.Int) (struct) + + { + + if err := t.PreCommitDeposit.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.PreCommitDeposit: %w", err) + } + + } + // t.PreCommitEpoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + 
case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.PreCommitEpoch = abi.ChainEpoch(extraI) + } + // t.DealWeight (big.Int) (struct) + + { + + if err := t.DealWeight.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.DealWeight: %w", err) + } + + } + // t.VerifiedDealWeight (big.Int) (struct) + + { + + if err := t.VerifiedDealWeight.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.VerifiedDealWeight: %w", err) + } + + } + return nil +} + +var lengthBufSectorPreCommitInfo = []byte{138} + +func (t *SectorPreCommitInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufSectorPreCommitInfo); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.SealProof (abi.RegisteredSealProof) (int64) + if t.SealProof >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealProof)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealProof-1)); err != nil { + return err + } + } + + // t.SectorNumber (abi.SectorNumber) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { + return err + } + + // t.SealedCID (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.SealedCID); err != nil { + return xerrors.Errorf("failed to write cid field t.SealedCID: %w", err) + } + + // t.SealRandEpoch (abi.ChainEpoch) (int64) + if t.SealRandEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealRandEpoch)); err != nil { + return err + } + } else { + if 
err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealRandEpoch-1)); err != nil { + return err + } + } + + // t.DealIDs ([]abi.DealID) (slice) + if len(t.DealIDs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.DealIDs was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.DealIDs))); err != nil { + return err + } + for _, v := range t.DealIDs { + if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { + return err + } + } + + // t.Expiration (abi.ChainEpoch) (int64) + if t.Expiration >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Expiration)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Expiration-1)); err != nil { + return err + } + } + + // t.ReplaceCapacity (bool) (bool) + if err := cbg.WriteBool(w, t.ReplaceCapacity); err != nil { + return err + } + + // t.ReplaceSectorDeadline (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ReplaceSectorDeadline)); err != nil { + return err + } + + // t.ReplaceSectorPartition (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ReplaceSectorPartition)); err != nil { + return err + } + + // t.ReplaceSectorNumber (abi.SectorNumber) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ReplaceSectorNumber)); err != nil { + return err + } + + return nil +} + +func (t *SectorPreCommitInfo) UnmarshalCBOR(r io.Reader) error { + *t = SectorPreCommitInfo{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 10 { + return fmt.Errorf("cbor input had wrong number of fields") 
+ } + + // t.SealProof (abi.RegisteredSealProof) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SealProof = abi.RegisteredSealProof(extraI) + } + // t.SectorNumber (abi.SectorNumber) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorNumber = abi.SectorNumber(extra) + + } + // t.SealedCID (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.SealedCID: %w", err) + } + + t.SealedCID = c + + } + // t.SealRandEpoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SealRandEpoch = abi.ChainEpoch(extraI) + } + // t.DealIDs ([]abi.DealID) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.DealIDs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.DealIDs = make([]abi.DealID, extra) + } + + for i := 0; i < 
int(extra); i++ { + + maj, val, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) + } + + if maj != cbg.MajUnsignedInt { + return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) + } + + t.DealIDs[i] = abi.DealID(val) + } + + // t.Expiration (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Expiration = abi.ChainEpoch(extraI) + } + // t.ReplaceCapacity (bool) (bool) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.ReplaceCapacity = false + case 21: + t.ReplaceCapacity = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.ReplaceSectorDeadline (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ReplaceSectorDeadline = uint64(extra) + + } + // t.ReplaceSectorPartition (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ReplaceSectorPartition = uint64(extra) + + } + // t.ReplaceSectorNumber (abi.SectorNumber) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if 
err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ReplaceSectorNumber = abi.SectorNumber(extra) + + } + return nil +} + +var lengthBufSectorOnChainInfo = []byte{139} + +func (t *SectorOnChainInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufSectorOnChainInfo); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.SectorNumber (abi.SectorNumber) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { + return err + } + + // t.SealProof (abi.RegisteredSealProof) (int64) + if t.SealProof >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealProof)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealProof-1)); err != nil { + return err + } + } + + // t.SealedCID (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.SealedCID); err != nil { + return xerrors.Errorf("failed to write cid field t.SealedCID: %w", err) + } + + // t.DealIDs ([]abi.DealID) (slice) + if len(t.DealIDs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.DealIDs was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.DealIDs))); err != nil { + return err + } + for _, v := range t.DealIDs { + if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { + return err + } + } + + // t.Activation (abi.ChainEpoch) (int64) + if t.Activation >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Activation)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Activation-1)); err != nil { + return err + } + } + + // t.Expiration (abi.ChainEpoch) (int64) + if t.Expiration 
>= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Expiration)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Expiration-1)); err != nil { + return err + } + } + + // t.DealWeight (big.Int) (struct) + if err := t.DealWeight.MarshalCBOR(w); err != nil { + return err + } + + // t.VerifiedDealWeight (big.Int) (struct) + if err := t.VerifiedDealWeight.MarshalCBOR(w); err != nil { + return err + } + + // t.InitialPledge (big.Int) (struct) + if err := t.InitialPledge.MarshalCBOR(w); err != nil { + return err + } + + // t.ExpectedDayReward (big.Int) (struct) + if err := t.ExpectedDayReward.MarshalCBOR(w); err != nil { + return err + } + + // t.ExpectedStoragePledge (big.Int) (struct) + if err := t.ExpectedStoragePledge.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *SectorOnChainInfo) UnmarshalCBOR(r io.Reader) error { + *t = SectorOnChainInfo{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 11 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.SectorNumber (abi.SectorNumber) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorNumber = abi.SectorNumber(extra) + + } + // t.SealProof (abi.RegisteredSealProof) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 
negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SealProof = abi.RegisteredSealProof(extraI) + } + // t.SealedCID (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.SealedCID: %w", err) + } + + t.SealedCID = c + + } + // t.DealIDs ([]abi.DealID) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.DealIDs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.DealIDs = make([]abi.DealID, extra) + } + + for i := 0; i < int(extra); i++ { + + maj, val, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return xerrors.Errorf("failed to read uint64 for t.DealIDs slice: %w", err) + } + + if maj != cbg.MajUnsignedInt { + return xerrors.Errorf("value read for array t.DealIDs was not a uint, instead got %d", maj) + } + + t.DealIDs[i] = abi.DealID(val) + } + + // t.Activation (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Activation = abi.ChainEpoch(extraI) + } + // t.Expiration (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + 
if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Expiration = abi.ChainEpoch(extraI) + } + // t.DealWeight (big.Int) (struct) + + { + + if err := t.DealWeight.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.DealWeight: %w", err) + } + + } + // t.VerifiedDealWeight (big.Int) (struct) + + { + + if err := t.VerifiedDealWeight.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.VerifiedDealWeight: %w", err) + } + + } + // t.InitialPledge (big.Int) (struct) + + { + + if err := t.InitialPledge.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.InitialPledge: %w", err) + } + + } + // t.ExpectedDayReward (big.Int) (struct) + + { + + if err := t.ExpectedDayReward.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ExpectedDayReward: %w", err) + } + + } + // t.ExpectedStoragePledge (big.Int) (struct) + + { + + if err := t.ExpectedStoragePledge.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ExpectedStoragePledge: %w", err) + } + + } + return nil +} + +var lengthBufWorkerKeyChange = []byte{130} + +func (t *WorkerKeyChange) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufWorkerKeyChange); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.NewWorker (address.Address) (struct) + if err := t.NewWorker.MarshalCBOR(w); err != nil { + return err + } + + // t.EffectiveAt (abi.ChainEpoch) (int64) + if t.EffectiveAt >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EffectiveAt)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EffectiveAt-1)); err != nil { + return err + } + } + return nil +} + +func (t *WorkerKeyChange) UnmarshalCBOR(r io.Reader) 
error { + *t = WorkerKeyChange{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.NewWorker (address.Address) (struct) + + { + + if err := t.NewWorker.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.NewWorker: %w", err) + } + + } + // t.EffectiveAt (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.EffectiveAt = abi.ChainEpoch(extraI) + } + return nil +} + +var lengthBufVestingFunds = []byte{129} + +func (t *VestingFunds) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufVestingFunds); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Funds ([]miner.VestingFund) (slice) + if len(t.Funds) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Funds was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Funds))); err != nil { + return err + } + for _, v := range t.Funds { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *VestingFunds) UnmarshalCBOR(r io.Reader) error { + *t = VestingFunds{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err 
+ } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Funds ([]miner.VestingFund) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Funds: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Funds = make([]VestingFund, extra) + } + + for i := 0; i < int(extra); i++ { + + var v VestingFund + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Funds[i] = v + } + + return nil +} + +var lengthBufVestingFund = []byte{130} + +func (t *VestingFund) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufVestingFund); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Epoch (abi.ChainEpoch) (int64) + if t.Epoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil { + return err + } + } + + // t.Amount (big.Int) (struct) + if err := t.Amount.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *VestingFund) UnmarshalCBOR(r io.Reader) error { + *t = VestingFund{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Epoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + 
case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Epoch = abi.ChainEpoch(extraI) + } + // t.Amount (big.Int) (struct) + + { + + if err := t.Amount.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Amount: %w", err) + } + + } + return nil +} + +var lengthBufSubmitWindowedPoStParams = []byte{133} + +func (t *SubmitWindowedPoStParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufSubmitWindowedPoStParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Deadline (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { + return err + } + + // t.Partitions ([]miner.PoStPartition) (slice) + if len(t.Partitions) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Partitions was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Partitions))); err != nil { + return err + } + for _, v := range t.Partitions { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + // t.Proofs ([]proof.PoStProof) (slice) + if len(t.Proofs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Proofs was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Proofs))); err != nil { + return err + } + for _, v := range t.Proofs { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + // t.ChainCommitEpoch (abi.ChainEpoch) (int64) + if t.ChainCommitEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ChainCommitEpoch)); err != nil { + return 
err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.ChainCommitEpoch-1)); err != nil { + return err + } + } + + // t.ChainCommitRand (abi.Randomness) (slice) + if len(t.ChainCommitRand) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.ChainCommitRand was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.ChainCommitRand))); err != nil { + return err + } + + if _, err := w.Write(t.ChainCommitRand[:]); err != nil { + return err + } + return nil +} + +func (t *SubmitWindowedPoStParams) UnmarshalCBOR(r io.Reader) error { + *t = SubmitWindowedPoStParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 5 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Deadline (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Deadline = uint64(extra) + + } + // t.Partitions ([]miner.PoStPartition) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Partitions: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Partitions = make([]PoStPartition, extra) + } + + for i := 0; i < int(extra); i++ { + + var v PoStPartition + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Partitions[i] = v + } + + // t.Proofs ([]proof.PoStProof) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Proofs: 
array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Proofs = make([]proof.PoStProof, extra) + } + + for i := 0; i < int(extra); i++ { + + var v proof.PoStProof + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Proofs[i] = v + } + + // t.ChainCommitEpoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.ChainCommitEpoch = abi.ChainEpoch(extraI) + } + // t.ChainCommitRand (abi.Randomness) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.ChainCommitRand: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.ChainCommitRand = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.ChainCommitRand[:]); err != nil { + return err + } + return nil +} + +var lengthBufTerminateSectorsParams = []byte{129} + +func (t *TerminateSectorsParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufTerminateSectorsParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Terminations ([]miner.TerminationDeclaration) (slice) + if len(t.Terminations) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Terminations was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Terminations))); err != 
nil { + return err + } + for _, v := range t.Terminations { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *TerminateSectorsParams) UnmarshalCBOR(r io.Reader) error { + *t = TerminateSectorsParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Terminations ([]miner.TerminationDeclaration) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Terminations: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Terminations = make([]TerminationDeclaration, extra) + } + + for i := 0; i < int(extra); i++ { + + var v TerminationDeclaration + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Terminations[i] = v + } + + return nil +} + +var lengthBufTerminateSectorsReturn = []byte{129} + +func (t *TerminateSectorsReturn) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufTerminateSectorsReturn); err != nil { + return err + } + + // t.Done (bool) (bool) + if err := cbg.WriteBool(w, t.Done); err != nil { + return err + } + return nil +} + +func (t *TerminateSectorsReturn) UnmarshalCBOR(r io.Reader) error { + *t = TerminateSectorsReturn{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Done (bool) 
(bool) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.Done = false + case 21: + t.Done = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + return nil +} + +var lengthBufChangePeerIDParams = []byte{129} + +func (t *ChangePeerIDParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufChangePeerIDParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.NewID ([]uint8) (slice) + if len(t.NewID) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.NewID was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.NewID))); err != nil { + return err + } + + if _, err := w.Write(t.NewID[:]); err != nil { + return err + } + return nil +} + +func (t *ChangePeerIDParams) UnmarshalCBOR(r io.Reader) error { + *t = ChangePeerIDParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.NewID ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.NewID: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.NewID = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.NewID[:]); err != nil { + return err + } + return nil +} + +var lengthBufChangeMultiaddrsParams = []byte{129} + +func (t *ChangeMultiaddrsParams) 
MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufChangeMultiaddrsParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.NewMultiaddrs ([][]uint8) (slice) + if len(t.NewMultiaddrs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.NewMultiaddrs was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.NewMultiaddrs))); err != nil { + return err + } + for _, v := range t.NewMultiaddrs { + if len(v) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field v was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(v))); err != nil { + return err + } + + if _, err := w.Write(v[:]); err != nil { + return err + } + } + return nil +} + +func (t *ChangeMultiaddrsParams) UnmarshalCBOR(r io.Reader) error { + *t = ChangeMultiaddrsParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.NewMultiaddrs ([][]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.NewMultiaddrs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.NewMultiaddrs = make([][]uint8, extra) + } + + for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.NewMultiaddrs[i]: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + 
return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.NewMultiaddrs[i] = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.NewMultiaddrs[i][:]); err != nil { + return err + } + } + } + + return nil +} + +var lengthBufProveCommitSectorParams = []byte{130} + +func (t *ProveCommitSectorParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufProveCommitSectorParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.SectorNumber (abi.SectorNumber) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { + return err + } + + // t.Proof ([]uint8) (slice) + if len(t.Proof) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Proof was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Proof))); err != nil { + return err + } + + if _, err := w.Write(t.Proof[:]); err != nil { + return err + } + return nil +} + +func (t *ProveCommitSectorParams) UnmarshalCBOR(r io.Reader) error { + *t = ProveCommitSectorParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.SectorNumber (abi.SectorNumber) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorNumber = abi.SectorNumber(extra) + + } + // t.Proof ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Proof: byte array too large 
(%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Proof = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Proof[:]); err != nil { + return err + } + return nil +} + +var lengthBufChangeWorkerAddressParams = []byte{130} + +func (t *ChangeWorkerAddressParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufChangeWorkerAddressParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.NewWorker (address.Address) (struct) + if err := t.NewWorker.MarshalCBOR(w); err != nil { + return err + } + + // t.NewControlAddrs ([]address.Address) (slice) + if len(t.NewControlAddrs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.NewControlAddrs was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.NewControlAddrs))); err != nil { + return err + } + for _, v := range t.NewControlAddrs { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *ChangeWorkerAddressParams) UnmarshalCBOR(r io.Reader) error { + *t = ChangeWorkerAddressParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.NewWorker (address.Address) (struct) + + { + + if err := t.NewWorker.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.NewWorker: %w", err) + } + + } + // t.NewControlAddrs ([]address.Address) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.NewControlAddrs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { 
+ return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.NewControlAddrs = make([]address.Address, extra) + } + + for i := 0; i < int(extra); i++ { + + var v address.Address + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.NewControlAddrs[i] = v + } + + return nil +} + +var lengthBufExtendSectorExpirationParams = []byte{129} + +func (t *ExtendSectorExpirationParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufExtendSectorExpirationParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Extensions ([]miner.ExpirationExtension) (slice) + if len(t.Extensions) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Extensions was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Extensions))); err != nil { + return err + } + for _, v := range t.Extensions { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *ExtendSectorExpirationParams) UnmarshalCBOR(r io.Reader) error { + *t = ExtendSectorExpirationParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Extensions ([]miner.ExpirationExtension) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Extensions: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Extensions = make([]ExpirationExtension, extra) + } + + for i := 0; i < int(extra); i++ { + + var v ExpirationExtension + if err := v.UnmarshalCBOR(br); err != nil { + 
return err + } + + t.Extensions[i] = v + } + + return nil +} + +var lengthBufDeclareFaultsParams = []byte{129} + +func (t *DeclareFaultsParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufDeclareFaultsParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Faults ([]miner.FaultDeclaration) (slice) + if len(t.Faults) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Faults was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Faults))); err != nil { + return err + } + for _, v := range t.Faults { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *DeclareFaultsParams) UnmarshalCBOR(r io.Reader) error { + *t = DeclareFaultsParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Faults ([]miner.FaultDeclaration) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Faults: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Faults = make([]FaultDeclaration, extra) + } + + for i := 0; i < int(extra); i++ { + + var v FaultDeclaration + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Faults[i] = v + } + + return nil +} + +var lengthBufDeclareFaultsRecoveredParams = []byte{129} + +func (t *DeclareFaultsRecoveredParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufDeclareFaultsRecoveredParams); err != 
nil { + return err + } + + scratch := make([]byte, 9) + + // t.Recoveries ([]miner.RecoveryDeclaration) (slice) + if len(t.Recoveries) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Recoveries was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Recoveries))); err != nil { + return err + } + for _, v := range t.Recoveries { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *DeclareFaultsRecoveredParams) UnmarshalCBOR(r io.Reader) error { + *t = DeclareFaultsRecoveredParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Recoveries ([]miner.RecoveryDeclaration) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Recoveries: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Recoveries = make([]RecoveryDeclaration, extra) + } + + for i := 0; i < int(extra); i++ { + + var v RecoveryDeclaration + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Recoveries[i] = v + } + + return nil +} + +var lengthBufReportConsensusFaultParams = []byte{131} + +func (t *ReportConsensusFaultParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufReportConsensusFaultParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.BlockHeader1 ([]uint8) (slice) + if len(t.BlockHeader1) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.BlockHeader1 was too long") + } + + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.BlockHeader1))); err != nil { + return err + } + + if _, err := w.Write(t.BlockHeader1[:]); err != nil { + return err + } + + // t.BlockHeader2 ([]uint8) (slice) + if len(t.BlockHeader2) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.BlockHeader2 was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.BlockHeader2))); err != nil { + return err + } + + if _, err := w.Write(t.BlockHeader2[:]); err != nil { + return err + } + + // t.BlockHeaderExtra ([]uint8) (slice) + if len(t.BlockHeaderExtra) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.BlockHeaderExtra was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.BlockHeaderExtra))); err != nil { + return err + } + + if _, err := w.Write(t.BlockHeaderExtra[:]); err != nil { + return err + } + return nil +} + +func (t *ReportConsensusFaultParams) UnmarshalCBOR(r io.Reader) error { + *t = ReportConsensusFaultParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.BlockHeader1 ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.BlockHeader1: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.BlockHeader1 = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.BlockHeader1[:]); err != nil { + return err + } + // t.BlockHeader2 ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { 
+ return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.BlockHeader2: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.BlockHeader2 = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.BlockHeader2[:]); err != nil { + return err + } + // t.BlockHeaderExtra ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.BlockHeaderExtra: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.BlockHeaderExtra = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.BlockHeaderExtra[:]); err != nil { + return err + } + return nil +} + +var lengthBufGetControlAddressesReturn = []byte{131} + +func (t *GetControlAddressesReturn) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufGetControlAddressesReturn); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Owner (address.Address) (struct) + if err := t.Owner.MarshalCBOR(w); err != nil { + return err + } + + // t.Worker (address.Address) (struct) + if err := t.Worker.MarshalCBOR(w); err != nil { + return err + } + + // t.ControlAddrs ([]address.Address) (slice) + if len(t.ControlAddrs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.ControlAddrs was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.ControlAddrs))); err != nil { + return err + } + for _, v := range t.ControlAddrs { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *GetControlAddressesReturn) UnmarshalCBOR(r io.Reader) error { + *t = GetControlAddressesReturn{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := 
cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Owner (address.Address) (struct) + + { + + if err := t.Owner.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Owner: %w", err) + } + + } + // t.Worker (address.Address) (struct) + + { + + if err := t.Worker.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Worker: %w", err) + } + + } + // t.ControlAddrs ([]address.Address) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.ControlAddrs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.ControlAddrs = make([]address.Address, extra) + } + + for i := 0; i < int(extra); i++ { + + var v address.Address + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.ControlAddrs[i] = v + } + + return nil +} + +var lengthBufCheckSectorProvenParams = []byte{129} + +func (t *CheckSectorProvenParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufCheckSectorProvenParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.SectorNumber (abi.SectorNumber) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorNumber)); err != nil { + return err + } + + return nil +} + +func (t *CheckSectorProvenParams) UnmarshalCBOR(r io.Reader) error { + *t = CheckSectorProvenParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } 
+ + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.SectorNumber (abi.SectorNumber) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorNumber = abi.SectorNumber(extra) + + } + return nil +} + +var lengthBufWithdrawBalanceParams = []byte{129} + +func (t *WithdrawBalanceParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufWithdrawBalanceParams); err != nil { + return err + } + + // t.AmountRequested (big.Int) (struct) + if err := t.AmountRequested.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *WithdrawBalanceParams) UnmarshalCBOR(r io.Reader) error { + *t = WithdrawBalanceParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.AmountRequested (big.Int) (struct) + + { + + if err := t.AmountRequested.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.AmountRequested: %w", err) + } + + } + return nil +} + +var lengthBufCompactPartitionsParams = []byte{130} + +func (t *CompactPartitionsParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufCompactPartitionsParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Deadline (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { + return err + } + + // t.Partitions (bitfield.BitField) (struct) + if err := t.Partitions.MarshalCBOR(w); err != nil { 
+ return err + } + return nil +} + +func (t *CompactPartitionsParams) UnmarshalCBOR(r io.Reader) error { + *t = CompactPartitionsParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Deadline (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Deadline = uint64(extra) + + } + // t.Partitions (bitfield.BitField) (struct) + + { + + if err := t.Partitions.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Partitions: %w", err) + } + + } + return nil +} + +var lengthBufCompactSectorNumbersParams = []byte{129} + +func (t *CompactSectorNumbersParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufCompactSectorNumbersParams); err != nil { + return err + } + + // t.MaskSectorNumbers (bitfield.BitField) (struct) + if err := t.MaskSectorNumbers.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *CompactSectorNumbersParams) UnmarshalCBOR(r io.Reader) error { + *t = CompactSectorNumbersParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.MaskSectorNumbers (bitfield.BitField) (struct) + + { + + if err := t.MaskSectorNumbers.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.MaskSectorNumbers: %w", err) + } + + } + 
return nil +} + +var lengthBufCronEventPayload = []byte{129} + +func (t *CronEventPayload) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufCronEventPayload); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.EventType (miner.CronEventType) (int64) + if t.EventType >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EventType)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EventType-1)); err != nil { + return err + } + } + return nil +} + +func (t *CronEventPayload) UnmarshalCBOR(r io.Reader) error { + *t = CronEventPayload{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.EventType (miner.CronEventType) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.EventType = CronEventType(extraI) + } + return nil +} + +var lengthBufFaultDeclaration = []byte{131} + +func (t *FaultDeclaration) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufFaultDeclaration); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Deadline (uint64) (uint64) + + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { + return err + } + + // t.Partition (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Partition)); err != nil { + return err + } + + // t.Sectors (bitfield.BitField) (struct) + if err := t.Sectors.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *FaultDeclaration) UnmarshalCBOR(r io.Reader) error { + *t = FaultDeclaration{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Deadline (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Deadline = uint64(extra) + + } + // t.Partition (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Partition = uint64(extra) + + } + // t.Sectors (bitfield.BitField) (struct) + + { + + if err := t.Sectors.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Sectors: %w", err) + } + + } + return nil +} + +var lengthBufRecoveryDeclaration = []byte{131} + +func (t *RecoveryDeclaration) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufRecoveryDeclaration); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Deadline (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { + return err + } + + // 
t.Partition (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Partition)); err != nil { + return err + } + + // t.Sectors (bitfield.BitField) (struct) + if err := t.Sectors.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *RecoveryDeclaration) UnmarshalCBOR(r io.Reader) error { + *t = RecoveryDeclaration{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Deadline (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Deadline = uint64(extra) + + } + // t.Partition (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Partition = uint64(extra) + + } + // t.Sectors (bitfield.BitField) (struct) + + { + + if err := t.Sectors.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Sectors: %w", err) + } + + } + return nil +} + +var lengthBufExpirationExtension = []byte{132} + +func (t *ExpirationExtension) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufExpirationExtension); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Deadline (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { + return err + } + + // t.Partition (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, 
uint64(t.Partition)); err != nil { + return err + } + + // t.Sectors (bitfield.BitField) (struct) + if err := t.Sectors.MarshalCBOR(w); err != nil { + return err + } + + // t.NewExpiration (abi.ChainEpoch) (int64) + if t.NewExpiration >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.NewExpiration)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.NewExpiration-1)); err != nil { + return err + } + } + return nil +} + +func (t *ExpirationExtension) UnmarshalCBOR(r io.Reader) error { + *t = ExpirationExtension{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Deadline (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Deadline = uint64(extra) + + } + // t.Partition (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Partition = uint64(extra) + + } + // t.Sectors (bitfield.BitField) (struct) + + { + + if err := t.Sectors.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Sectors: %w", err) + } + + } + // t.NewExpiration (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI 
< 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.NewExpiration = abi.ChainEpoch(extraI) + } + return nil +} + +var lengthBufTerminationDeclaration = []byte{131} + +func (t *TerminationDeclaration) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufTerminationDeclaration); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Deadline (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Deadline)); err != nil { + return err + } + + // t.Partition (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Partition)); err != nil { + return err + } + + // t.Sectors (bitfield.BitField) (struct) + if err := t.Sectors.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *TerminationDeclaration) UnmarshalCBOR(r io.Reader) error { + *t = TerminationDeclaration{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Deadline (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Deadline = uint64(extra) + + } + // t.Partition (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Partition = uint64(extra) + + } + // t.Sectors (bitfield.BitField) (struct) + + { + + if err := 
t.Sectors.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Sectors: %w", err) + } + + } + return nil +} + +var lengthBufPoStPartition = []byte{130} + +func (t *PoStPartition) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufPoStPartition); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Index (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Index)); err != nil { + return err + } + + // t.Skipped (bitfield.BitField) (struct) + if err := t.Skipped.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *PoStPartition) UnmarshalCBOR(r io.Reader) error { + *t = PoStPartition{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Index (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Index = uint64(extra) + + } + // t.Skipped (bitfield.BitField) (struct) + + { + + if err := t.Skipped.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Skipped: %w", err) + } + + } + return nil +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/miner/deadlines.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/miner/deadlines.go new file mode 100644 index 0000000000..acb1928e4f --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/miner/deadlines.go @@ -0,0 +1,68 @@ +package miner + +import ( + "errors" + + "github.com/filecoin-project/go-state-types/abi" + 
"github.com/filecoin-project/go-state-types/dline" + "golang.org/x/xerrors" + + "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +// Returns deadline-related calculations for a deadline in some proving period and the current epoch. +func NewDeadlineInfo(periodStart abi.ChainEpoch, deadlineIdx uint64, currEpoch abi.ChainEpoch) *dline.Info { + return dline.NewInfo(periodStart, deadlineIdx, currEpoch, WPoStPeriodDeadlines, WPoStProvingPeriod, WPoStChallengeWindow, WPoStChallengeLookback, FaultDeclarationCutoff) +} + +func QuantSpecForDeadline(di *dline.Info) QuantSpec { + return NewQuantSpec(WPoStProvingPeriod, di.Last()) +} + +// FindSector returns the deadline and partition index for a sector number. +// It returns an error if the sector number is not tracked by deadlines. +func FindSector(store adt.Store, deadlines *Deadlines, sectorNum abi.SectorNumber) (uint64, uint64, error) { + for dlIdx := range deadlines.Due { + dl, err := deadlines.LoadDeadline(store, uint64(dlIdx)) + if err != nil { + return 0, 0, err + } + + partitions, err := adt.AsArray(store, dl.Partitions) + if err != nil { + return 0, 0, err + } + var partition Partition + + partIdx := uint64(0) + stopErr := errors.New("stop") + err = partitions.ForEach(&partition, func(i int64) error { + found, err := partition.Sectors.IsSet(uint64(sectorNum)) + if err != nil { + return err + } + if found { + partIdx = uint64(i) + return stopErr + } + return nil + }) + if err == stopErr { + return uint64(dlIdx), partIdx, nil + } else if err != nil { + return 0, 0, err + } + + } + return 0, 0, xerrors.Errorf("sector %d not due at any deadline", sectorNum) +} + +// Returns true if the deadline at the given index is currently mutable. +func deadlineIsMutable(provingPeriodStart abi.ChainEpoch, dlIdx uint64, currentEpoch abi.ChainEpoch) bool { + // Get the next non-elapsed deadline (i.e., the next time we care about + // mutations to the deadline). 
+ dlInfo := NewDeadlineInfo(provingPeriodStart, dlIdx, currentEpoch).NextNotElapsed() + // Ensure that the current epoch is at least one challenge window before + // that deadline opens. + return currentEpoch < dlInfo.Open-WPoStChallengeWindow +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/miner/miner_actor.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/miner/miner_actor.go new file mode 100644 index 0000000000..f1dbb17e1f --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/miner/miner_actor.go @@ -0,0 +1,2286 @@ +package miner + +import ( + "bytes" + "encoding/binary" + "fmt" + "math" + + addr "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" + rtt "github.com/filecoin-project/go-state-types/rt" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/specs-actors/actors/builtin/market" + "github.com/filecoin-project/specs-actors/actors/builtin/power" + "github.com/filecoin-project/specs-actors/actors/builtin/reward" + "github.com/filecoin-project/specs-actors/actors/runtime" + "github.com/filecoin-project/specs-actors/actors/runtime/proof" + . 
"github.com/filecoin-project/specs-actors/actors/util" + "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/specs-actors/actors/util/smoothing" +) + +type Runtime = runtime.Runtime + +type CronEventType int64 + +const ( + CronEventWorkerKeyChange CronEventType = iota + CronEventProvingDeadline + CronEventProcessEarlyTerminations +) + +type CronEventPayload struct { + EventType CronEventType +} + +// Identifier for a single partition within a miner. +type PartitionKey struct { + Deadline uint64 + Partition uint64 +} + +type Actor struct{} + +func (a Actor) Exports() []interface{} { + return []interface{}{ + builtin.MethodConstructor: a.Constructor, + 2: a.ControlAddresses, + 3: a.ChangeWorkerAddress, + 4: a.ChangePeerID, + 5: a.SubmitWindowedPoSt, + 6: a.PreCommitSector, + 7: a.ProveCommitSector, + 8: a.ExtendSectorExpiration, + 9: a.TerminateSectors, + 10: a.DeclareFaults, + 11: a.DeclareFaultsRecovered, + 12: a.OnDeferredCronEvent, + 13: a.CheckSectorProven, + 14: a.AddLockedFund, + 15: a.ReportConsensusFault, + 16: a.WithdrawBalance, + 17: a.ConfirmSectorProofsValid, + 18: a.ChangeMultiaddrs, + 19: a.CompactPartitions, + 20: a.CompactSectorNumbers, + } +} + +func (a Actor) Code() cid.Cid { + return builtin.StorageMinerActorCodeID +} + +func (a Actor) State() cbor.Er { + return new(State) +} + +var _ runtime.VMActor = Actor{} + +///////////////// +// Constructor // +///////////////// + +// Storage miner actors are created exclusively by the storage power actor. In order to break a circular dependency +// between the two, the construction parameters are defined in the power actor. 
+type ConstructorParams = power.MinerConstructorParams + +func (a Actor) Constructor(rt Runtime, params *ConstructorParams) *abi.EmptyValue { + rt.ValidateImmediateCallerIs(builtin.InitActorAddr) + + _, ok := SupportedProofTypes[params.SealProofType] + if !ok { + rt.Abortf(exitcode.ErrIllegalArgument, "proof type %d not allowed for new miner actors", params.SealProofType) + } + + owner := resolveControlAddress(rt, params.OwnerAddr) + worker := resolveWorkerAddress(rt, params.WorkerAddr) + controlAddrs := make([]addr.Address, 0, len(params.ControlAddrs)) + for _, ca := range params.ControlAddrs { + resolved := resolveControlAddress(rt, ca) + controlAddrs = append(controlAddrs, resolved) + } + + emptyMap, err := adt.MakeEmptyMap(adt.AsStore(rt)).Root() + if err != nil { + rt.Abortf(exitcode.ErrIllegalState, "failed to construct initial state: %v", err) + } + + emptyArray, err := adt.MakeEmptyArray(adt.AsStore(rt)).Root() + if err != nil { + rt.Abortf(exitcode.ErrIllegalState, "failed to construct initial state: %v", err) + } + + emptyBitfield := bitfield.NewFromSet(nil) + emptyBitfieldCid := rt.StorePut(emptyBitfield) + + emptyDeadline := ConstructDeadline(emptyArray) + emptyDeadlineCid := rt.StorePut(emptyDeadline) + + emptyDeadlines := ConstructDeadlines(emptyDeadlineCid) + emptyVestingFunds := ConstructVestingFunds() + emptyDeadlinesCid := rt.StorePut(emptyDeadlines) + emptyVestingFundsCid := rt.StorePut(emptyVestingFunds) + + currEpoch := rt.CurrEpoch() + offset, err := assignProvingPeriodOffset(rt.Receiver(), currEpoch, rt.HashBlake2b) + builtin.RequireNoErr(rt, err, exitcode.ErrSerialization, "failed to assign proving period offset") + periodStart := nextProvingPeriodStart(currEpoch, offset) + Assert(periodStart > currEpoch) + + info, err := ConstructMinerInfo(owner, worker, controlAddrs, params.PeerId, params.Multiaddrs, params.SealProofType) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "failed to construct initial miner info") + infoCid := 
rt.StorePut(info) + + state, err := ConstructState(infoCid, periodStart, emptyBitfieldCid, emptyArray, emptyMap, emptyDeadlinesCid, emptyVestingFundsCid) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "failed to construct state") + rt.StateCreate(state) + + // Register first cron callback for epoch before the first proving period starts. + enrollCronEvent(rt, periodStart-1, &CronEventPayload{ + EventType: CronEventProvingDeadline, + }) + return nil +} + +///////////// +// Control // +///////////// + +type GetControlAddressesReturn struct { + Owner addr.Address + Worker addr.Address + ControlAddrs []addr.Address +} + +func (a Actor) ControlAddresses(rt Runtime, _ *abi.EmptyValue) *GetControlAddressesReturn { + rt.ValidateImmediateCallerAcceptAny() + var st State + rt.StateReadonly(&st) + info := getMinerInfo(rt, &st) + return &GetControlAddressesReturn{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddrs: info.ControlAddresses, + } +} + +type ChangeWorkerAddressParams struct { + NewWorker addr.Address + NewControlAddrs []addr.Address +} + +// ChangeWorkerAddress will ALWAYS overwrite the existing control addresses with the control addresses passed in the params. +// If a nil addresses slice is passed, the control addresses will be cleared. +// A worker change will be scheduled if the worker passed in the params is different from the existing worker. +func (a Actor) ChangeWorkerAddress(rt Runtime, params *ChangeWorkerAddressParams) *abi.EmptyValue { + var effectiveEpoch abi.ChainEpoch + + newWorker := resolveWorkerAddress(rt, params.NewWorker) + + var controlAddrs []addr.Address + for _, ca := range params.NewControlAddrs { + resolved := resolveControlAddress(rt, ca) + controlAddrs = append(controlAddrs, resolved) + } + + var st State + isWorkerChange := false + rt.StateTransaction(&st, func() { + info := getMinerInfo(rt, &st) + + // Only the Owner is allowed to change the newWorker and control addresses. 
+ rt.ValidateImmediateCallerIs(info.Owner) + + { + // save the new control addresses + info.ControlAddresses = controlAddrs + } + + { + // save newWorker addr key change request + // This may replace another pending key change. + if newWorker != info.Worker { + isWorkerChange = true + effectiveEpoch = rt.CurrEpoch() + WorkerKeyChangeDelay + + info.PendingWorkerKey = &WorkerKeyChange{ + NewWorker: newWorker, + EffectiveAt: effectiveEpoch, + } + } + } + + err := st.SaveInfo(adt.AsStore(rt), info) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "could not save miner info") + }) + + // we only need to enroll the cron event for newWorker key change as we change the control + // addresses immediately + if isWorkerChange { + cronPayload := CronEventPayload{ + EventType: CronEventWorkerKeyChange, + } + enrollCronEvent(rt, effectiveEpoch, &cronPayload) + } + + return nil +} + +type ChangePeerIDParams struct { + NewID abi.PeerID +} + +func (a Actor) ChangePeerID(rt Runtime, params *ChangePeerIDParams) *abi.EmptyValue { + // TODO: Consider limiting the maximum number of bytes used by the peer ID on-chain. + // https://github.com/filecoin-project/specs-actors/issues/712 + var st State + rt.StateTransaction(&st, func() { + info := getMinerInfo(rt, &st) + + rt.ValidateImmediateCallerIs(append(info.ControlAddresses, info.Owner, info.Worker)...) + + info.PeerId = params.NewID + err := st.SaveInfo(adt.AsStore(rt), info) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "could not save miner info") + }) + return nil +} + +type ChangeMultiaddrsParams struct { + NewMultiaddrs []abi.Multiaddrs +} + +func (a Actor) ChangeMultiaddrs(rt Runtime, params *ChangeMultiaddrsParams) *abi.EmptyValue { + // TODO: Consider limiting the maximum number of bytes used by multiaddrs on-chain. 
+ // https://github.com/filecoin-project/specs-actors/issues/712 + var st State + rt.StateTransaction(&st, func() { + info := getMinerInfo(rt, &st) + + rt.ValidateImmediateCallerIs(append(info.ControlAddresses, info.Owner, info.Worker)...) + + info.Multiaddrs = params.NewMultiaddrs + err := st.SaveInfo(adt.AsStore(rt), info) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "could not save miner info") + }) + return nil +} + +////////////////// +// WindowedPoSt // +////////////////// + +type PoStPartition struct { + // Partitions are numbered per-deadline, from zero. + Index uint64 + // Sectors skipped while proving that weren't already declared faulty + Skipped bitfield.BitField +} + +// Information submitted by a miner to provide a Window PoSt. +type SubmitWindowedPoStParams struct { + // The deadline index which the submission targets. + Deadline uint64 + // The partitions being proven. + Partitions []PoStPartition + // Array of proofs, one per distinct registered proof type present in the sectors being proven. + // In the usual case of a single proof type, this array will always have a single element (independent of number of partitions). + Proofs []proof.PoStProof + // The epoch at which these proofs is being committed to a particular chain. 
+ ChainCommitEpoch abi.ChainEpoch + // The ticket randomness on the chain at the ChainCommitEpoch on the chain this post is committed to + ChainCommitRand abi.Randomness +} + +// Invoked by miner's worker address to submit their fallback post +func (a Actor) SubmitWindowedPoSt(rt Runtime, params *SubmitWindowedPoStParams) *abi.EmptyValue { + currEpoch := rt.CurrEpoch() + store := adt.AsStore(rt) + networkVersion := rt.NetworkVersion() + var st State + + if params.Deadline >= WPoStPeriodDeadlines { + rt.Abortf(exitcode.ErrIllegalArgument, "invalid deadline %d of %d", params.Deadline, WPoStPeriodDeadlines) + } + if params.ChainCommitEpoch >= currEpoch { + rt.Abortf(exitcode.ErrIllegalArgument, "PoSt chain commitment %d must be in the past", params.ChainCommitEpoch) + } + if params.ChainCommitEpoch < currEpoch-WPoStMaxChainCommitAge { + rt.Abortf(exitcode.ErrIllegalArgument, "PoSt chain commitment %d too far in the past, must be after %d", params.ChainCommitEpoch, currEpoch-WPoStMaxChainCommitAge) + } + commRand := rt.GetRandomnessFromTickets(crypto.DomainSeparationTag_PoStChainCommit, params.ChainCommitEpoch, nil) + if !bytes.Equal(commRand, params.ChainCommitRand) { + rt.Abortf(exitcode.ErrIllegalArgument, "post commit randomness mismatched") + } + // TODO: limit the length of proofs array https://github.com/filecoin-project/specs-actors/issues/416 + + // Get the total power/reward. We need these to compute penalties. + rewardStats := requestCurrentEpochBlockReward(rt) + pwrTotal := requestCurrentTotalPower(rt) + + penaltyTotal := abi.NewTokenAmount(0) + pledgeDelta := abi.NewTokenAmount(0) + var postResult *PoStResult + + var info *MinerInfo + rt.StateTransaction(&st, func() { + info = getMinerInfo(rt, &st) + + rt.ValidateImmediateCallerIs(append(info.ControlAddresses, info.Owner, info.Worker)...) + + // Validate that the miner didn't try to prove too many partitions at once. 
+ submissionPartitionLimit := loadPartitionsSectorsMax(info.WindowPoStPartitionSectors) + if uint64(len(params.Partitions)) > submissionPartitionLimit { + rt.Abortf(exitcode.ErrIllegalArgument, "too many partitions %d, limit %d", len(params.Partitions), submissionPartitionLimit) + } + + // Load and check deadline. + currDeadline := st.DeadlineInfo(currEpoch) + deadlines, err := st.LoadDeadlines(adt.AsStore(rt)) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadlines") + + // Check that the miner state indicates that the current proving deadline has started. + // This should only fail if the cron actor wasn't invoked, and matters only in case that it hasn't been + // invoked for a whole proving period, and hence the missed PoSt submissions from the prior occurrence + // of this deadline haven't been processed yet. + if !currDeadline.IsOpen() { + rt.Abortf(exitcode.ErrIllegalState, "proving period %d not yet open at %d", currDeadline.PeriodStart, currEpoch) + } + + // The miner may only submit a proof for the current deadline. + if params.Deadline != currDeadline.Index { + rt.Abortf(exitcode.ErrIllegalArgument, "invalid deadline %d at epoch %d, expected %d", + params.Deadline, currEpoch, currDeadline.Index) + } + + sectors, err := LoadSectors(store, st.Sectors) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load sectors") + + deadline, err := deadlines.LoadDeadline(store, params.Deadline) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadline %d", params.Deadline) + + // Record proven sectors/partitions, returning updates to power and the final set of sectors + // proven/skipped. + // + // NOTE: This function does not actually check the proofs but does assume that they'll be + // successfully validated. The actual proof verification is done below in verifyWindowedPost. + // + // If proof verification fails, the this deadline MUST NOT be saved and this function should + // be aborted. 
+ faultExpiration := currDeadline.Last() + FaultMaxAge + postResult, err = deadline.RecordProvenSectors(store, sectors, info.SectorSize, QuantSpecForDeadline(currDeadline), faultExpiration, params.Partitions) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to process post submission for deadline %d", params.Deadline) + + // Validate proofs + + // Load sector infos for proof, substituting a known-good sector for known-faulty sectors. + // Note: this is slightly sub-optimal, loading info for the recovering sectors again after they were already + // loaded above. + sectorInfos, err := st.LoadSectorInfosForProof(store, postResult.Sectors, postResult.IgnoredSectors) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load proven sector info") + + // Skip verification if all sectors are faults. + // We still need to allow this call to succeed so the miner can declare a whole partition as skipped. + if len(sectorInfos) > 0 { + // Verify the proof. + // A failed verification doesn't immediately cause a penalty; the miner can try again. + // + // This function aborts on failure. + verifyWindowedPost(rt, currDeadline.Challenge, sectorInfos, params.Proofs) + } + + // Penalize new skipped faults and retracted recoveries as undeclared faults. + // These pay a higher fee than faults declared before the deadline challenge window opened. + undeclaredPenaltyPower := postResult.PenaltyPower() + undeclaredPenaltyTarget := big.Zero() + if networkVersion >= network.Version3 { + // From version 3, skipped faults and retracted recoveries pay nothing at Window PoSt, + // but will incur the "ongoing" fault fee at deadline end. + } else { + undeclaredPenaltyTarget = PledgePenaltyForUndeclaredFault( + rewardStats.ThisEpochRewardSmoothed, pwrTotal.QualityAdjPowerSmoothed, undeclaredPenaltyPower.QA, + networkVersion, + ) + // Subtract the "ongoing" fault fee from the amount charged now, since it will be charged at + // the end-of-deadline cron. 
+ undeclaredPenaltyTarget = big.Sub(undeclaredPenaltyTarget, PledgePenaltyForDeclaredFault( + rewardStats.ThisEpochRewardSmoothed, pwrTotal.QualityAdjPowerSmoothed, undeclaredPenaltyPower.QA, + networkVersion, + )) + } + + // Penalize recoveries as declared faults (a lower fee than the undeclared, above). + // It sounds odd, but because faults are penalized in arrears, at the _end_ of the faulty period, we must + // penalize recovered sectors here because they won't be penalized by the end-of-deadline cron for the + // immediately-prior faulty period. + declaredPenaltyTarget := big.Zero() + if networkVersion >= network.Version3 { + // From version 3, recovered sectors pay no penalty. + // They won't pay anything at deadline end either, since they'll no longer be faulty. + } else { + declaredPenaltyTarget = PledgePenaltyForDeclaredFault( + rewardStats.ThisEpochRewardSmoothed, pwrTotal.QualityAdjPowerSmoothed, postResult.RecoveredPower.QA, + networkVersion, + ) + } + + // Note: We could delay this charge until end of deadline, but that would require more accounting state. + totalPenaltyTarget := big.Add(undeclaredPenaltyTarget, declaredPenaltyTarget) + unlockedBalance := st.GetUnlockedBalance(rt.CurrentBalance()) + vestingPenaltyTotal, balancePenaltyTotal, err := st.PenalizeFundsInPriorityOrder(store, currEpoch, totalPenaltyTarget, unlockedBalance) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to unlock penalty for %v", undeclaredPenaltyPower) + penaltyTotal = big.Add(vestingPenaltyTotal, balancePenaltyTotal) + pledgeDelta = big.Sub(pledgeDelta, vestingPenaltyTotal) + + err = deadlines.UpdateDeadline(store, params.Deadline, deadline) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to update deadline %d", params.Deadline) + + err = st.SaveDeadlines(store, deadlines) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to save deadlines") + }) + + // Restore power for recovered sectors. Remove power for new faults. 
+ // NOTE: It would be permissible to delay the power loss until the deadline closes, but that would require + // additional accounting state. + // https://github.com/filecoin-project/specs-actors/issues/414 + requestUpdatePower(rt, postResult.PowerDelta()) + // Burn penalties. + burnFunds(rt, penaltyTotal) + notifyPledgeChanged(rt, pledgeDelta) + return nil +} + +/////////////////////// +// Sector Commitment // +/////////////////////// + +// Proposals must be posted on chain via sma.PublishStorageDeals before PreCommitSector. +// Optimization: PreCommitSector could contain a list of deals that are not published yet. +func (a Actor) PreCommitSector(rt Runtime, params *SectorPreCommitInfo) *abi.EmptyValue { + if _, ok := SupportedProofTypes[params.SealProof]; !ok { + rt.Abortf(exitcode.ErrIllegalArgument, "unsupported seal proof type: %s", params.SealProof) + } + if params.SectorNumber > abi.MaxSectorNumber { + rt.Abortf(exitcode.ErrIllegalArgument, "sector number %d out of range 0..(2^63-1)", params.SectorNumber) + } + if !params.SealedCID.Defined() { + rt.Abortf(exitcode.ErrIllegalArgument, "sealed CID undefined") + } + if params.SealedCID.Prefix() != SealedCIDPrefix { + rt.Abortf(exitcode.ErrIllegalArgument, "sealed CID had wrong prefix") + } + if params.SealRandEpoch >= rt.CurrEpoch() { + rt.Abortf(exitcode.ErrIllegalArgument, "seal challenge epoch %v must be before now %v", params.SealRandEpoch, rt.CurrEpoch()) + } + + challengeEarliest := sealChallengeEarliest(rt.CurrEpoch(), params.SealProof) + if params.SealRandEpoch < challengeEarliest { + // The subsequent commitment proof can't possibly be accepted because the seal challenge will be deemed + // too old. Note that passing this check doesn't guarantee the proof will be soon enough, depending on + // when it arrives. 
+ rt.Abortf(exitcode.ErrIllegalArgument, "seal challenge epoch %v too old, must be after %v", params.SealRandEpoch, challengeEarliest) + } + + if params.Expiration <= rt.CurrEpoch() { + rt.Abortf(exitcode.ErrIllegalArgument, "sector expiration %v must be after now (%v)", params.Expiration, rt.CurrEpoch()) + } + if params.ReplaceCapacity && len(params.DealIDs) == 0 { + rt.Abortf(exitcode.ErrIllegalArgument, "cannot replace sector without committing deals") + } + if params.ReplaceSectorDeadline >= WPoStPeriodDeadlines { + rt.Abortf(exitcode.ErrIllegalArgument, "invalid deadline %d", params.ReplaceSectorDeadline) + } + if params.ReplaceSectorNumber > abi.MaxSectorNumber { + rt.Abortf(exitcode.ErrIllegalArgument, "invalid sector number %d", params.ReplaceSectorNumber) + } + + // gather information from other actors + + rewardStats := requestCurrentEpochBlockReward(rt) + pwrTotal := requestCurrentTotalPower(rt) + dealWeight := requestDealWeight(rt, params.DealIDs, rt.CurrEpoch(), params.Expiration) + + store := adt.AsStore(rt) + var st State + newlyVested := big.Zero() + rt.StateTransaction(&st, func() { + info := getMinerInfo(rt, &st) + + rt.ValidateImmediateCallerIs(append(info.ControlAddresses, info.Owner, info.Worker)...) + + if params.SealProof != info.SealProofType { + rt.Abortf(exitcode.ErrIllegalArgument, "sector seal proof %v must match miner seal proof type %d", params.SealProof, info.SealProofType) + } + + maxDealLimit := dealPerSectorLimit(info.SectorSize) + if uint64(len(params.DealIDs)) > maxDealLimit { + rt.Abortf(exitcode.ErrIllegalArgument, "too many deals for sector %d > %d", len(params.DealIDs), maxDealLimit) + } + + err := st.AllocateSectorNumber(store, params.SectorNumber) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to allocate sector id %d", params.SectorNumber) + + // The following two checks shouldn't be necessary, but it can't + // hurt to double-check (unless it's really just too + // expensive?). 
+ _, preCommitFound, err := st.GetPrecommittedSector(store, params.SectorNumber) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to check pre-commit %v", params.SectorNumber) + if preCommitFound { + rt.Abortf(exitcode.ErrIllegalState, "sector %v already pre-committed", params.SectorNumber) + } + + sectorFound, err := st.HasSectorNo(store, params.SectorNumber) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to check sector %v", params.SectorNumber) + if sectorFound { + rt.Abortf(exitcode.ErrIllegalState, "sector %v already committed", params.SectorNumber) + } + + // Require sector lifetime meets minimum by assuming activation happens at last epoch permitted for seal proof. + // This could make sector maximum lifetime validation more lenient if the maximum sector limit isn't hit first. + maxActivation := rt.CurrEpoch() + MaxSealDuration[params.SealProof] + validateExpiration(rt, maxActivation, params.Expiration, params.SealProof) + + depositMinimum := big.Zero() + if params.ReplaceCapacity { + replaceSector := validateReplaceSector(rt, &st, store, params) + // Note the replaced sector's initial pledge as a lower bound for the new sector's deposit + depositMinimum = replaceSector.InitialPledge + } + + newlyVested, err = st.UnlockVestedFunds(store, rt.CurrEpoch()) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to vest funds") + availableBalance := st.GetAvailableBalance(rt.CurrentBalance()) + duration := params.Expiration - rt.CurrEpoch() + + sectorWeight := QAPowerForWeight(info.SectorSize, duration, dealWeight.DealWeight, dealWeight.VerifiedDealWeight) + depositReq := big.Max( + PreCommitDepositForPower(rewardStats.ThisEpochRewardSmoothed, pwrTotal.QualityAdjPowerSmoothed, sectorWeight), + depositMinimum, + ) + if availableBalance.LessThan(depositReq) { + rt.Abortf(exitcode.ErrInsufficientFunds, "insufficient funds for pre-commit deposit: %v", depositReq) + } + + st.AddPreCommitDeposit(depositReq) + 
st.AssertBalanceInvariants(rt.CurrentBalance()) + + if err := st.PutPrecommittedSector(store, &SectorPreCommitOnChainInfo{ + Info: *params, + PreCommitDeposit: depositReq, + PreCommitEpoch: rt.CurrEpoch(), + DealWeight: dealWeight.DealWeight, + VerifiedDealWeight: dealWeight.VerifiedDealWeight, + }); err != nil { + rt.Abortf(exitcode.ErrIllegalState, "failed to write pre-committed sector %v: %v", params.SectorNumber, err) + } + // add precommit expiry to the queue + msd, ok := MaxSealDuration[params.SealProof] + if !ok { + rt.Abortf(exitcode.ErrIllegalArgument, "no max seal duration set for proof type: %d", params.SealProof) + } + // The +1 here is critical for the batch verification of proofs. Without it, if a proof arrived exactly on the + // due epoch, ProveCommitSector would accept it, then the expiry event would remove it, and then + // ConfirmSectorProofsValid would fail to find it. + expiryBound := rt.CurrEpoch() + msd + 1 + + err = st.AddPreCommitExpiry(store, expiryBound, params.SectorNumber) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to add pre-commit expiry to queue") + }) + + notifyPledgeChanged(rt, newlyVested.Neg()) + + return nil +} + +type ProveCommitSectorParams struct { + SectorNumber abi.SectorNumber + Proof []byte +} + +// Checks state of the corresponding sector pre-commitment, then schedules the proof to be verified in bulk +// by the power actor. +// If valid, the power actor will call ConfirmSectorProofsValid at the end of the same epoch as this message. +func (a Actor) ProveCommitSector(rt Runtime, params *ProveCommitSectorParams) *abi.EmptyValue { + rt.ValidateImmediateCallerAcceptAny() + + store := adt.AsStore(rt) + var st State + rt.StateReadonly(&st) + + // Verify locked funds are are at least the sum of sector initial pledges. + // Note that this call does not actually compute recent vesting, so the reported locked funds may be + // slightly higher than the true amount (i.e. slightly in the miner's favour). 
+ // Computing vesting here would be almost always redundant since vesting is quantized to ~daily units. + // Vesting will be at most one proving period old if computed in the cron callback. + verifyPledgeMeetsInitialRequirements(rt, &st) + + sectorNo := params.SectorNumber + precommit, found, err := st.GetPrecommittedSector(store, sectorNo) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load pre-committed sector %v", sectorNo) + if !found { + rt.Abortf(exitcode.ErrNotFound, "no pre-committed sector %v", sectorNo) + } + + msd, ok := MaxSealDuration[precommit.Info.SealProof] + if !ok { + rt.Abortf(exitcode.ErrIllegalState, "no max seal duration for proof type: %d", precommit.Info.SealProof) + } + proveCommitDue := precommit.PreCommitEpoch + msd + if rt.CurrEpoch() > proveCommitDue { + rt.Abortf(exitcode.ErrIllegalArgument, "commitment proof for %d too late at %d, due %d", sectorNo, rt.CurrEpoch(), proveCommitDue) + } + + svi := getVerifyInfo(rt, &SealVerifyStuff{ + SealedCID: precommit.Info.SealedCID, + InteractiveEpoch: precommit.PreCommitEpoch + PreCommitChallengeDelay, + SealRandEpoch: precommit.Info.SealRandEpoch, + Proof: params.Proof, + DealIDs: precommit.Info.DealIDs, + SectorNumber: precommit.Info.SectorNumber, + RegisteredSealProof: precommit.Info.SealProof, + }) + + code := rt.Send( + builtin.StoragePowerActorAddr, + builtin.MethodsPower.SubmitPoRepForBulkVerify, + svi, + abi.NewTokenAmount(0), + &builtin.Discard{}, + ) + builtin.RequireSuccess(rt, code, "failed to submit proof for bulk verification") + return nil +} + +func (a Actor) ConfirmSectorProofsValid(rt Runtime, params *builtin.ConfirmSectorProofsParams) *abi.EmptyValue { + rt.ValidateImmediateCallerIs(builtin.StoragePowerActorAddr) + + // get network stats from other actors + rewardStats := requestCurrentEpochBlockReward(rt) + pwrTotal := requestCurrentTotalPower(rt) + circulatingSupply := rt.TotalFilCircSupply() + + // 1. 
Activate deals, skipping pre-commits with invalid deals. + // - calls the market actor. + // 2. Reschedule replacement sector expiration. + // - loads and saves sectors + // - loads and saves deadlines/partitions + // 3. Add new sectors. + // - loads and saves sectors. + // - loads and saves deadlines/partitions + // + // Ideally, we'd combine some of these operations, but at least we have + // a constant number of them. + + var st State + rt.StateReadonly(&st) + store := adt.AsStore(rt) + info := getMinerInfo(rt, &st) + + // + // Activate storage deals. + // + + // This skips missing pre-commits. + precommittedSectors, err := st.FindPrecommittedSectors(store, params.Sectors...) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load pre-committed sectors") + + // Committed-capacity sectors licensed for early removal by new sectors being proven. + replaceSectors := make(DeadlineSectorMap) + // Pre-commits for new sectors. + var preCommits []*SectorPreCommitOnChainInfo + for _, precommit := range precommittedSectors { + // Check (and activate) storage deals associated to sector. Abort if checks failed. + // TODO: we should batch these calls... 
+ // https://github.com/filecoin-project/specs-actors/issues/474 + code := rt.Send( + builtin.StorageMarketActorAddr, + builtin.MethodsMarket.ActivateDeals, + &market.ActivateDealsParams{ + DealIDs: precommit.Info.DealIDs, + SectorExpiry: precommit.Info.Expiration, + }, + abi.NewTokenAmount(0), + &builtin.Discard{}, + ) + + if code != exitcode.Ok { + rt.Log(rtt.INFO, "failed to activate deals on sector %d, dropping from prove commit set", precommit.Info.SectorNumber) + continue + } + + preCommits = append(preCommits, precommit) + + if precommit.Info.ReplaceCapacity { + err := replaceSectors.AddValues( + precommit.Info.ReplaceSectorDeadline, + precommit.Info.ReplaceSectorPartition, + uint64(precommit.Info.ReplaceSectorNumber), + ) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "failed to record sectors for replacement") + + } + } + + // When all prove commits have failed abort early + if len(preCommits) == 0 { + rt.Abortf(exitcode.ErrIllegalArgument, "all prove commits failed to validate") + } + + var newPower PowerPair + totalPledge := big.Zero() + totalPrecommitDeposit := big.Zero() + newSectors := make([]*SectorOnChainInfo, 0) + newlyVested := big.Zero() + rt.StateTransaction(&st, func() { + // Schedule expiration for replaced sectors to the end of their next deadline window. + // They can't be removed right now because we want to challenge them immediately before termination. + err = st.RescheduleSectorExpirations(store, rt.CurrEpoch(), info.SectorSize, replaceSectors) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to replace sector expirations") + + newSectorNos := make([]abi.SectorNumber, 0, len(preCommits)) + for _, precommit := range preCommits { + // compute initial pledge + activation := rt.CurrEpoch() + duration := precommit.Info.Expiration - activation + + // This should have been caught in precommit, but don't let other sectors fail because of it. 
+ if duration < MinSectorExpiration { + rt.Log(rtt.WARN, "precommit %d has lifetime %d less than minimum. ignoring", precommit.Info.SectorNumber, duration, MinSectorExpiration) + continue + } + + power := QAPowerForWeight(info.SectorSize, duration, precommit.DealWeight, precommit.VerifiedDealWeight) + dayReward := ExpectedRewardForPower(rewardStats.ThisEpochRewardSmoothed, pwrTotal.QualityAdjPowerSmoothed, power, builtin.EpochsInDay) + // The storage pledge is recorded for use in computing the penalty if this sector is terminated + // before its declared expiration. + // It's not capped to 1 FIL for Space Race, so likely exceeds the actual initial pledge requirement. + storagePledge := ExpectedRewardForPower(rewardStats.ThisEpochRewardSmoothed, pwrTotal.QualityAdjPowerSmoothed, power, InitialPledgeProjectionPeriod) + + initialPledge := InitialPledgeForPower(power, rewardStats.ThisEpochBaselinePower, pwrTotal.PledgeCollateral, + rewardStats.ThisEpochRewardSmoothed, pwrTotal.QualityAdjPowerSmoothed, circulatingSupply) + + totalPrecommitDeposit = big.Add(totalPrecommitDeposit, precommit.PreCommitDeposit) + totalPledge = big.Add(totalPledge, initialPledge) + newSectorInfo := SectorOnChainInfo{ + SectorNumber: precommit.Info.SectorNumber, + SealProof: precommit.Info.SealProof, + SealedCID: precommit.Info.SealedCID, + DealIDs: precommit.Info.DealIDs, + Expiration: precommit.Info.Expiration, + Activation: activation, + DealWeight: precommit.DealWeight, + VerifiedDealWeight: precommit.VerifiedDealWeight, + InitialPledge: initialPledge, + ExpectedDayReward: dayReward, + ExpectedStoragePledge: storagePledge, + } + newSectors = append(newSectors, &newSectorInfo) + newSectorNos = append(newSectorNos, newSectorInfo.SectorNumber) + } + + err = st.PutSectors(store, newSectors...) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to put new sectors") + + err = st.DeletePrecommittedSectors(store, newSectorNos...) 
+ builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to delete precommited sectors") + + newPower, err = st.AssignSectorsToDeadlines(store, rt.CurrEpoch(), newSectors, info.WindowPoStPartitionSectors, info.SectorSize) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to assign new sectors to deadlines") + + // Add sector and pledge lock-up to miner state + newlyVested, err = st.UnlockVestedFunds(store, rt.CurrEpoch()) + if err != nil { + rt.Abortf(exitcode.ErrIllegalState, "failed to vest new funds: %s", err) + } + + // Unlock deposit for successful proofs, make it available for lock-up as initial pledge. + st.AddPreCommitDeposit(totalPrecommitDeposit.Neg()) + + availableBalance := st.GetAvailableBalance(rt.CurrentBalance()) + if availableBalance.LessThan(totalPledge) { + rt.Abortf(exitcode.ErrInsufficientFunds, "insufficient funds for aggregate initial pledge requirement %s, available: %s", totalPledge, availableBalance) + } + + st.AddInitialPledgeRequirement(totalPledge) + st.AssertBalanceInvariants(rt.CurrentBalance()) + }) + + // Request power and pledge update for activated sector. 
+ requestUpdatePower(rt, newPower) + notifyPledgeChanged(rt, big.Sub(totalPledge, newlyVested)) + + return nil +} + +type CheckSectorProvenParams struct { + SectorNumber abi.SectorNumber +} + +func (a Actor) CheckSectorProven(rt Runtime, params *CheckSectorProvenParams) *abi.EmptyValue { + rt.ValidateImmediateCallerAcceptAny() + + var st State + rt.StateReadonly(&st) + store := adt.AsStore(rt) + sectorNo := params.SectorNumber + + if _, found, err := st.GetSector(store, sectorNo); err != nil { + rt.Abortf(exitcode.ErrIllegalState, "failed to load proven sector %v", sectorNo) + } else if !found { + rt.Abortf(exitcode.ErrNotFound, "sector %v not proven", sectorNo) + } + return nil +} + +///////////////////////// +// Sector Modification // +///////////////////////// + +type ExtendSectorExpirationParams struct { + Extensions []ExpirationExtension +} + +type ExpirationExtension struct { + Deadline uint64 + Partition uint64 + Sectors bitfield.BitField + NewExpiration abi.ChainEpoch +} + +// Changes the expiration epoch for a sector to a new, later one. +// The sector must not be terminated or faulty. +// The sector's power is recomputed for the new expiration. 
+func (a Actor) ExtendSectorExpiration(rt Runtime, params *ExtendSectorExpirationParams) *abi.EmptyValue { + if uint64(len(params.Extensions)) > AddressedPartitionsMax { + rt.Abortf(exitcode.ErrIllegalArgument, "too many declarations %d, max %d", len(params.Extensions), AddressedPartitionsMax) + } + + // limit the number of sectors declared at once + // https://github.com/filecoin-project/specs-actors/issues/416 + var sectorCount uint64 + for _, decl := range params.Extensions { + if decl.Deadline >= WPoStPeriodDeadlines { + rt.Abortf(exitcode.ErrIllegalArgument, "deadline %d not in range 0..%d", decl.Deadline, WPoStPeriodDeadlines) + } + count, err := decl.Sectors.Count() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, + "failed to count sectors for deadline %d, partition %d", + decl.Deadline, decl.Partition, + ) + if sectorCount > math.MaxUint64-count { + rt.Abortf(exitcode.ErrIllegalArgument, "sector bitfield integer overflow") + } + sectorCount += count + } + if sectorCount > AddressedSectorsMax { + rt.Abortf(exitcode.ErrIllegalArgument, + "too many sectors for declaration %d, max %d", + sectorCount, AddressedSectorsMax, + ) + } + + powerDelta := NewPowerPairZero() + pledgeDelta := big.Zero() + store := adt.AsStore(rt) + var st State + rt.StateTransaction(&st, func() { + info := getMinerInfo(rt, &st) + + rt.ValidateImmediateCallerIs(append(info.ControlAddresses, info.Owner, info.Worker)...) + + deadlines, err := st.LoadDeadlines(adt.AsStore(rt)) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadlines") + + // Group declarations by deadline, and remember iteration order. + declsByDeadline := map[uint64][]*ExpirationExtension{} + var deadlinesToLoad []uint64 + for i := range params.Extensions { + // Take a pointer to the value inside the slice, don't + // take a reference to the temporary loop variable as it + // will be overwritten every iteration. 
+ decl := ¶ms.Extensions[i] + if _, ok := declsByDeadline[decl.Deadline]; !ok { + deadlinesToLoad = append(deadlinesToLoad, decl.Deadline) + } + declsByDeadline[decl.Deadline] = append(declsByDeadline[decl.Deadline], decl) + } + + sectors, err := LoadSectors(store, st.Sectors) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load sectors array") + + for _, dlIdx := range deadlinesToLoad { + deadline, err := deadlines.LoadDeadline(store, dlIdx) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadline %d", dlIdx) + + partitions, err := deadline.PartitionsArray(store) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load partitions for deadline %d", dlIdx) + + quant := st.QuantSpecForDeadline(dlIdx) + + for _, decl := range declsByDeadline[dlIdx] { + key := PartitionKey{dlIdx, decl.Partition} + var partition Partition + found, err := partitions.Get(decl.Partition, &partition) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load partition %v", key) + if !found { + rt.Abortf(exitcode.ErrNotFound, "no such partition %v", key) + } + + oldSectors, err := sectors.Load(decl.Sectors) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load sectors") + newSectors := make([]*SectorOnChainInfo, len(oldSectors)) + for i, sector := range oldSectors { + if decl.NewExpiration < sector.Expiration { + rt.Abortf(exitcode.ErrIllegalArgument, "cannot reduce sector expiration to %d from %d", + decl.NewExpiration, sector.Expiration) + } + validateExpiration(rt, sector.Activation, decl.NewExpiration, sector.SealProof) + + newSector := *sector + newSector.Expiration = decl.NewExpiration + + newSectors[i] = &newSector + } + + // Overwrite sector infos. + err = sectors.Store(newSectors...) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to update sectors %v", decl.Sectors) + + // Remove old sectors from partition and assign new sectors. 
+ partitionPowerDelta, partitionPledgeDelta, err := partition.ReplaceSectors(store, oldSectors, newSectors, info.SectorSize, quant)
+ builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to replace sector expirations at %v", key)
+
+ powerDelta = powerDelta.Add(partitionPowerDelta)
+ pledgeDelta = big.Add(pledgeDelta, partitionPledgeDelta) // expected to be zero, see note below.
+
+ err = partitions.Set(decl.Partition, &partition)
+ builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to save partition %v", key)
+ }
+
+ deadline.Partitions, err = partitions.Root()
+ builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to save partitions for deadline %d", dlIdx)
+
+ err = deadlines.UpdateDeadline(store, dlIdx, deadline)
+ builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to save deadline %d", dlIdx)
+ }
+
+ st.Sectors, err = sectors.Root()
+ builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to save sectors")
+
+ err = st.SaveDeadlines(store, deadlines)
+ builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to save deadlines")
+ })
+
+ requestUpdatePower(rt, powerDelta)
+ // Note: the pledge delta is expected to be zero, since pledge is not re-calculated for the extension.
+ // But in case that ever changes, we can do the right thing here.
+ notifyPledgeChanged(rt, pledgeDelta)
+ return nil
+}
+
+type TerminateSectorsParams struct {
+ Terminations []TerminationDeclaration
+}
+
+type TerminationDeclaration struct {
+ Deadline uint64
+ Partition uint64
+ Sectors bitfield.BitField
+}
+
+type TerminateSectorsReturn struct {
+ // Set to true if all early termination work has been completed. When
+ // false, the miner may choose to repeatedly invoke TerminateSectors
+ // with no new sectors to process the remainder of the pending
+ // terminations. While pending terminations are outstanding, the miner
+ // will not be able to withdraw funds.
+ Done bool +} + +// Marks some sectors as terminated at the present epoch, earlier than their +// scheduled termination, and adds these sectors to the early termination queue. +// This method then processes up to AddressedSectorsMax sectors and +// AddressedPartitionsMax partitions from the early termination queue, +// terminating deals, paying fines, and returning pledge collateral. While +// sectors remain in this queue: +// +// 1. The miner will be unable to withdraw funds. +// 2. The chain will process up to AddressedSectorsMax sectors and +// AddressedPartitionsMax per epoch until the queue is empty. +// +// The sectors are immediately ignored for Window PoSt proofs, and should be +// masked in the same way as faulty sectors. A miner terminating sectors in the +// current deadline must be careful to compute an appropriate Window PoSt proof +// for the sectors that will be active at the time the PoSt is submitted. +// +// This function may be invoked with no new sectors to explicitly process the +// next batch of sectors. +func (a Actor) TerminateSectors(rt Runtime, params *TerminateSectorsParams) *TerminateSectorsReturn { + // Note: this cannot terminate pre-committed but un-proven sectors. + // They must be allowed to expire (and deposit burnt). 
+ + toProcess := make(DeadlineSectorMap) + for _, term := range params.Terminations { + err := toProcess.Add(term.Deadline, term.Partition, term.Sectors) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, + "failed to process deadline %d, partition %d", term.Deadline, term.Partition, + ) + } + err := toProcess.Check(AddressedPartitionsMax, AddressedSectorsMax) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "cannot process requested parameters") + + var hadEarlyTerminations bool + var st State + store := adt.AsStore(rt) + currEpoch := rt.CurrEpoch() + powerDelta := NewPowerPairZero() + rt.StateTransaction(&st, func() { + hadEarlyTerminations = havePendingEarlyTerminations(rt, &st) + + info := getMinerInfo(rt, &st) + rt.ValidateImmediateCallerIs(append(info.ControlAddresses, info.Owner, info.Worker)...) + + deadlines, err := st.LoadDeadlines(adt.AsStore(rt)) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadlines") + + // We're only reading the sectors, so there's no need to save this back. + // However, we still want to avoid re-loading this array per-partition. 
+ sectors, err := LoadSectors(store, st.Sectors) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load sectors") + + err = toProcess.ForEach(func(dlIdx uint64, partitionSectors PartitionSectorMap) error { + quant := st.QuantSpecForDeadline(dlIdx) + + deadline, err := deadlines.LoadDeadline(store, dlIdx) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadline %d", dlIdx) + + removedPower, err := deadline.TerminateSectors(store, sectors, currEpoch, partitionSectors, info.SectorSize, quant) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to terminate sectors in deadline %d", dlIdx) + + st.EarlyTerminations.Set(dlIdx) + + powerDelta = powerDelta.Sub(removedPower) + + err = deadlines.UpdateDeadline(store, dlIdx, deadline) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to update deadline %d", dlIdx) + + return nil + }) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to walk sectors") + + err = st.SaveDeadlines(store, deadlines) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to save deadlines") + }) + + // Now, try to process these sectors. + more := processEarlyTerminations(rt) + if more && !hadEarlyTerminations { + // We have remaining terminations, and we didn't _previously_ + // have early terminations to process, schedule a cron job. + // NOTE: This isn't quite correct. If we repeatedly fill, empty, + // fill, and empty, the queue, we'll keep scheduling new cron + // jobs. However, in practice, that shouldn't be all that bad. 
+ scheduleEarlyTerminationWork(rt) + } + + requestUpdatePower(rt, powerDelta) + + return &TerminateSectorsReturn{Done: !more} +} + +//////////// +// Faults // +//////////// + +type DeclareFaultsParams struct { + Faults []FaultDeclaration +} + +type FaultDeclaration struct { + // The deadline to which the faulty sectors are assigned, in range [0..WPoStPeriodDeadlines) + Deadline uint64 + // Partition index within the deadline containing the faulty sectors. + Partition uint64 + // Sectors in the partition being declared faulty. + Sectors bitfield.BitField +} + +func (a Actor) DeclareFaults(rt Runtime, params *DeclareFaultsParams) *abi.EmptyValue { + toProcess := make(DeadlineSectorMap) + for _, term := range params.Faults { + err := toProcess.Add(term.Deadline, term.Partition, term.Sectors) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, + "failed to process deadline %d, partition %d", term.Deadline, term.Partition, + ) + } + err := toProcess.Check(AddressedPartitionsMax, AddressedSectorsMax) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "cannot process requested parameters") + + store := adt.AsStore(rt) + var st State + newFaultPowerTotal := NewPowerPairZero() + rt.StateTransaction(&st, func() { + info := getMinerInfo(rt, &st) + rt.ValidateImmediateCallerIs(append(info.ControlAddresses, info.Owner, info.Worker)...) 
+ + deadlines, err := st.LoadDeadlines(store) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadlines") + + sectors, err := LoadSectors(store, st.Sectors) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load sectors array") + + err = toProcess.ForEach(func(dlIdx uint64, pm PartitionSectorMap) error { + targetDeadline, err := declarationDeadlineInfo(st.ProvingPeriodStart, dlIdx, rt.CurrEpoch()) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "invalid fault declaration deadline %d", dlIdx) + + err = validateFRDeclarationDeadline(targetDeadline) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "failed fault declaration at deadline %d", dlIdx) + + deadline, err := deadlines.LoadDeadline(store, dlIdx) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadline %d", dlIdx) + + faultExpirationEpoch := targetDeadline.Last() + FaultMaxAge + newFaultyPower, err := deadline.DeclareFaults(store, sectors, info.SectorSize, QuantSpecForDeadline(targetDeadline), faultExpirationEpoch, pm) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to declare faults for deadline %d", dlIdx) + + err = deadlines.UpdateDeadline(store, dlIdx, deadline) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to store deadline %d partitions", dlIdx) + + newFaultPowerTotal = newFaultPowerTotal.Add(newFaultyPower) + return nil + }) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to iterate deadlines") + + err = st.SaveDeadlines(store, deadlines) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to save deadlines") + }) + + // Remove power for new faulty sectors. + // NOTE: It would be permissible to delay the power loss until the deadline closes, but that would require + // additional accounting state. 
+ // https://github.com/filecoin-project/specs-actors/issues/414 + requestUpdatePower(rt, newFaultPowerTotal.Neg()) + + // Payment of penalty for declared faults is deferred to the deadline cron. + return nil +} + +type DeclareFaultsRecoveredParams struct { + Recoveries []RecoveryDeclaration +} + +type RecoveryDeclaration struct { + // The deadline to which the recovered sectors are assigned, in range [0..WPoStPeriodDeadlines) + Deadline uint64 + // Partition index within the deadline containing the recovered sectors. + Partition uint64 + // Sectors in the partition being declared recovered. + Sectors bitfield.BitField +} + +func (a Actor) DeclareFaultsRecovered(rt Runtime, params *DeclareFaultsRecoveredParams) *abi.EmptyValue { + toProcess := make(DeadlineSectorMap) + for _, term := range params.Recoveries { + err := toProcess.Add(term.Deadline, term.Partition, term.Sectors) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, + "failed to process deadline %d, partition %d", term.Deadline, term.Partition, + ) + } + err := toProcess.Check(AddressedPartitionsMax, AddressedSectorsMax) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "cannot process requested parameters") + + store := adt.AsStore(rt) + var st State + rt.StateTransaction(&st, func() { + info := getMinerInfo(rt, &st) + rt.ValidateImmediateCallerIs(append(info.ControlAddresses, info.Owner, info.Worker)...) 
+ + deadlines, err := st.LoadDeadlines(adt.AsStore(rt)) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadlines") + + sectors, err := LoadSectors(store, st.Sectors) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load sectors array") + + err = toProcess.ForEach(func(dlIdx uint64, pm PartitionSectorMap) error { + targetDeadline, err := declarationDeadlineInfo(st.ProvingPeriodStart, dlIdx, rt.CurrEpoch()) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "invalid recovery declaration deadline %d", dlIdx) + err = validateFRDeclarationDeadline(targetDeadline) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "failed recovery declaration at deadline %d", dlIdx) + + deadline, err := deadlines.LoadDeadline(store, dlIdx) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadline %d", dlIdx) + + err = deadline.DeclareFaultsRecovered(store, sectors, info.SectorSize, pm) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to declare recoveries for deadline %d", dlIdx) + + err = deadlines.UpdateDeadline(store, dlIdx, deadline) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to store deadline %d", dlIdx) + return nil + }) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to walk sectors") + + err = st.SaveDeadlines(store, deadlines) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to save deadlines") + }) + + // Power is not restored yet, but when the recovered sectors are successfully PoSted. + return nil +} + +///////////////// +// Maintenance // +///////////////// + +type CompactPartitionsParams struct { + Deadline uint64 + Partitions bitfield.BitField +} + +// Compacts a number of partitions at one deadline by removing terminated sectors, re-ordering the remaining sectors, +// and assigning them to new partitions so as to completely fill all but one partition with live sectors. 
+// The addressed partitions are removed from the deadline, and new ones appended. +// The final partition in the deadline is always included in the compaction, whether or not explicitly requested. +// Removed sectors are removed from state entirely. +// May not be invoked if the deadline has any un-processed early terminations. +func (a Actor) CompactPartitions(rt Runtime, params *CompactPartitionsParams) *abi.EmptyValue { + if params.Deadline >= WPoStPeriodDeadlines { + rt.Abortf(exitcode.ErrIllegalArgument, "invalid deadline %v", params.Deadline) + } + + partitionCount, err := params.Partitions.Count() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "failed to parse partitions bitfield") + + store := adt.AsStore(rt) + var st State + rt.StateTransaction(&st, func() { + info := getMinerInfo(rt, &st) + rt.ValidateImmediateCallerIs(append(info.ControlAddresses, info.Owner, info.Worker)...) + + if !deadlineIsMutable(st.ProvingPeriodStart, params.Deadline, rt.CurrEpoch()) { + rt.Abortf(exitcode.ErrForbidden, + "cannot compact deadline %d during its challenge window or the prior challenge window", params.Deadline) + } + + submissionPartitionLimit := loadPartitionsSectorsMax(info.WindowPoStPartitionSectors) + if partitionCount > submissionPartitionLimit { + rt.Abortf(exitcode.ErrIllegalArgument, "too many partitions %d, limit %d", partitionCount, submissionPartitionLimit) + } + + quant := st.QuantSpecForDeadline(params.Deadline) + + deadlines, err := st.LoadDeadlines(store) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadlines") + + deadline, err := deadlines.LoadDeadline(store, params.Deadline) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadline %d", params.Deadline) + + live, dead, removedPower, err := deadline.RemovePartitions(store, params.Partitions, quant) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to remove partitions from deadline %d", params.Deadline) + + err = 
st.DeleteSectors(store, dead) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to delete dead sectors") + + sectors, err := st.LoadSectorInfos(store, live) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load moved sectors") + + newPower, err := deadline.AddSectors(store, info.WindowPoStPartitionSectors, sectors, info.SectorSize, quant) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to add back moved sectors") + + if !removedPower.Equals(newPower) { + rt.Abortf(exitcode.ErrIllegalState, "power changed when compacting partitions: was %v, is now %v", removedPower, newPower) + } + }) + return nil +} + +type CompactSectorNumbersParams struct { + MaskSectorNumbers bitfield.BitField +} + +// Compacts sector number allocations to reduce the size of the allocated sector +// number bitfield. +// +// When allocating sector numbers sequentially, or in sequential groups, this +// bitfield should remain fairly small. However, if the bitfield grows large +// enough such that PreCommitSector fails (or becomes expensive), this method +// can be called to mask out (throw away) entire ranges of unused sector IDs. +// For example, if sectors 1-99 and 101-200 have been allocated, sector number +// 99 can be masked out to collapse these two ranges into one. +func (a Actor) CompactSectorNumbers(rt Runtime, params *CompactSectorNumbersParams) *abi.EmptyValue { + lastSectorNo, err := params.MaskSectorNumbers.Last() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "invalid mask bitfield") + if lastSectorNo > abi.MaxSectorNumber { + rt.Abortf(exitcode.ErrIllegalArgument, "masked sector number %d exceeded max sector number", lastSectorNo) + } + + store := adt.AsStore(rt) + var st State + rt.StateTransaction(&st, func() { + info := getMinerInfo(rt, &st) + rt.ValidateImmediateCallerIs(append(info.ControlAddresses, info.Owner, info.Worker)...) 
+ + err := st.MaskSectorNumbers(store, params.MaskSectorNumbers) + + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to mask sector numbers") + }) + return nil +} + +/////////////////////// +// Pledge Collateral // +/////////////////////// + +// Locks up some amount of the miner's unlocked balance (including funds received alongside the invoking message). +func (a Actor) AddLockedFund(rt Runtime, amountToLock *abi.TokenAmount) *abi.EmptyValue { + if amountToLock.Sign() < 0 { + rt.Abortf(exitcode.ErrIllegalArgument, "cannot lock up a negative amount of funds") + } + + vestingSchedule := &RewardVestingSpecV0 + if rt.NetworkVersion() >= network.Version1 { + vestingSchedule = &RewardVestingSpecV1 + } + + var st State + newlyVested := big.Zero() + rt.StateTransaction(&st, func() { + var err error + info := getMinerInfo(rt, &st) + rt.ValidateImmediateCallerIs(append(info.ControlAddresses, info.Owner, info.Worker, builtin.RewardActorAddr)...) + + // This may lock up unlocked balance that was covering InitialPledgeRequirements + // This ensures that the amountToLock is always locked up if the miner account + // can cover it. + unlockedBalance := st.GetUnlockedBalance(rt.CurrentBalance()) + if unlockedBalance.LessThan(*amountToLock) { + rt.Abortf(exitcode.ErrInsufficientFunds, "insufficient funds to lock, available: %v, requested: %v", unlockedBalance, *amountToLock) + } + + newlyVested, err = st.AddLockedFunds(adt.AsStore(rt), rt.CurrEpoch(), *amountToLock, vestingSchedule) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to lock funds in vesting table") + }) + + notifyPledgeChanged(rt, big.Sub(*amountToLock, newlyVested)) + + return nil +} + +type ReportConsensusFaultParams struct { + BlockHeader1 []byte + BlockHeader2 []byte + BlockHeaderExtra []byte +} + +func (a Actor) ReportConsensusFault(rt Runtime, params *ReportConsensusFaultParams) *abi.EmptyValue { + // Note: only the first reporter of any fault is rewarded. 
+ // Subsequent invocations fail because the target miner has been removed. + rt.ValidateImmediateCallerType(builtin.CallerTypesSignable...) + reporter := rt.Caller() + + fault, err := rt.VerifyConsensusFault(params.BlockHeader1, params.BlockHeader2, params.BlockHeaderExtra) + if err != nil { + rt.Abortf(exitcode.ErrIllegalArgument, "fault not verified: %s", err) + } + + // Elapsed since the fault (i.e. since the higher of the two blocks) + faultAge := rt.CurrEpoch() - fault.Epoch + if faultAge <= 0 { + rt.Abortf(exitcode.ErrIllegalArgument, "invalid fault epoch %v ahead of current %v", fault.Epoch, rt.CurrEpoch()) + } + + // Reward reporter with a share of the miner's current balance. + slasherReward := RewardForConsensusSlashReport(faultAge, rt.CurrentBalance()) + code := rt.Send(reporter, builtin.MethodSend, nil, slasherReward, &builtin.Discard{}) + builtin.RequireSuccess(rt, code, "failed to reward reporter") + + var st State + rt.StateReadonly(&st) + + // Notify power actor with lock-up total being removed. + code = rt.Send( + builtin.StoragePowerActorAddr, + builtin.MethodsPower.OnConsensusFault, + &st.LockedFunds, + abi.NewTokenAmount(0), + &builtin.Discard{}, + ) + builtin.RequireSuccess(rt, code, "failed to notify power actor on consensus fault") + + // close deals and burn funds + terminateMiner(rt) + + return nil +} + +type WithdrawBalanceParams struct { + AmountRequested abi.TokenAmount +} + +func (a Actor) WithdrawBalance(rt Runtime, params *WithdrawBalanceParams) *abi.EmptyValue { + var st State + if params.AmountRequested.LessThan(big.Zero()) { + rt.Abortf(exitcode.ErrIllegalArgument, "negative fund requested for withdrawal: %s", params.AmountRequested) + } + var info *MinerInfo + newlyVested := big.Zero() + rt.StateTransaction(&st, func() { + var err error + info = getMinerInfo(rt, &st) + // Only the owner is allowed to withdraw the balance as it belongs to/is controlled by the owner + // and not the worker. 
+ rt.ValidateImmediateCallerIs(info.Owner) + // Ensure we don't have any pending terminations. + if count, err := st.EarlyTerminations.Count(); err != nil { + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to count early terminations") + } else if count > 0 { + rt.Abortf(exitcode.ErrForbidden, + "cannot withdraw funds while %d deadlines have terminated sectors with outstanding fees", + count, + ) + } + + // Unlock vested funds so we can spend them. + newlyVested, err = st.UnlockVestedFunds(adt.AsStore(rt), rt.CurrEpoch()) + if err != nil { + rt.Abortf(exitcode.ErrIllegalState, "failed to vest fund: %v", err) + } + + // Verify InitialPledgeRequirement does not exceed unlocked funds + verifyPledgeMeetsInitialRequirements(rt, &st) + }) + + currBalance := rt.CurrentBalance() + amountWithdrawn := big.Min(st.GetAvailableBalance(currBalance), params.AmountRequested) + Assert(amountWithdrawn.GreaterThanEqual(big.Zero())) + Assert(amountWithdrawn.LessThanEqual(currBalance)) + + code := rt.Send(info.Owner, builtin.MethodSend, nil, amountWithdrawn, &builtin.Discard{}) + builtin.RequireSuccess(rt, code, "failed to withdraw balance") + + pledgeDelta := newlyVested.Neg() + notifyPledgeChanged(rt, pledgeDelta) + + st.AssertBalanceInvariants(rt.CurrentBalance()) + return nil +} + +////////// +// Cron // +////////// + +func (a Actor) OnDeferredCronEvent(rt Runtime, payload *CronEventPayload) *abi.EmptyValue { + rt.ValidateImmediateCallerIs(builtin.StoragePowerActorAddr) + + switch payload.EventType { + case CronEventProvingDeadline: + handleProvingDeadline(rt) + case CronEventWorkerKeyChange: + commitWorkerKeyChange(rt) + case CronEventProcessEarlyTerminations: + if processEarlyTerminations(rt) { + scheduleEarlyTerminationWork(rt) + } + } + + return nil +} + +//////////////////////////////////////////////////////////////////////////////// +// Utility functions & helpers +//////////////////////////////////////////////////////////////////////////////// + +func 
processEarlyTerminations(rt Runtime) (more bool) { + store := adt.AsStore(rt) + networkVersion := rt.NetworkVersion() + + // TODO: We're using the current power+epoch reward. Technically, we + // should use the power/reward at the time of termination. + // https://github.com/filecoin-project/specs-actors/pull/648 + rewardStats := requestCurrentEpochBlockReward(rt) + pwrTotal := requestCurrentTotalPower(rt) + + var ( + result TerminationResult + dealsToTerminate []market.OnMinerSectorsTerminateParams + penalty = big.Zero() + pledgeDelta = big.Zero() + ) + + var st State + rt.StateTransaction(&st, func() { + var err error + result, more, err = st.PopEarlyTerminations(store, AddressedPartitionsMax, AddressedSectorsMax) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to pop early terminations") + + // Nothing to do, don't waste any time. + // This can happen if we end up processing early terminations + // before the cron callback fires. + if result.IsEmpty() { + return + } + + info := getMinerInfo(rt, &st) + + sectors, err := LoadSectors(store, st.Sectors) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load sectors array") + + totalInitialPledge := big.Zero() + dealsToTerminate = make([]market.OnMinerSectorsTerminateParams, 0, len(result.Sectors)) + err = result.ForEach(func(epoch abi.ChainEpoch, sectorNos bitfield.BitField) error { + sectors, err := sectors.Load(sectorNos) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load sector infos") + params := market.OnMinerSectorsTerminateParams{ + Epoch: epoch, + DealIDs: make([]abi.DealID, 0, len(sectors)), // estimate ~one deal per sector. + } + for _, sector := range sectors { + params.DealIDs = append(params.DealIDs, sector.DealIDs...) 
+ totalInitialPledge = big.Add(totalInitialPledge, sector.InitialPledge) + } + penalty = big.Add(penalty, terminationPenalty(info.SectorSize, epoch, networkVersion, + rewardStats.ThisEpochRewardSmoothed, pwrTotal.QualityAdjPowerSmoothed, sectors)) + dealsToTerminate = append(dealsToTerminate, params) + + return nil + }) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to process terminations") + + // Unlock funds for penalties. + // TODO: handle bankrupt miner: https://github.com/filecoin-project/specs-actors/issues/627 + // We're intentionally reducing the penalty paid to what we have. + unlockedBalance := st.GetUnlockedBalance(rt.CurrentBalance()) + penaltyFromVesting, penaltyFromBalance, err := st.PenalizeFundsInPriorityOrder(store, rt.CurrEpoch(), penalty, unlockedBalance) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to unlock unvested funds") + penalty = big.Add(penaltyFromVesting, penaltyFromBalance) + + // Remove pledge requirement. + st.AddInitialPledgeRequirement(totalInitialPledge.Neg()) + pledgeDelta = big.Add(totalInitialPledge, penaltyFromVesting).Neg() + }) + + // We didn't do anything, abort. + if result.IsEmpty() { + return more + } + + // Burn penalty. + burnFunds(rt, penalty) + + // Return pledge. + notifyPledgeChanged(rt, pledgeDelta) + + // Terminate deals. + for _, params := range dealsToTerminate { + requestTerminateDeals(rt, params.Epoch, params.DealIDs) + } + + // reschedule cron worker, if necessary. + return more +} + +// Invoked at the end of the last epoch for each proving deadline. 
+func handleProvingDeadline(rt Runtime) { + currEpoch := rt.CurrEpoch() + store := adt.AsStore(rt) + networkVersion := rt.NetworkVersion() + + epochReward := requestCurrentEpochBlockReward(rt) + pwrTotal := requestCurrentTotalPower(rt) + + hadEarlyTerminations := false + + powerDelta := PowerPair{big.Zero(), big.Zero()} + penaltyTotal := abi.NewTokenAmount(0) + pledgeDelta := abi.NewTokenAmount(0) + + var st State + rt.StateTransaction(&st, func() { + var err error + { + // Vest locked funds. + // This happens first so that any subsequent penalties are taken + // from locked vesting funds before funds free this epoch. + newlyVested, err := st.UnlockVestedFunds(store, rt.CurrEpoch()) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to vest funds") + pledgeDelta = big.Add(pledgeDelta, newlyVested.Neg()) + } + + { + // expire pre-committed sectors + expiryQ, err := LoadBitfieldQueue(store, st.PreCommittedSectorsExpiry, st.QuantSpecEveryDeadline()) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load sector expiry queue") + + bf, modified, err := expiryQ.PopUntil(currEpoch) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to pop expired sectors") + + if modified { + st.PreCommittedSectorsExpiry, err = expiryQ.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to save expiry queue") + } + + depositToBurn, err := st.checkPrecommitExpiry(store, bf) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to expire pre-committed sectors") + penaltyTotal = big.Add(penaltyTotal, depositToBurn) + } + + // Record whether or not we _had_ early terminations in the queue before this method. + // That way, don't re-schedule a cron callback if one is already scheduled. 
+ hadEarlyTerminations = havePendingEarlyTerminations(rt, &st) + + // Note: because the cron actor is not invoked on epochs with empty tipsets, the current epoch is not necessarily + // exactly the final epoch of the deadline; it may be slightly later (i.e. in the subsequent deadline/period). + // Further, this method is invoked once *before* the first proving period starts, after the actor is first + // constructed; this is detected by !dlInfo.PeriodStarted(). + // Use dlInfo.PeriodEnd() rather than rt.CurrEpoch unless certain of the desired semantics. + dlInfo := st.DeadlineInfo(currEpoch) + if !dlInfo.PeriodStarted() { + return // Skip checking faults on the first, incomplete period. + } + deadlines, err := st.LoadDeadlines(store) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadlines") + deadline, err := deadlines.LoadDeadline(store, dlInfo.Index) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load deadline %d", dlInfo.Index) + quant := QuantSpecForDeadline(dlInfo) + unlockedBalance := st.GetUnlockedBalance(rt.CurrentBalance()) + + // Remember power that was faulty before processing any missed PoSts. + previouslyFaultyPower := deadline.FaultyPower.QA + + { + // Detect and penalize missing proofs. + faultExpiration := dlInfo.Last() + FaultMaxAge + + newFaultyPower, failedRecoveryPower, err := deadline.ProcessDeadlineEnd(store, quant, faultExpiration) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to process end of deadline %d", dlInfo.Index) + + powerDelta = powerDelta.Sub(newFaultyPower) + + if networkVersion >= network.Version3 { + // From network version 3, faults detected from a missed PoSt pay nothing. + // Failed recoveries pay nothing here, but will pay the ongoing fault fee in the subsequent block. + } else { + penalizePowerTotal := big.Add(newFaultyPower.QA, failedRecoveryPower.QA) + + // Unlock sector penalty for all undeclared faults. 
+ penaltyTarget := PledgePenaltyForUndeclaredFault(epochReward.ThisEpochRewardSmoothed, pwrTotal.QualityAdjPowerSmoothed, + penalizePowerTotal, rt.NetworkVersion()) + // Subtract the "ongoing" fault fee from the amount charged now, since it will be added on just below. + penaltyTarget = big.Sub(penaltyTarget, PledgePenaltyForDeclaredFault(epochReward.ThisEpochRewardSmoothed, + pwrTotal.QualityAdjPowerSmoothed, penalizePowerTotal, networkVersion)) + penaltyFromVesting, penaltyFromBalance, err := st.PenalizeFundsInPriorityOrder(store, currEpoch, penaltyTarget, unlockedBalance) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to unlock penalty") + unlockedBalance = big.Sub(unlockedBalance, penaltyFromBalance) + penaltyTotal = big.Sum(penaltyTotal, penaltyFromVesting, penaltyFromBalance) + pledgeDelta = big.Sub(pledgeDelta, penaltyFromVesting) + } + } + { + // Record faulty power for penalisation of ongoing faults, before popping expirations. + // This includes any power that was just faulted from missing a PoSt. + ongoingFaultyPower := deadline.FaultyPower.QA + if networkVersion >= network.Version3 { + // From network version 3, this *excludes* any power that was just faulted from missing a PoSt. + // It includes power that was previously declared, skipped, or detected faulty, whether or + // not it is also marked for recovery. 
+ ongoingFaultyPower = previouslyFaultyPower + } + penaltyTarget := PledgePenaltyForDeclaredFault(epochReward.ThisEpochRewardSmoothed, + pwrTotal.QualityAdjPowerSmoothed, ongoingFaultyPower, networkVersion) + penaltyFromVesting, penaltyFromBalance, err := st.PenalizeFundsInPriorityOrder(store, currEpoch, penaltyTarget, unlockedBalance) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to unlock penalty") + unlockedBalance = big.Sub(unlockedBalance, penaltyFromBalance) //nolint:ineffassign + penaltyTotal = big.Sum(penaltyTotal, penaltyFromVesting, penaltyFromBalance) + pledgeDelta = big.Sub(pledgeDelta, penaltyFromVesting) + } + { + // Expire sectors that are due, either for on-time expiration or "early" faulty-for-too-long. + expired, err := deadline.PopExpiredSectors(store, dlInfo.Last(), quant) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load expired sectors") + + // Release pledge requirements for the sectors expiring on-time. + // Pledge for the sectors expiring early is retained to support the termination fee that will be assessed + // when the early termination is processed. + pledgeDelta = big.Sub(pledgeDelta, expired.OnTimePledge) + st.AddInitialPledgeRequirement(expired.OnTimePledge.Neg()) + + // Record reduction in power of the amount of expiring active power. + // Faulty power has already been lost, so the amount expiring can be excluded from the delta. + powerDelta = powerDelta.Sub(expired.ActivePower) + + // Record deadlines with early terminations. While this + // bitfield is non-empty, the miner is locked until they + // pay the fee. + noEarlyTerminations, err := expired.EarlySectors.IsEmpty() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to count early terminations") + if !noEarlyTerminations { + st.EarlyTerminations.Set(dlInfo.Index) + } + + // The termination fee is paid later, in early-termination queue processing. 
+ // We could charge at least the undeclared fault fee here, which is a lower bound on the penalty. + // https://github.com/filecoin-project/specs-actors/issues/674 + + // The deals are not terminated yet, that is left for processing of the early termination queue. + } + + // Save new deadline state. + err = deadlines.UpdateDeadline(store, dlInfo.Index, deadline) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to update deadline %d", dlInfo.Index) + + err = st.SaveDeadlines(store, deadlines) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to save deadlines") + + // Increment current deadline, and proving period if necessary. + if dlInfo.PeriodStarted() { + st.CurrentDeadline = (st.CurrentDeadline + 1) % WPoStPeriodDeadlines + if st.CurrentDeadline == 0 { + st.ProvingPeriodStart = st.ProvingPeriodStart + WPoStProvingPeriod + } + } + }) + + // Remove power for new faults, and burn penalties. + requestUpdatePower(rt, powerDelta) + burnFunds(rt, penaltyTotal) + notifyPledgeChanged(rt, pledgeDelta) + + // Schedule cron callback for next deadline's last epoch. + newDlInfo := st.DeadlineInfo(currEpoch) + enrollCronEvent(rt, newDlInfo.Last(), &CronEventPayload{ + EventType: CronEventProvingDeadline, + }) + + // Record whether or not we _have_ early terminations now. + hasEarlyTerminations := havePendingEarlyTerminations(rt, &st) + + // If we didn't have pending early terminations before, but we do now, + // handle them at the next epoch. + if !hadEarlyTerminations && hasEarlyTerminations { + // First, try to process some of these terminations. + if processEarlyTerminations(rt) { + // If that doesn't work, just defer till the next epoch. + scheduleEarlyTerminationWork(rt) + } + // Note: _don't_ process early terminations if we had a cron + // callback already scheduled. In that case, we'll already have + // processed AddressedSectorsMax terminations this epoch. 
+ } +} + +// Check expiry is exactly *the epoch before* the start of a proving period. +func validateExpiration(rt Runtime, activation, expiration abi.ChainEpoch, sealProof abi.RegisteredSealProof) { + // expiration cannot be less than minimum after activation + if expiration-activation < MinSectorExpiration { + rt.Abortf(exitcode.ErrIllegalArgument, "invalid expiration %d, total sector lifetime (%d) must exceed %d after activation %d", + expiration, expiration-activation, MinSectorExpiration, activation) + } + + // expiration cannot exceed MaxSectorExpirationExtension from now + if expiration > rt.CurrEpoch()+MaxSectorExpirationExtension { + rt.Abortf(exitcode.ErrIllegalArgument, "invalid expiration %d, cannot be more than %d past current epoch %d", + expiration, MaxSectorExpirationExtension, rt.CurrEpoch()) + } + + // total sector lifetime cannot exceed SectorMaximumLifetime for the sector's seal proof + maxLifetime, err := builtin.SealProofSectorMaximumLifetime(sealProof) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "unknown seal proof %d", sealProof) + if expiration-activation > maxLifetime { + rt.Abortf(exitcode.ErrIllegalArgument, "invalid expiration %d, total sector lifetime (%d) cannot exceed %d after activation %d", + expiration, expiration-activation, maxLifetime, activation) + } +} + +func validateReplaceSector(rt Runtime, st *State, store adt.Store, params *SectorPreCommitInfo) *SectorOnChainInfo { + replaceSector, found, err := st.GetSector(store, params.ReplaceSectorNumber) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load sector %v", params.SectorNumber) + if !found { + rt.Abortf(exitcode.ErrNotFound, "no such sector %v to replace", params.ReplaceSectorNumber) + } + + if len(replaceSector.DealIDs) > 0 { + rt.Abortf(exitcode.ErrIllegalArgument, "cannot replace sector %v which has deals", params.ReplaceSectorNumber) + } + if params.SealProof != replaceSector.SealProof { + rt.Abortf(exitcode.ErrIllegalArgument, 
"cannot replace sector %v seal proof %v with seal proof %v", + params.ReplaceSectorNumber, replaceSector.SealProof, params.SealProof) + } + if params.Expiration < replaceSector.Expiration { + rt.Abortf(exitcode.ErrIllegalArgument, "cannot replace sector %v expiration %v with sooner expiration %v", + params.ReplaceSectorNumber, replaceSector.Expiration, params.Expiration) + } + + err = st.CheckSectorHealth(store, params.ReplaceSectorDeadline, params.ReplaceSectorPartition, params.ReplaceSectorNumber) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to replace sector %v", params.ReplaceSectorNumber) + + return replaceSector +} + +func enrollCronEvent(rt Runtime, eventEpoch abi.ChainEpoch, callbackPayload *CronEventPayload) { + payload := new(bytes.Buffer) + err := callbackPayload.MarshalCBOR(payload) + if err != nil { + rt.Abortf(exitcode.ErrIllegalArgument, "failed to serialize payload: %v", err) + } + code := rt.Send( + builtin.StoragePowerActorAddr, + builtin.MethodsPower.EnrollCronEvent, + &power.EnrollCronEventParams{ + EventEpoch: eventEpoch, + Payload: payload.Bytes(), + }, + abi.NewTokenAmount(0), + &builtin.Discard{}, + ) + builtin.RequireSuccess(rt, code, "failed to enroll cron event") +} + +func requestUpdatePower(rt Runtime, delta PowerPair) { + if delta.IsZero() { + return + } + code := rt.Send( + builtin.StoragePowerActorAddr, + builtin.MethodsPower.UpdateClaimedPower, + &power.UpdateClaimedPowerParams{ + RawByteDelta: delta.Raw, + QualityAdjustedDelta: delta.QA, + }, + abi.NewTokenAmount(0), + &builtin.Discard{}, + ) + builtin.RequireSuccess(rt, code, "failed to update power with %v", delta) +} + +func requestTerminateDeals(rt Runtime, epoch abi.ChainEpoch, dealIDs []abi.DealID) { + for len(dealIDs) > 0 { + size := min64(cbg.MaxLength, uint64(len(dealIDs))) + code := rt.Send( + builtin.StorageMarketActorAddr, + builtin.MethodsMarket.OnMinerSectorsTerminate, + &market.OnMinerSectorsTerminateParams{ + Epoch: epoch, + DealIDs: 
dealIDs[:size], + }, + abi.NewTokenAmount(0), + &builtin.Discard{}, + ) + builtin.RequireSuccess(rt, code, "failed to terminate deals, exit code %v", code) + dealIDs = dealIDs[size:] + } +} + +func requestTerminateAllDeals(rt Runtime, st *State) { //nolint:deadcode,unused + // TODO: red flag this is an ~unbounded computation. + // Transform into an idempotent partial computation that can be progressed on each invocation. + // https://github.com/filecoin-project/specs-actors/issues/675 + dealIds := []abi.DealID{} + if err := st.ForEachSector(adt.AsStore(rt), func(sector *SectorOnChainInfo) { + dealIds = append(dealIds, sector.DealIDs...) + }); err != nil { + rt.Abortf(exitcode.ErrIllegalState, "failed to traverse sectors for termination: %v", err) + } + + requestTerminateDeals(rt, rt.CurrEpoch(), dealIds) +} + +func scheduleEarlyTerminationWork(rt Runtime) { + enrollCronEvent(rt, rt.CurrEpoch()+1, &CronEventPayload{ + EventType: CronEventProcessEarlyTerminations, + }) +} + +func havePendingEarlyTerminations(rt Runtime, st *State) bool { + // Record this up-front + noEarlyTerminations, err := st.EarlyTerminations.IsEmpty() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to count early terminations") + return !noEarlyTerminations +} + +func verifyWindowedPost(rt Runtime, challengeEpoch abi.ChainEpoch, sectors []*SectorOnChainInfo, proofs []proof.PoStProof) { + minerActorID, err := addr.IDFromAddress(rt.Receiver()) + AssertNoError(err) // Runtime always provides ID-addresses + + // Regenerate challenge randomness, which must match that generated for the proof. 
+	var addrBuf bytes.Buffer
+	receiver := rt.Receiver()
+	err = receiver.MarshalCBOR(&addrBuf)
+	AssertNoError(err)
+	postRandomness := rt.GetRandomnessFromBeacon(crypto.DomainSeparationTag_WindowedPoStChallengeSeed, challengeEpoch, addrBuf.Bytes())
+
+	sectorProofInfo := make([]proof.SectorInfo, len(sectors))
+	for i, s := range sectors {
+		sectorProofInfo[i] = proof.SectorInfo{
+			SealProof:    s.SealProof,
+			SectorNumber: s.SectorNumber,
+			SealedCID:    s.SealedCID,
+		}
+	}
+
+	// Get public inputs
+	pvInfo := proof.WindowPoStVerifyInfo{
+		Randomness:        abi.PoStRandomness(postRandomness),
+		Proofs:            proofs,
+		ChallengedSectors: sectorProofInfo,
+		Prover:            abi.ActorID(minerActorID),
+	}
+
+	// Verify the PoSt Proof
+	if err = rt.VerifyPoSt(pvInfo); err != nil {
+		rt.Abortf(exitcode.ErrIllegalArgument, "invalid PoSt %+v: %s", pvInfo, err)
+	}
+}
+
+// SealVerifyStuff is the structure of information that must be sent with a
+// message to commit a sector. Most of this information is not needed in the
+// state tree but will be verified in sm.CommitSector. See SealCommitment for
+// data stored on the state tree for each sector.
+type SealVerifyStuff struct {
+	SealedCID        cid.Cid        // CommR
+	InteractiveEpoch abi.ChainEpoch // Used to derive the interactive PoRep challenge.
+	abi.RegisteredSealProof
+	Proof   []byte
+	DealIDs []abi.DealID
+	abi.SectorNumber
+	SealRandEpoch abi.ChainEpoch // Used to tie the seal to a chain.
+}
+
+func getVerifyInfo(rt Runtime, params *SealVerifyStuff) *proof.SealVerifyInfo {
+	if rt.CurrEpoch() <= params.InteractiveEpoch {
+		rt.Abortf(exitcode.ErrForbidden, "too early to prove sector")
+	}
+
+	// Check randomness.
+ challengeEarliest := sealChallengeEarliest(rt.CurrEpoch(), params.RegisteredSealProof) + if params.SealRandEpoch < challengeEarliest { + rt.Abortf(exitcode.ErrIllegalArgument, "seal epoch %v too old, expected >= %v", params.SealRandEpoch, challengeEarliest) + } + + commD := requestUnsealedSectorCID(rt, params.RegisteredSealProof, params.DealIDs) + + minerActorID, err := addr.IDFromAddress(rt.Receiver()) + AssertNoError(err) // Runtime always provides ID-addresses + + buf := new(bytes.Buffer) + receiver := rt.Receiver() + err = receiver.MarshalCBOR(buf) + AssertNoError(err) + + svInfoRandomness := rt.GetRandomnessFromTickets(crypto.DomainSeparationTag_SealRandomness, params.SealRandEpoch, buf.Bytes()) + svInfoInteractiveRandomness := rt.GetRandomnessFromBeacon(crypto.DomainSeparationTag_InteractiveSealChallengeSeed, params.InteractiveEpoch, buf.Bytes()) + + return &proof.SealVerifyInfo{ + SealProof: params.RegisteredSealProof, + SectorID: abi.SectorID{ + Miner: abi.ActorID(minerActorID), + Number: params.SectorNumber, + }, + DealIDs: params.DealIDs, + InteractiveRandomness: abi.InteractiveSealRandomness(svInfoInteractiveRandomness), + Proof: params.Proof, + Randomness: abi.SealRandomness(svInfoRandomness), + SealedCID: params.SealedCID, + UnsealedCID: commD, + } +} + +// Closes down this miner by erasing its power, terminating all its deals and burning its funds +func terminateMiner(rt Runtime) { + var st State + rt.StateReadonly(&st) + + requestTerminateAllDeals(rt, &st) + + // Delete the actor and burn all remaining funds + rt.DeleteActor(builtin.BurntFundsActorAddr) +} + +// Requests the storage market actor compute the unsealed sector CID from a sector's deals. 
+func requestUnsealedSectorCID(rt Runtime, proofType abi.RegisteredSealProof, dealIDs []abi.DealID) cid.Cid { + var unsealedCID cbg.CborCid + code := rt.Send( + builtin.StorageMarketActorAddr, + builtin.MethodsMarket.ComputeDataCommitment, + &market.ComputeDataCommitmentParams{ + SectorType: proofType, + DealIDs: dealIDs, + }, + abi.NewTokenAmount(0), + &unsealedCID, + ) + builtin.RequireSuccess(rt, code, "failed request for unsealed sector CID for deals %v", dealIDs) + return cid.Cid(unsealedCID) +} + +func requestDealWeight(rt Runtime, dealIDs []abi.DealID, sectorStart, sectorExpiry abi.ChainEpoch) market.VerifyDealsForActivationReturn { + var dealWeights market.VerifyDealsForActivationReturn + + code := rt.Send( + builtin.StorageMarketActorAddr, + builtin.MethodsMarket.VerifyDealsForActivation, + &market.VerifyDealsForActivationParams{ + DealIDs: dealIDs, + SectorStart: sectorStart, + SectorExpiry: sectorExpiry, + }, + abi.NewTokenAmount(0), + &dealWeights, + ) + builtin.RequireSuccess(rt, code, "failed to verify deals and get deal weight") + return dealWeights + +} + +func commitWorkerKeyChange(rt Runtime) *abi.EmptyValue { + var st State + rt.StateTransaction(&st, func() { + info := getMinerInfo(rt, &st) + // A previously scheduled key change could have been replaced with a new key change request + // scheduled in the future. This case should be treated as a no-op. + if info.PendingWorkerKey == nil || info.PendingWorkerKey.EffectiveAt > rt.CurrEpoch() { + return + } + + info.Worker = info.PendingWorkerKey.NewWorker + info.PendingWorkerKey = nil + err := st.SaveInfo(adt.AsStore(rt), info) + builtin.RequireNoErr(rt, err, exitcode.ErrSerialization, "failed to save miner info") + }) + return nil +} + +// Requests the current epoch target block reward from the reward actor. 
+// return value includes reward, smoothed estimate of reward, and baseline power +func requestCurrentEpochBlockReward(rt Runtime) reward.ThisEpochRewardReturn { + var ret reward.ThisEpochRewardReturn + code := rt.Send(builtin.RewardActorAddr, builtin.MethodsReward.ThisEpochReward, nil, big.Zero(), &ret) + builtin.RequireSuccess(rt, code, "failed to check epoch baseline power") + return ret +} + +// Requests the current network total power and pledge from the power actor. +func requestCurrentTotalPower(rt Runtime) *power.CurrentTotalPowerReturn { + var pwr power.CurrentTotalPowerReturn + code := rt.Send(builtin.StoragePowerActorAddr, builtin.MethodsPower.CurrentTotalPower, nil, big.Zero(), &pwr) + builtin.RequireSuccess(rt, code, "failed to check current power") + return &pwr +} + +// Verifies that the total locked balance exceeds the sum of sector initial pledges. +func verifyPledgeMeetsInitialRequirements(rt Runtime, st *State) { + if !st.MeetsInitialPledgeCondition(rt.CurrentBalance()) { + rt.Abortf(exitcode.ErrInsufficientFunds, + "unlocked balance does not cover pledge requirements (%v < %v)", + st.GetUnlockedBalance(rt.CurrentBalance()), st.InitialPledgeRequirement) + } +} + +// Resolves an address to an ID address and verifies that it is address of an account or multisig actor. +func resolveControlAddress(rt Runtime, raw addr.Address) addr.Address { + resolved, ok := rt.ResolveAddress(raw) + if !ok { + rt.Abortf(exitcode.ErrIllegalArgument, "unable to resolve address %v", raw) + } + Assert(resolved.Protocol() == addr.ID) + + ownerCode, ok := rt.GetActorCodeCID(resolved) + if !ok { + rt.Abortf(exitcode.ErrIllegalArgument, "no code for address %v", resolved) + } + if !builtin.IsPrincipal(ownerCode) { + rt.Abortf(exitcode.ErrIllegalArgument, "owner actor type must be a principal, was %v", ownerCode) + } + return resolved +} + +// Resolves an address to an ID address and verifies that it is address of an account actor with an associated BLS key. 
+// The worker must be BLS since the worker key will be used alongside a BLS-VRF. +func resolveWorkerAddress(rt Runtime, raw addr.Address) addr.Address { + resolved, ok := rt.ResolveAddress(raw) + if !ok { + rt.Abortf(exitcode.ErrIllegalArgument, "unable to resolve address %v", raw) + } + Assert(resolved.Protocol() == addr.ID) + + ownerCode, ok := rt.GetActorCodeCID(resolved) + if !ok { + rt.Abortf(exitcode.ErrIllegalArgument, "no code for address %v", resolved) + } + if ownerCode != builtin.AccountActorCodeID { + rt.Abortf(exitcode.ErrIllegalArgument, "worker actor type must be an account, was %v", ownerCode) + } + + if raw.Protocol() != addr.BLS { + var pubkey addr.Address + code := rt.Send(resolved, builtin.MethodsAccount.PubkeyAddress, nil, big.Zero(), &pubkey) + builtin.RequireSuccess(rt, code, "failed to fetch account pubkey from %v", resolved) + if pubkey.Protocol() != addr.BLS { + rt.Abortf(exitcode.ErrIllegalArgument, "worker account %v must have BLS pubkey, was %v", resolved, pubkey.Protocol()) + } + } + return resolved +} + +func burnFunds(rt Runtime, amt abi.TokenAmount) { + if amt.GreaterThan(big.Zero()) { + code := rt.Send(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, amt, &builtin.Discard{}) + builtin.RequireSuccess(rt, code, "failed to burn funds") + } +} + +func notifyPledgeChanged(rt Runtime, pledgeDelta abi.TokenAmount) { + if !pledgeDelta.IsZero() { + code := rt.Send(builtin.StoragePowerActorAddr, builtin.MethodsPower.UpdatePledgeTotal, &pledgeDelta, big.Zero(), &builtin.Discard{}) + builtin.RequireSuccess(rt, code, "failed to update total pledge") + } +} + +// Assigns proving period offset randomly in the range [0, WPoStProvingPeriod) by hashing +// the actor's address and current epoch. 
+func assignProvingPeriodOffset(myAddr addr.Address, currEpoch abi.ChainEpoch, hash func(data []byte) [32]byte) (abi.ChainEpoch, error) {
+	offsetSeed := bytes.Buffer{}
+	err := myAddr.MarshalCBOR(&offsetSeed)
+	if err != nil {
+		return 0, fmt.Errorf("failed to serialize address: %w", err)
+	}
+
+	err = binary.Write(&offsetSeed, binary.BigEndian, currEpoch)
+	if err != nil {
+		return 0, fmt.Errorf("failed to serialize epoch: %w", err)
+	}
+
+	digest := hash(offsetSeed.Bytes())
+	var offset uint64
+	err = binary.Read(bytes.NewBuffer(digest[:]), binary.BigEndian, &offset)
+	if err != nil {
+		return 0, fmt.Errorf("failed to interpret digest: %w", err)
+	}
+
+	offset = offset % uint64(WPoStProvingPeriod)
+	return abi.ChainEpoch(offset), nil
+}
+
+// Computes the epoch at which a proving period should start such that it is greater than the current epoch, and
+// has a defined offset from being an exact multiple of WPoStProvingPeriod.
+// A miner is exempt from Window PoSt until the first full proving period starts.
+func nextProvingPeriodStart(currEpoch abi.ChainEpoch, offset abi.ChainEpoch) abi.ChainEpoch {
+	currModulus := currEpoch % WPoStProvingPeriod
+	var periodProgress abi.ChainEpoch // How far ahead is currEpoch from previous offset boundary.
+	if currModulus >= offset {
+		periodProgress = currModulus - offset
+	} else {
+		periodProgress = WPoStProvingPeriod - (offset - currModulus)
+	}
+
+	periodStart := currEpoch - periodProgress + WPoStProvingPeriod
+	Assert(periodStart > currEpoch)
+	return periodStart
+}
+
+// Computes deadline information for a fault or recovery declaration.
+// If the deadline has not yet elapsed, the declaration is taken as being for the current proving period.
+// If the deadline has elapsed, it's instead taken as being for the next proving period after the current epoch.
+func declarationDeadlineInfo(periodStart abi.ChainEpoch, deadlineIdx uint64, currEpoch abi.ChainEpoch) (*dline.Info, error) { + if deadlineIdx >= WPoStPeriodDeadlines { + return nil, fmt.Errorf("invalid deadline %d, must be < %d", deadlineIdx, WPoStPeriodDeadlines) + } + + deadline := NewDeadlineInfo(periodStart, deadlineIdx, currEpoch).NextNotElapsed() + return deadline, nil +} + +// Checks that a fault or recovery declaration at a specific deadline is outside the exclusion window for the deadline. +func validateFRDeclarationDeadline(deadline *dline.Info) error { + if deadline.FaultCutoffPassed() { + return fmt.Errorf("late fault or recovery declaration at %v", deadline) + } + return nil +} + +// Validates that a partition contains the given sectors. +func validatePartitionContainsSectors(partition *Partition, sectors bitfield.BitField) error { + // Check that the declared sectors are actually assigned to the partition. + contains, err := BitFieldContainsAll(partition.Sectors, sectors) + if err != nil { + return xerrors.Errorf("failed to check sectors: %w", err) + } + if !contains { + return xerrors.Errorf("not all sectors are assigned to the partition") + } + return nil +} + +func terminationPenalty(sectorSize abi.SectorSize, currEpoch abi.ChainEpoch, networkVersion network.Version, + rewardEstimate, networkQAPowerEstimate *smoothing.FilterEstimate, sectors []*SectorOnChainInfo) abi.TokenAmount { + totalFee := big.Zero() + for _, s := range sectors { + sectorPower := QAPowerForSector(sectorSize, s) + fee := PledgePenaltyForTermination(s.ExpectedDayReward, s.ExpectedStoragePledge, currEpoch-s.Activation, rewardEstimate, + networkQAPowerEstimate, sectorPower, networkVersion) + totalFee = big.Add(fee, totalFee) + } + return totalFee +} + +func PowerForSector(sectorSize abi.SectorSize, sector *SectorOnChainInfo) PowerPair { + return PowerPair{ + Raw: big.NewIntUnsigned(uint64(sectorSize)), + QA: QAPowerForSector(sectorSize, sector), + } +} + +// Returns the sum of 
the raw byte and quality-adjusted power for sectors. +func PowerForSectors(ssize abi.SectorSize, sectors []*SectorOnChainInfo) PowerPair { + qa := big.Zero() + for _, s := range sectors { + qa = big.Add(qa, QAPowerForSector(ssize, s)) + } + + return PowerPair{ + Raw: big.Mul(big.NewIntUnsigned(uint64(ssize)), big.NewIntUnsigned(uint64(len(sectors)))), + QA: qa, + } +} + +// The oldest seal challenge epoch that will be accepted in the current epoch. +func sealChallengeEarliest(currEpoch abi.ChainEpoch, proof abi.RegisteredSealProof) abi.ChainEpoch { + return currEpoch - ChainFinality - MaxSealDuration[proof] +} + +func getMinerInfo(rt Runtime, st *State) *MinerInfo { + info, err := st.GetInfo(adt.AsStore(rt)) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "could not read miner info") + return info +} + +func min64(a, b uint64) uint64 { + if a < b { + return a + } + return b +} + +func max64(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +func minEpoch(a, b abi.ChainEpoch) abi.ChainEpoch { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/miner/miner_state.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/miner/miner_state.go new file mode 100644 index 0000000000..ce04057a8f --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/miner/miner_state.go @@ -0,0 +1,1004 @@ +package miner + +import ( + "fmt" + "reflect" + "sort" + + addr "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/dline" + xc "github.com/filecoin-project/go-state-types/exitcode" + cid "github.com/ipfs/go-cid" + errors "github.com/pkg/errors" + xerrors "golang.org/x/xerrors" + + "github.com/filecoin-project/specs-actors/actors/builtin" + . 
"github.com/filecoin-project/specs-actors/actors/util" + "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +// Balance of Miner Actor should be greater than or equal to +// the sum of PreCommitDeposits and LockedFunds. +// It is possible for balance to fall below the sum of +// PCD, LF and InitialPledgeRequirements, and this is a bad +// state (IP Debt) that limits a miner actor's behavior (i.e. no balance withdrawals) +// Excess balance as computed by st.GetAvailableBalance will be +// withdrawable or usable for pre-commit deposit or pledge lock-up. +type State struct { + // Information not related to sectors. + Info cid.Cid + + PreCommitDeposits abi.TokenAmount // Total funds locked as PreCommitDeposits + LockedFunds abi.TokenAmount // Total rewards and added funds locked in vesting table + + VestingFunds cid.Cid // VestingFunds (Vesting Funds schedule for the miner). + + InitialPledgeRequirement abi.TokenAmount // Sum of initial pledge requirements of all active sectors + + // Sectors that have been pre-committed but not yet proven. + PreCommittedSectors cid.Cid // Map, HAMT[SectorNumber]SectorPreCommitOnChainInfo + + // PreCommittedSectorsExpiry maintains the state required to expire PreCommittedSectors. + PreCommittedSectorsExpiry cid.Cid // BitFieldQueue (AMT[Epoch]*BitField) + + // Allocated sector IDs. Sector IDs can never be reused once allocated. + AllocatedSectors cid.Cid // BitField + + // Information for all proven and not-yet-garbage-collected sectors. + // + // Sectors are removed from this AMT when the partition to which the + // sector belongs is compacted. + Sectors cid.Cid // Array, AMT[SectorNumber]SectorOnChainInfo (sparse) + + // The first epoch in this miner's current proving period. This is the first epoch in which a PoSt for a + // partition at the miner's first deadline may arrive. Alternatively, it is after the last epoch at which + // a PoSt for the previous window is valid. 
+ // Always greater than zero, this may be greater than the current epoch for genesis miners in the first + // WPoStProvingPeriod epochs of the chain; the epochs before the first proving period starts are exempt from Window + // PoSt requirements. + // Updated at the end of every period by a cron callback. + ProvingPeriodStart abi.ChainEpoch + + // Index of the deadline within the proving period beginning at ProvingPeriodStart that has not yet been + // finalized. + // Updated at the end of each deadline window by a cron callback. + CurrentDeadline uint64 + + // The sector numbers due for PoSt at each deadline in the current proving period, frozen at period start. + // New sectors are added and expired ones removed at proving period boundary. + // Faults are not subtracted from this in state, but on the fly. + Deadlines cid.Cid + + // Deadlines with outstanding fees for early sector termination. + EarlyTerminations bitfield.BitField +} + +type MinerInfo struct { + // Account that owns this miner. + // - Income and returned collateral are paid to this address. + // - This address is also allowed to change the worker address for the miner. + Owner addr.Address // Must be an ID-address. + + // Worker account for this miner. + // The associated pubkey-type address is used to sign blocks and messages on behalf of this miner. + Worker addr.Address // Must be an ID-address. + + // Additional addresses that are permitted to submit messages controlling this actor (optional). + ControlAddresses []addr.Address // Must all be ID addresses. + + PendingWorkerKey *WorkerKeyChange + + // Byte array representing a Libp2p identity that should be used when connecting to this miner. + PeerId abi.PeerID + + // Slice of byte arrays representing Libp2p multi-addresses used for establishing a connection with this miner. + Multiaddrs []abi.Multiaddrs + + // The proof type used by this miner for sealing sectors. 
+	SealProofType abi.RegisteredSealProof
+
+	// Amount of space in each sector committed by this miner.
+	// This is computed from the proof type and represented here redundantly.
+	SectorSize abi.SectorSize
+
+	// The number of sectors in each Window PoSt partition (proof).
+	// This is computed from the proof type and represented here redundantly.
+	WindowPoStPartitionSectors uint64
+}
+
+type WorkerKeyChange struct {
+	NewWorker   addr.Address // Must be an ID address
+	EffectiveAt abi.ChainEpoch
+}
+
+// Information provided by a miner when pre-committing a sector.
+type SectorPreCommitInfo struct {
+	SealProof       abi.RegisteredSealProof
+	SectorNumber    abi.SectorNumber
+	SealedCID       cid.Cid `checked:"true"` // CommR
+	SealRandEpoch   abi.ChainEpoch
+	DealIDs         []abi.DealID
+	Expiration      abi.ChainEpoch
+	ReplaceCapacity bool // Whether to replace a "committed capacity" no-deal sector (requires non-empty DealIDs)
+	// The committed capacity sector to replace, and its deadline/partition location
+	ReplaceSectorDeadline  uint64
+	ReplaceSectorPartition uint64
+	ReplaceSectorNumber    abi.SectorNumber
+}
+
+// Information stored on-chain for a pre-committed sector.
+type SectorPreCommitOnChainInfo struct {
+	Info               SectorPreCommitInfo
+	PreCommitDeposit   abi.TokenAmount
+	PreCommitEpoch     abi.ChainEpoch
+	DealWeight         abi.DealWeight // Integral of active deals over sector lifetime
+	VerifiedDealWeight abi.DealWeight // Integral of active verified deals over sector lifetime
+}
+
+// Information stored on-chain for a proven sector.
+type SectorOnChainInfo struct { + SectorNumber abi.SectorNumber + SealProof abi.RegisteredSealProof // The seal proof type implies the PoSt proof/s + SealedCID cid.Cid // CommR + DealIDs []abi.DealID + Activation abi.ChainEpoch // Epoch during which the sector proof was accepted + Expiration abi.ChainEpoch // Epoch during which the sector expires + DealWeight abi.DealWeight // Integral of active deals over sector lifetime + VerifiedDealWeight abi.DealWeight // Integral of active verified deals over sector lifetime + InitialPledge abi.TokenAmount // Pledge collected to commit this sector + ExpectedDayReward abi.TokenAmount // Expected one day projection of reward for sector computed at activation time + ExpectedStoragePledge abi.TokenAmount // Expected twenty day projection of reward for sector computed at activation time +} + +func ConstructState(infoCid cid.Cid, periodStart abi.ChainEpoch, emptyBitfieldCid, emptyArrayCid, emptyMapCid, emptyDeadlinesCid cid.Cid, + emptyVestingFundsCid cid.Cid) (*State, error) { + return &State{ + Info: infoCid, + + PreCommitDeposits: abi.NewTokenAmount(0), + LockedFunds: abi.NewTokenAmount(0), + + VestingFunds: emptyVestingFundsCid, + + InitialPledgeRequirement: abi.NewTokenAmount(0), + + PreCommittedSectors: emptyMapCid, + PreCommittedSectorsExpiry: emptyArrayCid, + AllocatedSectors: emptyBitfieldCid, + Sectors: emptyArrayCid, + ProvingPeriodStart: periodStart, + CurrentDeadline: 0, + Deadlines: emptyDeadlinesCid, + EarlyTerminations: bitfield.New(), + }, nil +} + +func ConstructMinerInfo(owner addr.Address, worker addr.Address, controlAddrs []addr.Address, pid []byte, + multiAddrs [][]byte, sealProofType abi.RegisteredSealProof) (*MinerInfo, error) { + + sectorSize, err := sealProofType.SectorSize() + if err != nil { + return nil, err + } + + partitionSectors, err := builtin.SealProofWindowPoStPartitionSectors(sealProofType) + if err != nil { + return nil, err + } + return &MinerInfo{ + Owner: owner, + Worker: worker, + 
ControlAddresses: controlAddrs, + PendingWorkerKey: nil, + PeerId: pid, + Multiaddrs: multiAddrs, + SealProofType: sealProofType, + SectorSize: sectorSize, + WindowPoStPartitionSectors: partitionSectors, + }, nil +} + +func (st *State) GetInfo(store adt.Store) (*MinerInfo, error) { + var info MinerInfo + if err := store.Get(store.Context(), st.Info, &info); err != nil { + return nil, xerrors.Errorf("failed to get miner info %w", err) + } + return &info, nil +} + +func (st *State) SaveInfo(store adt.Store, info *MinerInfo) error { + c, err := store.Put(store.Context(), info) + if err != nil { + return err + } + st.Info = c + return nil +} + +// Returns deadline calculations for the current (according to state) proving period. +func (st *State) DeadlineInfo(currEpoch abi.ChainEpoch) *dline.Info { + return NewDeadlineInfo(st.ProvingPeriodStart, st.CurrentDeadline, currEpoch) +} + +// Returns deadline calculations for the current (according to state) proving period. +func (st *State) QuantSpecForDeadline(dlIdx uint64) QuantSpec { + return QuantSpecForDeadline(NewDeadlineInfo(st.ProvingPeriodStart, dlIdx, 0)) +} + +func (st *State) AllocateSectorNumber(store adt.Store, sectorNo abi.SectorNumber) error { + // This will likely already have been checked, but this is a good place + // to catch any mistakes. 
+ if sectorNo > abi.MaxSectorNumber { + return xc.ErrIllegalArgument.Wrapf("sector number out of range: %d", sectorNo) + } + + var allocatedSectors bitfield.BitField + if err := store.Get(store.Context(), st.AllocatedSectors, &allocatedSectors); err != nil { + return xc.ErrIllegalState.Wrapf("failed to load allocated sectors bitfield: %w", err) + } + if allocated, err := allocatedSectors.IsSet(uint64(sectorNo)); err != nil { + return xc.ErrIllegalState.Wrapf("failed to lookup sector number in allocated sectors bitfield: %w", err) + } else if allocated { + return xc.ErrIllegalArgument.Wrapf("sector number %d has already been allocated", sectorNo) + } + allocatedSectors.Set(uint64(sectorNo)) + + if root, err := store.Put(store.Context(), allocatedSectors); err != nil { + return xc.ErrIllegalArgument.Wrapf("failed to store allocated sectors bitfield after adding sector %d: %w", sectorNo, err) + } else { + st.AllocatedSectors = root + } + return nil +} + +func (st *State) MaskSectorNumbers(store adt.Store, sectorNos bitfield.BitField) error { + lastSectorNo, err := sectorNos.Last() + if err != nil { + return xc.ErrIllegalArgument.Wrapf("invalid mask bitfield: %w", err) + } + + if lastSectorNo > abi.MaxSectorNumber { + return xc.ErrIllegalArgument.Wrapf("masked sector number %d exceeded max sector number", lastSectorNo) + } + + var allocatedSectors bitfield.BitField + if err := store.Get(store.Context(), st.AllocatedSectors, &allocatedSectors); err != nil { + return xc.ErrIllegalState.Wrapf("failed to load allocated sectors bitfield: %w", err) + } + + allocatedSectors, err = bitfield.MergeBitFields(allocatedSectors, sectorNos) + if err != nil { + return xc.ErrIllegalState.Wrapf("failed to merge allocated bitfield with mask: %w", err) + } + + if root, err := store.Put(store.Context(), allocatedSectors); err != nil { + return xc.ErrIllegalArgument.Wrapf("failed to mask allocated sectors bitfield: %w", err) + } else { + st.AllocatedSectors = root + } + return nil +} + 
+func (st *State) PutPrecommittedSector(store adt.Store, info *SectorPreCommitOnChainInfo) error { + precommitted, err := adt.AsMap(store, st.PreCommittedSectors) + if err != nil { + return err + } + + err = precommitted.Put(SectorKey(info.Info.SectorNumber), info) + if err != nil { + return errors.Wrapf(err, "failed to store precommitment for %v", info) + } + st.PreCommittedSectors, err = precommitted.Root() + return err +} + +func (st *State) GetPrecommittedSector(store adt.Store, sectorNo abi.SectorNumber) (*SectorPreCommitOnChainInfo, bool, error) { + precommitted, err := adt.AsMap(store, st.PreCommittedSectors) + if err != nil { + return nil, false, err + } + + var info SectorPreCommitOnChainInfo + found, err := precommitted.Get(SectorKey(sectorNo), &info) + if err != nil { + return nil, false, errors.Wrapf(err, "failed to load precommitment for %v", sectorNo) + } + return &info, found, nil +} + +// This method gets and returns the requested pre-committed sectors, skipping +// missing sectors. 
+func (st *State) FindPrecommittedSectors(store adt.Store, sectorNos ...abi.SectorNumber) ([]*SectorPreCommitOnChainInfo, error) { + precommitted, err := adt.AsMap(store, st.PreCommittedSectors) + if err != nil { + return nil, err + } + + result := make([]*SectorPreCommitOnChainInfo, 0, len(sectorNos)) + + for _, sectorNo := range sectorNos { + var info SectorPreCommitOnChainInfo + found, err := precommitted.Get(SectorKey(sectorNo), &info) + if err != nil { + return nil, errors.Wrapf(err, "failed to load precommitment for %v", sectorNo) + } + if !found { + // TODO #564 log: "failed to get precommitted sector on sector %d, dropping from prove commit set" + continue + } + result = append(result, &info) + } + + return result, nil +} + +func (st *State) DeletePrecommittedSectors(store adt.Store, sectorNos ...abi.SectorNumber) error { + precommitted, err := adt.AsMap(store, st.PreCommittedSectors) + if err != nil { + return err + } + + for _, sectorNo := range sectorNos { + err = precommitted.Delete(SectorKey(sectorNo)) + if err != nil { + return xerrors.Errorf("failed to delete precommitment for %v: %w", sectorNo, err) + } + } + st.PreCommittedSectors, err = precommitted.Root() + return err +} + +func (st *State) HasSectorNo(store adt.Store, sectorNo abi.SectorNumber) (bool, error) { + sectors, err := LoadSectors(store, st.Sectors) + if err != nil { + return false, err + } + + _, found, err := sectors.Get(sectorNo) + if err != nil { + return false, xerrors.Errorf("failed to get sector %v: %w", sectorNo, err) + } + return found, nil +} + +func (st *State) PutSectors(store adt.Store, newSectors ...*SectorOnChainInfo) error { + sectors, err := LoadSectors(store, st.Sectors) + if err != nil { + return xerrors.Errorf("failed to load sectors: %w", err) + } + + err = sectors.Store(newSectors...) 
+ if err != nil { + return err + } + + st.Sectors, err = sectors.Root() + if err != nil { + return xerrors.Errorf("failed to persist sectors: %w", err) + } + return nil +} + +func (st *State) GetSector(store adt.Store, sectorNo abi.SectorNumber) (*SectorOnChainInfo, bool, error) { + sectors, err := LoadSectors(store, st.Sectors) + if err != nil { + return nil, false, err + } + + return sectors.Get(sectorNo) +} + +func (st *State) DeleteSectors(store adt.Store, sectorNos bitfield.BitField) error { + sectors, err := LoadSectors(store, st.Sectors) + if err != nil { + return err + } + err = sectorNos.ForEach(func(sectorNo uint64) error { + if err = sectors.Delete(sectorNo); err != nil { + return xerrors.Errorf("failed to delete sector %v: %w", sectorNos, err) + } + return nil + }) + if err != nil { + return err + } + + st.Sectors, err = sectors.Root() + return err +} + +// Iterates sectors. +// The pointer provided to the callback is not safe for re-use. Copy the pointed-to value in full to hold a reference. +func (st *State) ForEachSector(store adt.Store, f func(*SectorOnChainInfo)) error { + sectors, err := LoadSectors(store, st.Sectors) + if err != nil { + return err + } + var sector SectorOnChainInfo + return sectors.ForEach(&sector, func(idx int64) error { + f(&sector) + return nil + }) +} + +func (st *State) FindSector(store adt.Store, sno abi.SectorNumber) (uint64, uint64, error) { + deadlines, err := st.LoadDeadlines(store) + if err != nil { + return 0, 0, err + } + return FindSector(store, deadlines, sno) +} + +// Schedules each sector to expire at its next deadline end. If it can't find +// any given sector, it skips it. +// +// This method assumes that each sector's power has not changed, despite the rescheduling. +// +// Note: this method is used to "upgrade" sectors, rescheduling the now-replaced +// sectors to expire at the end of the next deadline.
Given the expense of +// sealing a sector, this function skips missing/faulty/terminated "upgraded" +// sectors instead of failing. That way, the new sectors can still be proved. +func (st *State) RescheduleSectorExpirations( + store adt.Store, currEpoch abi.ChainEpoch, ssize abi.SectorSize, + deadlineSectors DeadlineSectorMap, +) error { + deadlines, err := st.LoadDeadlines(store) + if err != nil { + return err + } + sectors, err := LoadSectors(store, st.Sectors) + if err != nil { + return err + } + + if err = deadlineSectors.ForEach(func(dlIdx uint64, pm PartitionSectorMap) error { + dlInfo := NewDeadlineInfo(st.ProvingPeriodStart, dlIdx, currEpoch).NextNotElapsed() + newExpiration := dlInfo.Last() + + dl, err := deadlines.LoadDeadline(store, dlIdx) + if err != nil { + return err + } + + if err := dl.RescheduleSectorExpirations(store, sectors, newExpiration, pm, ssize, QuantSpecForDeadline(dlInfo)); err != nil { + return err + } + + if err := deadlines.UpdateDeadline(store, dlIdx, dl); err != nil { + return err + } + + return nil + }); err != nil { + return err + } + return st.SaveDeadlines(store, deadlines) +} + +// Assign new sectors to deadlines. +func (st *State) AssignSectorsToDeadlines( + store adt.Store, + currentEpoch abi.ChainEpoch, + sectors []*SectorOnChainInfo, + partitionSize uint64, + sectorSize abi.SectorSize, +) (PowerPair, error) { + deadlines, err := st.LoadDeadlines(store) + if err != nil { + return NewPowerPairZero(), err + } + + // Sort sectors by number to get better runs in partition bitfields. + sort.Slice(sectors, func(i, j int) bool { + return sectors[i].SectorNumber < sectors[j].SectorNumber + }) + + var deadlineArr [WPoStPeriodDeadlines]*Deadline + err = deadlines.ForEach(store, func(idx uint64, dl *Deadline) error { + // Skip deadlines that aren't currently mutable. 
+ if deadlineIsMutable(st.ProvingPeriodStart, idx, currentEpoch) { + deadlineArr[int(idx)] = dl + } + return nil + }) + if err != nil { + return NewPowerPairZero(), err + } + + newPower := NewPowerPairZero() + for dlIdx, deadlineSectors := range assignDeadlines(partitionSize, &deadlineArr, sectors) { + if len(deadlineSectors) == 0 { + continue + } + + quant := st.QuantSpecForDeadline(uint64(dlIdx)) + dl := deadlineArr[dlIdx] + + deadlineNewPower, err := dl.AddSectors(store, partitionSize, deadlineSectors, sectorSize, quant) + if err != nil { + return NewPowerPairZero(), err + } + + newPower = newPower.Add(deadlineNewPower) + + err = deadlines.UpdateDeadline(store, uint64(dlIdx), dl) + if err != nil { + return NewPowerPairZero(), err + } + } + + err = st.SaveDeadlines(store, deadlines) + if err != nil { + return NewPowerPairZero(), err + } + return newPower, nil +} + +// Pops up to max early terminated sectors from all deadlines. +// +// Returns hasMore if we still have more early terminations to process. +func (st *State) PopEarlyTerminations(store adt.Store, maxPartitions, maxSectors uint64) (result TerminationResult, hasMore bool, err error) { + stopErr := errors.New("stop error") + + // Anything to do? This lets us avoid loading the deadlines if there's nothing to do. + noEarlyTerminations, err := st.EarlyTerminations.IsEmpty() + if err != nil { + return TerminationResult{}, false, xerrors.Errorf("failed to count deadlines with early terminations: %w", err) + } else if noEarlyTerminations { + return TerminationResult{}, false, nil + } + + // Load deadlines + deadlines, err := st.LoadDeadlines(store) + if err != nil { + return TerminationResult{}, false, xerrors.Errorf("failed to load deadlines: %w", err) + } + + // Process early terminations. + if err = st.EarlyTerminations.ForEach(func(dlIdx uint64) error { + // Load deadline + partitions. 
+ dl, err := deadlines.LoadDeadline(store, dlIdx) + if err != nil { + return xerrors.Errorf("failed to load deadline %d: %w", dlIdx, err) + } + + deadlineResult, more, err := dl.PopEarlyTerminations(store, maxPartitions-result.PartitionsProcessed, maxSectors-result.SectorsProcessed) + if err != nil { + return xerrors.Errorf("failed to pop early terminations for deadline %d: %w", dlIdx, err) + } + + err = result.Add(deadlineResult) + if err != nil { + return xerrors.Errorf("failed to merge result from popping early terminations from deadline: %w", err) + } + + if !more { + // safe to do while iterating. + st.EarlyTerminations.Unset(dlIdx) + } + + // Save the deadline + err = deadlines.UpdateDeadline(store, dlIdx, dl) + if err != nil { + return xerrors.Errorf("failed to store deadline %d: %w", dlIdx, err) + } + + if result.BelowLimit(maxPartitions, maxSectors) { + return nil + } + + return stopErr + }); err != nil && err != stopErr { + return TerminationResult{}, false, xerrors.Errorf("failed to walk early terminations bitfield for deadlines: %w", err) + } + + // Save back the deadlines. + err = st.SaveDeadlines(store, deadlines) + if err != nil { + return TerminationResult{}, false, xerrors.Errorf("failed to save deadlines: %w", err) + } + + // Ok, check to see if we've handled all early terminations. + noEarlyTerminations, err = st.EarlyTerminations.IsEmpty() + if err != nil { + return TerminationResult{}, false, xerrors.Errorf("failed to count remaining early terminations deadlines") + } + + return result, !noEarlyTerminations, nil +} + +// Returns an error if the target sector cannot be found and/or is faulty/terminated. 
+func (st *State) CheckSectorHealth(store adt.Store, dlIdx, pIdx uint64, sector abi.SectorNumber) error { + dls, err := st.LoadDeadlines(store) + if err != nil { + return err + } + + dl, err := dls.LoadDeadline(store, dlIdx) + if err != nil { + return err + } + + partition, err := dl.LoadPartition(store, pIdx) + if err != nil { + return err + } + + if exists, err := partition.Sectors.IsSet(uint64(sector)); err != nil { + return xc.ErrIllegalState.Wrapf("failed to decode sectors bitfield (deadline %d, partition %d): %w", dlIdx, pIdx, err) + } else if !exists { + return xc.ErrNotFound.Wrapf("sector %d not a member of partition %d, deadline %d", sector, pIdx, dlIdx) + } + + if faulty, err := partition.Faults.IsSet(uint64(sector)); err != nil { + return xc.ErrIllegalState.Wrapf("failed to decode faults bitfield (deadline %d, partition %d): %w", dlIdx, pIdx, err) + } else if faulty { + return xc.ErrForbidden.Wrapf("sector %d of partition %d, deadline %d is faulty", sector, pIdx, dlIdx) + } + + if terminated, err := partition.Terminated.IsSet(uint64(sector)); err != nil { + return xc.ErrIllegalState.Wrapf("failed to decode terminated bitfield (deadline %d, partition %d): %w", dlIdx, pIdx, err) + } else if terminated { + return xc.ErrNotFound.Wrapf("sector %d of partition %d, deadline %d is terminated", sector, pIdx, dlIdx) + } + + return nil +} + +// Loads sector info for a sequence of sectors. +func (st *State) LoadSectorInfos(store adt.Store, sectors bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectorsArr, err := LoadSectors(store, st.Sectors) + if err != nil { + return nil, err + } + return sectorsArr.Load(sectors) +} + +// Loads info for a set of sectors to be proven. +// If any of the sectors are declared faulty and not to be recovered, info for the first non-faulty sector is substituted instead. +// If any of the sectors are declared recovered, they are returned from this method. 
+func (st *State) LoadSectorInfosForProof(store adt.Store, provenSectors, expectedFaults bitfield.BitField) ([]*SectorOnChainInfo, error) { + nonFaults, err := bitfield.SubtractBitField(provenSectors, expectedFaults) + if err != nil { + return nil, xerrors.Errorf("failed to diff bitfields: %w", err) + } + + // Return empty if no non-faults + if empty, err := nonFaults.IsEmpty(); err != nil { + return nil, xerrors.Errorf("failed to check if bitfield was empty: %w", err) + } else if empty { + return nil, nil + } + + // Select a non-faulty sector as a substitute for faulty ones. + goodSectorNo, err := nonFaults.First() + if err != nil { + return nil, xerrors.Errorf("failed to get first good sector: %w", err) + } + + // Load sector infos + sectorInfos, err := st.LoadSectorInfosWithFaultMask(store, provenSectors, expectedFaults, abi.SectorNumber(goodSectorNo)) + if err != nil { + return nil, xerrors.Errorf("failed to load sector infos: %w", err) + } + return sectorInfos, nil +} + +// Loads sector info for a sequence of sectors, substituting info for a stand-in sector for any that are faulty. +func (st *State) LoadSectorInfosWithFaultMask(store adt.Store, sectors bitfield.BitField, faults bitfield.BitField, faultStandIn abi.SectorNumber) ([]*SectorOnChainInfo, error) { + sectorArr, err := LoadSectors(store, st.Sectors) + if err != nil { + return nil, xerrors.Errorf("failed to load sectors array: %w", err) + } + standInInfo, err := sectorArr.MustGet(faultStandIn) + if err != nil { + return nil, fmt.Errorf("failed to load stand-in sector %d: %v", faultStandIn, err) + } + + // Expand faults into a map for quick lookups. + // The faults bitfield should already be a subset of the sectors bitfield. + sectorCount, err := sectors.Count() + if err != nil { + return nil, err + } + faultSet, err := faults.AllMap(sectorCount) + if err != nil { + return nil, fmt.Errorf("failed to expand faults: %w", err) + } + + // Load the sector infos, masking out fault sectors with a good one. 
+ sectorInfos := make([]*SectorOnChainInfo, 0, sectorCount) + err = sectors.ForEach(func(i uint64) error { + sector := standInInfo + faulty := faultSet[i] + if !faulty { + sectorOnChain, err := sectorArr.MustGet(abi.SectorNumber(i)) + if err != nil { + return xerrors.Errorf("failed to load sector %d: %w", i, err) + } + sector = sectorOnChain + } + sectorInfos = append(sectorInfos, sector) + return nil + }) + return sectorInfos, err +} + +func (st *State) LoadDeadlines(store adt.Store) (*Deadlines, error) { + var deadlines Deadlines + if err := store.Get(store.Context(), st.Deadlines, &deadlines); err != nil { + return nil, xc.ErrIllegalState.Wrapf("failed to load deadlines (%s): %w", st.Deadlines, err) + } + + return &deadlines, nil +} + +func (st *State) SaveDeadlines(store adt.Store, deadlines *Deadlines) error { + c, err := store.Put(store.Context(), deadlines) + if err != nil { + return err + } + st.Deadlines = c + return nil +} + +// LoadVestingFunds loads the vesting funds table from the store +func (st *State) LoadVestingFunds(store adt.Store) (*VestingFunds, error) { + var funds VestingFunds + if err := store.Get(store.Context(), st.VestingFunds, &funds); err != nil { + return nil, xerrors.Errorf("failed to load vesting funds (%s): %w", st.VestingFunds, err) + } + + return &funds, nil +} + +// SaveVestingFunds saves the vesting table to the store +func (st *State) SaveVestingFunds(store adt.Store, funds *VestingFunds) error { + c, err := store.Put(store.Context(), funds) + if err != nil { + return err + } + st.VestingFunds = c + return nil +} + +// +// Funds and vesting +// + +func (st *State) AddPreCommitDeposit(amount abi.TokenAmount) { + newTotal := big.Add(st.PreCommitDeposits, amount) + AssertMsg(newTotal.GreaterThanEqual(big.Zero()), "negative pre-commit deposit %s after adding %s to prior %s", + newTotal, amount, st.PreCommitDeposits) + st.PreCommitDeposits = newTotal +} + +func (st *State) AddInitialPledgeRequirement(amount abi.TokenAmount) { + 
newTotal := big.Add(st.InitialPledgeRequirement, amount) + AssertMsg(newTotal.GreaterThanEqual(big.Zero()), "negative initial pledge requirement %s after adding %s to prior %s", + newTotal, amount, st.InitialPledgeRequirement) + st.InitialPledgeRequirement = newTotal +} + +// AddLockedFunds first vests and unlocks the vested funds AND then locks the given funds in the vesting table. +func (st *State) AddLockedFunds(store adt.Store, currEpoch abi.ChainEpoch, vestingSum abi.TokenAmount, spec *VestSpec) (vested abi.TokenAmount, err error) { + AssertMsg(vestingSum.GreaterThanEqual(big.Zero()), "negative vesting sum %s", vestingSum) + + vestingFunds, err := st.LoadVestingFunds(store) + if err != nil { + return big.Zero(), xerrors.Errorf("failed to load vesting funds: %w", err) + } + + // unlock vested funds first + amountUnlocked := vestingFunds.unlockVestedFunds(currEpoch) + st.LockedFunds = big.Sub(st.LockedFunds, amountUnlocked) + Assert(st.LockedFunds.GreaterThanEqual(big.Zero())) + + // add locked funds now + vestingFunds.addLockedFunds(currEpoch, vestingSum, st.ProvingPeriodStart, spec) + st.LockedFunds = big.Add(st.LockedFunds, vestingSum) + + // save the updated vesting table state + if err := st.SaveVestingFunds(store, vestingFunds); err != nil { + return big.Zero(), xerrors.Errorf("failed to save vesting funds: %w", err) + } + + return amountUnlocked, nil +} + +// PenalizeFundsInPriorityOrder first unlocks unvested funds from the vesting table. +// If the target is not yet hit it deducts funds from the (new) available balance. +// Returns the amount unlocked from the vesting table and the amount taken from current balance. +// If the penalty exceeds the total amount available in the vesting table and unlocked funds +// the penalty is reduced to match. 
This must be fixed when handling bankrupcy: +// https://github.com/filecoin-project/specs-actors/issues/627 +func (st *State) PenalizeFundsInPriorityOrder(store adt.Store, currEpoch abi.ChainEpoch, target, unlockedBalance abi.TokenAmount) (fromVesting abi.TokenAmount, fromBalance abi.TokenAmount, err error) { + fromVesting, err = st.UnlockUnvestedFunds(store, currEpoch, target) + if err != nil { + return abi.NewTokenAmount(0), abi.NewTokenAmount(0), err + } + if fromVesting.Equals(target) { + return fromVesting, abi.NewTokenAmount(0), nil + } + + // unlocked funds were just deducted from available, so track that + remaining := big.Sub(target, fromVesting) + + fromBalance = big.Min(unlockedBalance, remaining) + return fromVesting, fromBalance, nil +} + +// Unlocks an amount of funds that have *not yet vested*, if possible. +// The soonest-vesting entries are unlocked first. +// Returns the amount actually unlocked. +func (st *State) UnlockUnvestedFunds(store adt.Store, currEpoch abi.ChainEpoch, target abi.TokenAmount) (abi.TokenAmount, error) { + vestingFunds, err := st.LoadVestingFunds(store) + if err != nil { + return big.Zero(), xerrors.Errorf("failed tp load vesting funds: %w", err) + } + + amountUnlocked := vestingFunds.unlockUnvestedFunds(currEpoch, target) + + st.LockedFunds = big.Sub(st.LockedFunds, amountUnlocked) + Assert(st.LockedFunds.GreaterThanEqual(big.Zero())) + + if err := st.SaveVestingFunds(store, vestingFunds); err != nil { + return big.Zero(), xerrors.Errorf("failed to save vesting funds: %w", err) + } + + return amountUnlocked, nil +} + +// Unlocks all vesting funds that have vested before the provided epoch. +// Returns the amount unlocked. 
+func (st *State) UnlockVestedFunds(store adt.Store, currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + vestingFunds, err := st.LoadVestingFunds(store) + if err != nil { + return big.Zero(), xerrors.Errorf("failed to load vesting funds: %w", err) + } + + amountUnlocked := vestingFunds.unlockVestedFunds(currEpoch) + st.LockedFunds = big.Sub(st.LockedFunds, amountUnlocked) + Assert(st.LockedFunds.GreaterThanEqual(big.Zero())) + + err = st.SaveVestingFunds(store, vestingFunds) + if err != nil { + return big.Zero(), xerrors.Errorf("failed to save vesing funds: %w", err) + } + + return amountUnlocked, nil +} + +// CheckVestedFunds returns the amount of vested funds that have vested before the provided epoch. +func (st *State) CheckVestedFunds(store adt.Store, currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + vestingFunds, err := st.LoadVestingFunds(store) + if err != nil { + return big.Zero(), xerrors.Errorf("failed to load vesting funds: %w", err) + } + + amountVested := abi.NewTokenAmount(0) + + for i := range vestingFunds.Funds { + vf := vestingFunds.Funds[i] + epoch := vf.Epoch + amount := vf.Amount + + if epoch >= currEpoch { + break + } + + amountVested = big.Add(amountVested, amount) + } + + return amountVested, nil +} + +// Unclaimed funds that are not locked -- includes funds used to cover initial pledge requirement +func (st *State) GetUnlockedBalance(actorBalance abi.TokenAmount) abi.TokenAmount { + unlockedBalance := big.Subtract(actorBalance, st.LockedFunds, st.PreCommitDeposits) + Assert(unlockedBalance.GreaterThanEqual(big.Zero())) + return unlockedBalance +} + +// Unclaimed funds. 
Actor balance - (locked funds, precommit deposit, ip requirement) +// Can go negative if the miner is in IP debt +func (st *State) GetAvailableBalance(actorBalance abi.TokenAmount) abi.TokenAmount { + availableBalance := st.GetUnlockedBalance(actorBalance) + return big.Sub(availableBalance, st.InitialPledgeRequirement) +} + +func (st *State) AssertBalanceInvariants(balance abi.TokenAmount) { + Assert(st.PreCommitDeposits.GreaterThanEqual(big.Zero())) + Assert(st.LockedFunds.GreaterThanEqual(big.Zero())) + Assert(balance.GreaterThanEqual(big.Sum(st.PreCommitDeposits, st.LockedFunds))) +} + +func (st *State) MeetsInitialPledgeCondition(balance abi.TokenAmount) bool { + available := st.GetUnlockedBalance(balance) + return available.GreaterThanEqual(st.InitialPledgeRequirement) +} + +// pre-commit expiry +func (st *State) QuantSpecEveryDeadline() QuantSpec { + return NewQuantSpec(WPoStChallengeWindow, st.ProvingPeriodStart) +} + +func (st *State) AddPreCommitExpiry(store adt.Store, expireEpoch abi.ChainEpoch, sectorNum abi.SectorNumber) error { + // Load BitField Queue for sector expiry + quant := st.QuantSpecEveryDeadline() + queue, err := LoadBitfieldQueue(store, st.PreCommittedSectorsExpiry, quant) + if err != nil { + return xerrors.Errorf("failed to load pre-commit expiry queue: %w", err) + } + + // add entry for this sector to the queue + if err := queue.AddToQueueValues(expireEpoch, uint64(sectorNum)); err != nil { + return xerrors.Errorf("failed to add pre-commit sector expiry to queue: %w", err) + } + + st.PreCommittedSectorsExpiry, err = queue.Root() + if err != nil { + return xerrors.Errorf("failed to save pre-commit sector queue: %w", err) + } + + return nil +} + +func (st *State) checkPrecommitExpiry(store adt.Store, sectors bitfield.BitField) (depositToBurn abi.TokenAmount, err error) { + depositToBurn = abi.NewTokenAmount(0) + + var precommitsToDelete []abi.SectorNumber + if err = sectors.ForEach(func(i uint64) error { + sectorNo := abi.SectorNumber(i) + 
sector, found, err := st.GetPrecommittedSector(store, sectorNo) + if err != nil { + return err + } + if !found { + // already committed/deleted + return nil + } + + // mark it for deletion + precommitsToDelete = append(precommitsToDelete, sectorNo) + + // increment deposit to burn + depositToBurn = big.Add(depositToBurn, sector.PreCommitDeposit) + return nil + }); err != nil { + return big.Zero(), xerrors.Errorf("failed to check pre-commit expiries: %w", err) + } + + // Actually delete it. + if len(precommitsToDelete) > 0 { + if err := st.DeletePrecommittedSectors(store, precommitsToDelete...); err != nil { + return big.Zero(), fmt.Errorf("failed to delete pre-commits: %w", err) + } + } + + st.PreCommitDeposits = big.Sub(st.PreCommitDeposits, depositToBurn) + Assert(st.PreCommitDeposits.GreaterThanEqual(big.Zero())) + + // This deposit was locked separately to pledge collateral so there's no pledge change here. + return depositToBurn, nil +} + +// +// Misc helpers +// + +func SectorKey(e abi.SectorNumber) abi.Keyer { + return abi.UIntKey(uint64(e)) +} + +func init() { + // Check that ChainEpoch is indeed an unsigned integer to confirm that SectorKey is making the right interpretation. 
+ var e abi.SectorNumber + if reflect.TypeOf(e).Kind() != reflect.Uint64 { + panic("incorrect sector number encoding") + } +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/miner/monies.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/miner/monies.go new file mode 100644 index 0000000000..bf0d346f15 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/miner/monies.go @@ -0,0 +1,131 @@ +package miner + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/specs-actors/actors/util/math" + "github.com/filecoin-project/specs-actors/actors/util/smoothing" +) + +// IP = IPBase(precommit time) + AdditionalIP(precommit time) +// IPBase(t) = BR(t, InitialPledgeProjectionPeriod) +// AdditionalIP(t) = LockTarget(t)*PledgeShare(t) +// LockTarget = (LockTargetFactorNum / LockTargetFactorDenom) * FILCirculatingSupply(t) +// PledgeShare(t) = sectorQAPower / max(BaselinePower(t), NetworkQAPower(t)) +// PARAM_FINISH +var PreCommitDepositFactor = 20 +var InitialPledgeFactor = 20 +var PreCommitDepositProjectionPeriod = abi.ChainEpoch(PreCommitDepositFactor) * builtin.EpochsInDay +var InitialPledgeProjectionPeriod = abi.ChainEpoch(InitialPledgeFactor) * builtin.EpochsInDay +var LockTargetFactorNum = big.NewInt(3) +var LockTargetFactorDenom = big.NewInt(10) +// Cap on initial pledge requirement for sectors during the Space Race network. +// The target is 1 FIL (10**18 attoFIL) per 32GiB. +// This does not divide evenly, so the result is fractionally smaller. +var SpaceRaceInitialPledgeMaxPerByte = big.Div(big.NewInt(1e18), big.NewInt(32 << 30)) + +// FF = BR(t, DeclaredFaultProjectionPeriod) +// projection period of 2.14 days: 2880 * 2.14 = 6163.2. 
Rounded to nearest epoch 6163 +var DeclaredFaultFactorNumV0 = 214 +var DeclaredFaultFactorNumV3 = 351 +var DeclaredFaultFactorDenom = 100 +var DeclaredFaultProjectionPeriodV0 = abi.ChainEpoch((builtin.EpochsInDay * DeclaredFaultFactorNumV0) / DeclaredFaultFactorDenom) +var DeclaredFaultProjectionPeriodV3 = abi.ChainEpoch((builtin.EpochsInDay * DeclaredFaultFactorNumV3) / DeclaredFaultFactorDenom) + +// SP = BR(t, UndeclaredFaultProjectionPeriod) +var UndeclaredFaultFactorNumV0 = 50 +var UndeclaredFaultFactorNumV1 = 35 +var UndeclaredFaultFactorDenom = 10 + +var UndeclaredFaultProjectionPeriodV0 = abi.ChainEpoch((builtin.EpochsInDay * UndeclaredFaultFactorNumV0) / UndeclaredFaultFactorDenom) +var UndeclaredFaultProjectionPeriodV1 = abi.ChainEpoch((builtin.EpochsInDay * UndeclaredFaultFactorNumV1) / UndeclaredFaultFactorDenom) + +// Maximum number of days of BR a terminated sector can be penalized +const TerminationLifetimeCap = abi.ChainEpoch(70) + +// This is the BR(t) value of the given sector for the current epoch. +// It is the expected reward this sector would pay out over a one day period. +// BR(t) = CurrEpochReward(t) * SectorQualityAdjustedPower * EpochsInDay / TotalNetworkQualityAdjustedPower(t) +func ExpectedRewardForPower(rewardEstimate, networkQAPowerEstimate *smoothing.FilterEstimate, qaSectorPower abi.StoragePower, projectionDuration abi.ChainEpoch) abi.TokenAmount { + networkQAPowerSmoothed := networkQAPowerEstimate.Estimate() + if networkQAPowerSmoothed.IsZero() { + return rewardEstimate.Estimate() + } + expectedRewardForProvingPeriod := smoothing.ExtrapolatedCumSumOfRatio(projectionDuration, 0, rewardEstimate, networkQAPowerEstimate) + br := big.Mul(qaSectorPower, expectedRewardForProvingPeriod) // Q.0 * Q.128 => Q.128 + return big.Rsh(br, math.Precision) +} + +// This is the FF(t) penalty for a sector expected to be in the fault state either because the fault was declared or because +// it has been previously detected by the network. 
+// FF(t) = DeclaredFaultFactor * BR(t) +func PledgePenaltyForDeclaredFault(rewardEstimate, networkQAPowerEstimate *smoothing.FilterEstimate, qaSectorPower abi.StoragePower, + networkVersion network.Version) abi.TokenAmount { + projectionPeriod := DeclaredFaultProjectionPeriodV0 + if networkVersion >= network.Version3 { + projectionPeriod = DeclaredFaultProjectionPeriodV3 + } + return ExpectedRewardForPower(rewardEstimate, networkQAPowerEstimate, qaSectorPower, projectionPeriod) +} + +// This is the SP(t) penalty for a newly faulty sector that has not been declared. +// SP(t) = UndeclaredFaultFactor * BR(t) +func PledgePenaltyForUndeclaredFault(rewardEstimate, networkQAPowerEstimate *smoothing.FilterEstimate, qaSectorPower abi.StoragePower, + networkVersion network.Version) abi.TokenAmount { + projectionPeriod := UndeclaredFaultProjectionPeriodV0 + if networkVersion >= network.Version1 { + projectionPeriod = UndeclaredFaultProjectionPeriodV1 + } + return ExpectedRewardForPower(rewardEstimate, networkQAPowerEstimate, qaSectorPower, projectionPeriod) +} + +// Penalty to locked pledge collateral for the termination of a sector before scheduled expiry. +// SectorAge is the time between the sector's activation and termination. 
+func PledgePenaltyForTermination(dayRewardAtActivation, twentyDayRewardAtActivation abi.TokenAmount, sectorAge abi.ChainEpoch, + rewardEstimate, networkQAPowerEstimate *smoothing.FilterEstimate, qaSectorPower abi.StoragePower, networkVersion network.Version) abi.TokenAmount { + // max(SP(t), BR(StartEpoch, 20d) + BR(StartEpoch, 1d)*min(SectorAgeInDays, 70)) + // and sectorAgeInDays = sectorAge / EpochsInDay + + cappedSectorAge := big.NewInt(int64(minEpoch(sectorAge, TerminationLifetimeCap*builtin.EpochsInDay))) + if networkVersion >= network.Version1 { + cappedSectorAge = big.NewInt(int64(minEpoch(sectorAge / 2, TerminationLifetimeCap*builtin.EpochsInDay))) + } + + return big.Max( + PledgePenaltyForUndeclaredFault(rewardEstimate, networkQAPowerEstimate, qaSectorPower, networkVersion), + big.Add( + twentyDayRewardAtActivation, + big.Div( + big.Mul(dayRewardAtActivation, cappedSectorAge), + big.NewInt(builtin.EpochsInDay)))) +} + +// Computes the PreCommit Deposit given sector qa weight and current network conditions. +// PreCommit Deposit = 20 * BR(t) +func PreCommitDepositForPower(rewardEstimate, networkQAPowerEstimate *smoothing.FilterEstimate, qaSectorPower abi.StoragePower) abi.TokenAmount { + return ExpectedRewardForPower(rewardEstimate, networkQAPowerEstimate, qaSectorPower, PreCommitDepositProjectionPeriod) +} + +// Computes the pledge requirement for committing new quality-adjusted power to the network, given the current +// total power, total pledge commitment, epoch block reward, and circulating token supply. +// In plain language, the pledge requirement is a multiple of the block reward expected to be earned by the +// newly-committed power, holding the per-epoch block reward constant (though in reality it will change over time). 
+func InitialPledgeForPower(qaPower abi.StoragePower, baselinePower abi.StoragePower, networkTotalPledge abi.TokenAmount, rewardEstimate, networkQAPowerEstimate *smoothing.FilterEstimate, networkCirculatingSupplySmoothed abi.TokenAmount) abi.TokenAmount { + networkQAPower := networkQAPowerEstimate.Estimate() + ipBase := ExpectedRewardForPower(rewardEstimate, networkQAPowerEstimate, qaPower, InitialPledgeProjectionPeriod) + + lockTargetNum := big.Mul(LockTargetFactorNum, networkCirculatingSupplySmoothed) + lockTargetDenom := LockTargetFactorDenom + pledgeShareNum := qaPower + pledgeShareDenom := big.Max(big.Max(networkQAPower, baselinePower), qaPower) // use qaPower in case others are 0 + additionalIPNum := big.Mul(lockTargetNum, pledgeShareNum) + additionalIPDenom := big.Mul(lockTargetDenom, pledgeShareDenom) + additionalIP := big.Div(additionalIPNum, additionalIPDenom) + + nominalPledge := big.Add(ipBase, additionalIP) + spaceRacePledgeCap := big.Mul(SpaceRaceInitialPledgeMaxPerByte, qaPower) + return big.Min(nominalPledge, spaceRacePledgeCap) +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/miner/policy.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/miner/policy.go new file mode 100644 index 0000000000..f66386486e --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/miner/policy.go @@ -0,0 +1,223 @@ +package miner + +import ( + "fmt" + + abi "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" + + builtin "github.com/filecoin-project/specs-actors/actors/builtin" + . "github.com/filecoin-project/specs-actors/actors/util" +) + +// The period over which all a miner's active sectors will be challenged. 
+var WPoStProvingPeriod = abi.ChainEpoch(builtin.EpochsInDay) // 24 hours + +// The duration of a deadline's challenge window, the period before a deadline when the challenge is available. +var WPoStChallengeWindow = abi.ChainEpoch(30 * 60 / builtin.EpochDurationSeconds) // 30 minutes (48 per day) + +// The number of non-overlapping PoSt deadlines in each proving period. +const WPoStPeriodDeadlines = uint64(48) + +// WPoStMaxChainCommitAge is the maximum distance back that a valid Window PoSt must commit to the current chain. +var WPoStMaxChainCommitAge = WPoStChallengeWindow + +func init() { + // Check that the challenge windows divide the proving period evenly. + if WPoStProvingPeriod%WPoStChallengeWindow != 0 { + panic(fmt.Sprintf("incompatible proving period %d and challenge window %d", WPoStProvingPeriod, WPoStChallengeWindow)) + } + if abi.ChainEpoch(WPoStPeriodDeadlines)*WPoStChallengeWindow != WPoStProvingPeriod { + panic(fmt.Sprintf("incompatible proving period %d and challenge window %d", WPoStProvingPeriod, WPoStChallengeWindow)) + } +} + +// The maximum number of sectors that a miner can have simultaneously active. +// This also bounds the number of faults that can be declared, etc. +// TODO raise this number, carefully +// https://github.com/filecoin-project/specs-actors/issues/470 +const SectorsMax = 32 << 20 // PARAM_FINISH + +// The maximum number of partitions that may be required to be loaded in a single invocation. +// This limits the number of simultaneous fault, recovery, or sector-extension declarations. +// With 48 deadlines (half-hour), 200 partitions per declaration permits loading a full EiB of 32GiB +// sectors with 1 message per epoch within a single half-hour deadline. A miner can of course submit more messages. +const AddressedPartitionsMax = 200 + +// The maximum number of sector infos that may be required to be loaded in a single invocation. 
+const AddressedSectorsMax = 10_000 + +// The maximum number of partitions that may be required to be loaded in a single invocation, +// when all the sector infos for the partitions will be loaded. +func loadPartitionsSectorsMax(partitionSectorCount uint64) uint64 { + return min64(AddressedSectorsMax/partitionSectorCount, AddressedPartitionsMax) +} + +// The maximum number of new sectors that may be staged by a miner during a single proving period. +const NewSectorsPerPeriodMax = 128 << 10 + +// Epochs after which chain state is final. +const ChainFinality = abi.ChainEpoch(900) + +var SealedCIDPrefix = cid.Prefix{ + Version: 1, + Codec: cid.FilCommitmentSealed, + MhType: mh.POSEIDON_BLS12_381_A1_FC1, + MhLength: 32, +} + +// List of proof types which can be used when creating new miner actors +var SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{ + abi.RegisteredSealProof_StackedDrg32GiBV1: {}, + abi.RegisteredSealProof_StackedDrg64GiBV1: {}, +} + +// Maximum duration to allow for the sealing process for seal algorithms. +// Dependent on algorithm and sector size +var MaxSealDuration = map[abi.RegisteredSealProof]abi.ChainEpoch{ + abi.RegisteredSealProof_StackedDrg32GiBV1: abi.ChainEpoch(10000), // PARAM_FINISH + abi.RegisteredSealProof_StackedDrg2KiBV1: abi.ChainEpoch(10000), + abi.RegisteredSealProof_StackedDrg8MiBV1: abi.ChainEpoch(10000), + abi.RegisteredSealProof_StackedDrg512MiBV1: abi.ChainEpoch(10000), + abi.RegisteredSealProof_StackedDrg64GiBV1: abi.ChainEpoch(10000), +} + +// Number of epochs between publishing the precommit and when the challenge for interactive PoRep is drawn +// used to ensure it is not predictable by miner. +var PreCommitChallengeDelay = abi.ChainEpoch(150) + +// Lookback from the current epoch for state view for leader elections. +const ElectionLookback = abi.ChainEpoch(1) // PARAM_FINISH + +// Lookback from the deadline's challenge window opening from which to sample chain randomness for the challenge seed. 
+// This lookback exists so that deadline windows can be non-overlapping (which make the programming simpler) +// but without making the miner wait for chain stability before being able to start on PoSt computation. +// The challenge is available this many epochs before the window is actually open to receiving a PoSt. +const WPoStChallengeLookback = abi.ChainEpoch(20) + +// Minimum period before a deadline's challenge window opens that a fault must be declared for that deadline. +// This lookback must not be less than WPoStChallengeLookback lest a malicious miner be able to selectively declare +// faults after learning the challenge value. +const FaultDeclarationCutoff = WPoStChallengeLookback + 50 + +// The maximum age of a fault before the sector is terminated. +var FaultMaxAge = WPoStProvingPeriod * 14 + +// Staging period for a miner worker key change. +// Finality is a harsh delay for a miner who has lost their worker key, as the miner will miss Window PoSts until +// it can be changed. It's the only safe value, though. We may implement a mitigation mechanism such as a second +// key or allowing the owner account to submit PoSts while a key change is pending. +const WorkerKeyChangeDelay = ChainFinality + +// Minimum number of epochs past the current epoch a sector may be set to expire. +const MinSectorExpiration = 180 * builtin.EpochsInDay + +// Maximum number of epochs past the current epoch a sector may be set to expire. +// The actual maximum extension will be the minimum of CurrEpoch + MaximumSectorExpirationExtension +// and sector.ActivationEpoch+sealProof.SectorMaximumLifetime() +const MaxSectorExpirationExtension = 540 * builtin.EpochsInDay + +// Ratio of sector size to maximum deals per sector. 
+// The maximum number of deals is the sector size divided by this number (2^27) +// which limits 32GiB sectors to 256 deals and 64GiB sectors to 512 +const DealLimitDenominator = 134217728 + +// DealWeight and VerifiedDealWeight are spacetime occupied by regular deals and verified deals in a sector. +// Sum of DealWeight and VerifiedDealWeight should be less than or equal to total SpaceTime of a sector. +// Sectors full of VerifiedDeals will have a SectorQuality of VerifiedDealWeightMultiplier/QualityBaseMultiplier. +// Sectors full of Deals will have a SectorQuality of DealWeightMultiplier/QualityBaseMultiplier. +// Sectors with neither will have a SectorQuality of QualityBaseMultiplier/QualityBaseMultiplier. +// SectorQuality of a sector is a weighted average of multipliers based on their propotions. +func QualityForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.SectorQuality { + sectorSpaceTime := big.Mul(big.NewIntUnsigned(uint64(size)), big.NewInt(int64(duration))) + totalDealSpaceTime := big.Add(dealWeight, verifiedWeight) + Assert(sectorSpaceTime.GreaterThanEqual(totalDealSpaceTime)) + + weightedBaseSpaceTime := big.Mul(big.Sub(sectorSpaceTime, totalDealSpaceTime), builtin.QualityBaseMultiplier) + weightedDealSpaceTime := big.Mul(dealWeight, builtin.DealWeightMultiplier) + weightedVerifiedSpaceTime := big.Mul(verifiedWeight, builtin.VerifiedDealWeightMultiplier) + weightedSumSpaceTime := big.Sum(weightedBaseSpaceTime, weightedDealSpaceTime, weightedVerifiedSpaceTime) + scaledUpWeightedSumSpaceTime := big.Lsh(weightedSumSpaceTime, builtin.SectorQualityPrecision) + + return big.Div(big.Div(scaledUpWeightedSumSpaceTime, sectorSpaceTime), builtin.QualityBaseMultiplier) +} + +// Returns the power for a sector size and weight. 
+func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower { + quality := QualityForWeight(size, duration, dealWeight, verifiedWeight) + return big.Rsh(big.Mul(big.NewIntUnsigned(uint64(size)), quality), builtin.SectorQualityPrecision) +} + +// Returns the quality-adjusted power for a sector. +func QAPowerForSector(size abi.SectorSize, sector *SectorOnChainInfo) abi.StoragePower { + duration := sector.Expiration - sector.Activation + return QAPowerForWeight(size, duration, sector.DealWeight, sector.VerifiedDealWeight) +} + +// Determine maximum number of deal miner's sector can hold +func dealPerSectorLimit(size abi.SectorSize) uint64 { + return max64(256, uint64(size/DealLimitDenominator)) +} + +type BigFrac struct { + numerator big.Int + denominator big.Int +} + +var consensusFaultReporterInitialShare = BigFrac{ + // PARAM_FINISH + numerator: big.NewInt(1), + denominator: big.NewInt(1000), +} +var consensusFaultReporterShareGrowthRate = BigFrac{ + // PARAM_FINISH + numerator: big.NewInt(101251), + denominator: big.NewInt(100000), +} + +// Specification for a linear vesting schedule. +type VestSpec struct { + InitialDelay abi.ChainEpoch // Delay before any amount starts vesting. + VestPeriod abi.ChainEpoch // Period over which the total should vest, after the initial delay. + StepDuration abi.ChainEpoch // Duration between successive incremental vests (independent of vesting period). + Quantization abi.ChainEpoch // Maximum precision of vesting table (limits cardinality of table). 
+} + +var RewardVestingSpecV0 = VestSpec{ + InitialDelay: abi.ChainEpoch(20 * builtin.EpochsInDay), // PARAM_FINISH + VestPeriod: abi.ChainEpoch(180 * builtin.EpochsInDay), // PARAM_FINISH + StepDuration: abi.ChainEpoch(1 * builtin.EpochsInDay), // PARAM_FINISH + Quantization: 12 * builtin.EpochsInHour, // PARAM_FINISH +} + +var RewardVestingSpecV1 = VestSpec{ + InitialDelay: abi.ChainEpoch(0), // PARAM_FINISH + VestPeriod: abi.ChainEpoch(180 * builtin.EpochsInDay), // PARAM_FINISH + StepDuration: abi.ChainEpoch(1 * builtin.EpochsInDay), // PARAM_FINISH + Quantization: 12 * builtin.EpochsInHour, // PARAM_FINISH +} + +func RewardForConsensusSlashReport(elapsedEpoch abi.ChainEpoch, collateral abi.TokenAmount) abi.TokenAmount { + // PARAM_FINISH + // var growthRate = SLASHER_SHARE_GROWTH_RATE_NUM / SLASHER_SHARE_GROWTH_RATE_DENOM + // var multiplier = growthRate^elapsedEpoch + // var slasherProportion = min(INITIAL_SLASHER_SHARE * multiplier, 1.0) + // return collateral * slasherProportion + + // BigInt Operation + // NUM = SLASHER_SHARE_GROWTH_RATE_NUM^elapsedEpoch * INITIAL_SLASHER_SHARE_NUM * collateral + // DENOM = SLASHER_SHARE_GROWTH_RATE_DENOM^elapsedEpoch * INITIAL_SLASHER_SHARE_DENOM + // slasher_amount = min(NUM/DENOM, collateral) + maxReporterShareNum := big.NewInt(1) + maxReporterShareDen := big.NewInt(2) + + elapsed := big.NewInt(int64(elapsedEpoch)) + slasherShareNumerator := big.Exp(consensusFaultReporterShareGrowthRate.numerator, elapsed) + slasherShareDenominator := big.Exp(consensusFaultReporterShareGrowthRate.denominator, elapsed) + + num := big.Mul(big.Mul(slasherShareNumerator, consensusFaultReporterInitialShare.numerator), collateral) + denom := big.Mul(slasherShareDenominator, consensusFaultReporterInitialShare.denominator) + return big.Min(big.Div(num, denom), big.Div(big.Mul(collateral, maxReporterShareNum), maxReporterShareDen)) +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/network.go 
b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/network.go new file mode 100644 index 0000000000..c06fbdab12 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/network.go @@ -0,0 +1,54 @@ +package builtin + +import ( + "fmt" + + big "github.com/filecoin-project/go-state-types/big" +) + +// The duration of a chain epoch. +// This is used for deriving epoch-denominated periods that are more naturally expressed in clock time. +// TODO: In lieu of a real configuration mechanism for this value, we'd like to make it a var so that implementations +// can override it at runtime. Doing so requires changing all the static references to it in this repo to go through +// late-binding function calls, or they'll see the "wrong" value. +// https://github.com/filecoin-project/specs-actors/issues/353 +// If EpochDurationSeconds is changed, update `BaselineExponent`, `lambda`, and // `expLamSubOne` in ./reward/reward_logic.go +// You can re-calculate these constants by changing the epoch duration in ./reward/reward_calc.py and running it. +const EpochDurationSeconds = 30 +const SecondsInHour = 60 * 60 +const SecondsInDay = 24 * SecondsInHour +const EpochsInHour = SecondsInHour / EpochDurationSeconds +const EpochsInDay = SecondsInDay / EpochDurationSeconds + +// The expected number of block producers in each epoch. +var ExpectedLeadersPerEpoch = int64(5) + +func init() { + //noinspection GoBoolExpressions + if SecondsInHour%EpochDurationSeconds != 0 { + // This even division is an assumption that other code might unwittingly make. + // Don't rely on it on purpose, though. + // While we're pretty sure everything will still work fine, we're safer maintaining this invariant anyway. + panic(fmt.Sprintf("epoch duration %d does not evenly divide one hour (%d)", EpochDurationSeconds, SecondsInHour)) + } +} + +// Number of token units in an abstract "FIL" token. +// The network works purely in the indivisible token amounts. 
This constant converts to a fixed decimal with more +// human-friendly scale. +var TokenPrecision = big.NewIntUnsigned(1_000_000_000_000_000_000) + +// The maximum supply of Filecoin that will ever exist (in token units) +var TotalFilecoin = big.Mul(big.NewIntUnsigned(2_000_000_000), TokenPrecision) + +// Quality multiplier for committed capacity (no deals) in a sector +var QualityBaseMultiplier = big.NewInt(10) + +// Quality multiplier for unverified deals in a sector +var DealWeightMultiplier = big.NewInt(10) + +// Quality multiplier for verified deals in a sector +var VerifiedDealWeightMultiplier = big.NewInt(100) + +// Precision used for making QA power calculations +const SectorQualityPrecision = 20 diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/paych/cbor_gen.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/paych/cbor_gen.go new file mode 100644 index 0000000000..9ba3c3cc1d --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/paych/cbor_gen.go @@ -0,0 +1,1057 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package paych + +import ( + "fmt" + "io" + + abi "github.com/filecoin-project/go-state-types/abi" + crypto "github.com/filecoin-project/go-state-types/crypto" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +var lengthBufState = []byte{134} + +func (t *State) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufState); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.From (address.Address) (struct) + if err := t.From.MarshalCBOR(w); err != nil { + return err + } + + // t.To (address.Address) (struct) + if err := t.To.MarshalCBOR(w); err != nil { + return err + } + + // t.ToSend (big.Int) (struct) + if err := t.ToSend.MarshalCBOR(w); err != nil { + return err + } + + // t.SettlingAt (abi.ChainEpoch) (int64) + if t.SettlingAt >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SettlingAt)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SettlingAt-1)); err != nil { + return err + } + } + + // t.MinSettleHeight (abi.ChainEpoch) (int64) + if t.MinSettleHeight >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.MinSettleHeight)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.MinSettleHeight-1)); err != nil { + return err + } + } + + // t.LaneStates (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.LaneStates); err != nil { + return xerrors.Errorf("failed to write cid field t.LaneStates: %w", err) + } + + return nil +} + +func (t *State) UnmarshalCBOR(r io.Reader) error { + *t = State{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return 
fmt.Errorf("cbor input should be of type array") + } + + if extra != 6 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.From (address.Address) (struct) + + { + + if err := t.From.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.From: %w", err) + } + + } + // t.To (address.Address) (struct) + + { + + if err := t.To.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.To: %w", err) + } + + } + // t.ToSend (big.Int) (struct) + + { + + if err := t.ToSend.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ToSend: %w", err) + } + + } + // t.SettlingAt (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SettlingAt = abi.ChainEpoch(extraI) + } + // t.MinSettleHeight (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.MinSettleHeight = abi.ChainEpoch(extraI) + } + // t.LaneStates (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.LaneStates: %w", err) + } + + t.LaneStates = c + + } + return nil +} + +var lengthBufLaneState = []byte{130} + 
+func (t *LaneState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufLaneState); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Redeemed (big.Int) (struct) + if err := t.Redeemed.MarshalCBOR(w); err != nil { + return err + } + + // t.Nonce (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Nonce)); err != nil { + return err + } + + return nil +} + +func (t *LaneState) UnmarshalCBOR(r io.Reader) error { + *t = LaneState{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Redeemed (big.Int) (struct) + + { + + if err := t.Redeemed.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Redeemed: %w", err) + } + + } + // t.Nonce (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Nonce = uint64(extra) + + } + return nil +} + +var lengthBufMerge = []byte{130} + +func (t *Merge) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufMerge); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Lane (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Lane)); err != nil { + return err + } + + // t.Nonce (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Nonce)); err != nil { + return err + } + + return nil +} + +func (t *Merge) UnmarshalCBOR(r io.Reader) error { + *t = Merge{} + + br 
:= cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Lane (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Lane = uint64(extra) + + } + // t.Nonce (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Nonce = uint64(extra) + + } + return nil +} + +var lengthBufConstructorParams = []byte{130} + +func (t *ConstructorParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufConstructorParams); err != nil { + return err + } + + // t.From (address.Address) (struct) + if err := t.From.MarshalCBOR(w); err != nil { + return err + } + + // t.To (address.Address) (struct) + if err := t.To.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *ConstructorParams) UnmarshalCBOR(r io.Reader) error { + *t = ConstructorParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.From (address.Address) (struct) + + { + + if err := t.From.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.From: %w", err) + } + + } + // t.To (address.Address) (struct) + + { + + if err := t.To.UnmarshalCBOR(br); err != nil { + return 
xerrors.Errorf("unmarshaling t.To: %w", err) + } + + } + return nil +} + +var lengthBufUpdateChannelStateParams = []byte{131} + +func (t *UpdateChannelStateParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufUpdateChannelStateParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Sv (paych.SignedVoucher) (struct) + if err := t.Sv.MarshalCBOR(w); err != nil { + return err + } + + // t.Secret ([]uint8) (slice) + if len(t.Secret) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Secret was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Secret))); err != nil { + return err + } + + if _, err := w.Write(t.Secret[:]); err != nil { + return err + } + + // t.Proof ([]uint8) (slice) + if len(t.Proof) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Proof was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Proof))); err != nil { + return err + } + + if _, err := w.Write(t.Proof[:]); err != nil { + return err + } + return nil +} + +func (t *UpdateChannelStateParams) UnmarshalCBOR(r io.Reader) error { + *t = UpdateChannelStateParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Sv (paych.SignedVoucher) (struct) + + { + + if err := t.Sv.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Sv: %w", err) + } + + } + // t.Secret ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Secret: byte array too large 
(%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Secret = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Secret[:]); err != nil { + return err + } + // t.Proof ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Proof: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Proof = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Proof[:]); err != nil { + return err + } + return nil +} + +var lengthBufSignedVoucher = []byte{139} + +func (t *SignedVoucher) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufSignedVoucher); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.ChannelAddr (address.Address) (struct) + if err := t.ChannelAddr.MarshalCBOR(w); err != nil { + return err + } + + // t.TimeLockMin (abi.ChainEpoch) (int64) + if t.TimeLockMin >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TimeLockMin)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.TimeLockMin-1)); err != nil { + return err + } + } + + // t.TimeLockMax (abi.ChainEpoch) (int64) + if t.TimeLockMax >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TimeLockMax)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.TimeLockMax-1)); err != nil { + return err + } + } + + // t.SecretPreimage ([]uint8) (slice) + if len(t.SecretPreimage) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.SecretPreimage was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, 
cbg.MajByteString, uint64(len(t.SecretPreimage))); err != nil { + return err + } + + if _, err := w.Write(t.SecretPreimage[:]); err != nil { + return err + } + + // t.Extra (paych.ModVerifyParams) (struct) + if err := t.Extra.MarshalCBOR(w); err != nil { + return err + } + + // t.Lane (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Lane)); err != nil { + return err + } + + // t.Nonce (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Nonce)); err != nil { + return err + } + + // t.Amount (big.Int) (struct) + if err := t.Amount.MarshalCBOR(w); err != nil { + return err + } + + // t.MinSettleHeight (abi.ChainEpoch) (int64) + if t.MinSettleHeight >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.MinSettleHeight)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.MinSettleHeight-1)); err != nil { + return err + } + } + + // t.Merges ([]paych.Merge) (slice) + if len(t.Merges) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Merges was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Merges))); err != nil { + return err + } + for _, v := range t.Merges { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + // t.Signature (crypto.Signature) (struct) + if err := t.Signature.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *SignedVoucher) UnmarshalCBOR(r io.Reader) error { + *t = SignedVoucher{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 11 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.ChannelAddr (address.Address) (struct) + 
+ { + + if err := t.ChannelAddr.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ChannelAddr: %w", err) + } + + } + // t.TimeLockMin (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.TimeLockMin = abi.ChainEpoch(extraI) + } + // t.TimeLockMax (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.TimeLockMax = abi.ChainEpoch(extraI) + } + // t.SecretPreimage ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.SecretPreimage: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.SecretPreimage = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.SecretPreimage[:]); err != nil { + return err + } + // t.Extra (paych.ModVerifyParams) (struct) + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.Extra = new(ModVerifyParams) + if err := t.Extra.UnmarshalCBOR(br); 
err != nil { + return xerrors.Errorf("unmarshaling t.Extra pointer: %w", err) + } + } + + } + // t.Lane (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Lane = uint64(extra) + + } + // t.Nonce (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Nonce = uint64(extra) + + } + // t.Amount (big.Int) (struct) + + { + + if err := t.Amount.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Amount: %w", err) + } + + } + // t.MinSettleHeight (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.MinSettleHeight = abi.ChainEpoch(extraI) + } + // t.Merges ([]paych.Merge) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Merges: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Merges = make([]Merge, extra) + } + + for i := 0; i < int(extra); i++ { + + var v Merge + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Merges[i] = v + } + + // t.Signature (crypto.Signature) (struct) + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { 
+ return err + } + t.Signature = new(crypto.Signature) + if err := t.Signature.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Signature pointer: %w", err) + } + } + + } + return nil +} + +var lengthBufModVerifyParams = []byte{131} + +func (t *ModVerifyParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufModVerifyParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Actor (address.Address) (struct) + if err := t.Actor.MarshalCBOR(w); err != nil { + return err + } + + // t.Method (abi.MethodNum) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Method)); err != nil { + return err + } + + // t.Data ([]uint8) (slice) + if len(t.Data) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Data was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Data))); err != nil { + return err + } + + if _, err := w.Write(t.Data[:]); err != nil { + return err + } + return nil +} + +func (t *ModVerifyParams) UnmarshalCBOR(r io.Reader) error { + *t = ModVerifyParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Actor (address.Address) (struct) + + { + + if err := t.Actor.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Actor: %w", err) + } + + } + // t.Method (abi.MethodNum) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Method = abi.MethodNum(extra) + + } + // t.Data ([]uint8) (slice) + + 
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Data: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Data = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Data[:]); err != nil { + return err + } + return nil +} + +var lengthBufPaymentVerifyParams = []byte{130} + +func (t *PaymentVerifyParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufPaymentVerifyParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Extra ([]uint8) (slice) + if len(t.Extra) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Extra was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Extra))); err != nil { + return err + } + + if _, err := w.Write(t.Extra[:]); err != nil { + return err + } + + // t.Proof ([]uint8) (slice) + if len(t.Proof) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Proof was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Proof))); err != nil { + return err + } + + if _, err := w.Write(t.Proof[:]); err != nil { + return err + } + return nil +} + +func (t *PaymentVerifyParams) UnmarshalCBOR(r io.Reader) error { + *t = PaymentVerifyParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Extra ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + 
return fmt.Errorf("t.Extra: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Extra = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Extra[:]); err != nil { + return err + } + // t.Proof ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Proof: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Proof = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Proof[:]); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/paych/paych_actor.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/paych/paych_actor.go new file mode 100644 index 0000000000..dbb5b62036 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/paych/paych_actor.go @@ -0,0 +1,364 @@ +package paych + +import ( + "bytes" + "math" + + addr "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/specs-actors/actors/runtime" + "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +// Maximum number of lanes in a channel. 
+const MaxLane = math.MaxInt64 + +const SettleDelay = builtin.EpochsInHour * 12 + +type Actor struct{} + +func (a Actor) Exports() []interface{} { + return []interface{}{ + builtin.MethodConstructor: a.Constructor, + 2: a.UpdateChannelState, + 3: a.Settle, + 4: a.Collect, + } +} + +func (a Actor) Code() cid.Cid { + return builtin.PaymentChannelActorCodeID +} + +func (a Actor) State() cbor.Er { + return new(State) +} + +var _ runtime.VMActor = Actor{} + +type ConstructorParams struct { + From addr.Address // Payer + To addr.Address // Payee +} + +// Constructor creates a payment channel actor. See State for meaning of params. +func (pca *Actor) Constructor(rt runtime.Runtime, params *ConstructorParams) *abi.EmptyValue { + // Only InitActor can create a payment channel actor. It creates the actor on + // behalf of the payer/payee. + rt.ValidateImmediateCallerType(builtin.InitActorCodeID) + + // check that both parties are capable of signing vouchers + to, err := pca.resolveAccount(rt, params.To) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to resolve to address: %s", params.To) + from, err := pca.resolveAccount(rt, params.From) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to resolve from address: %s", params.From) + + emptyArrCid, err := adt.MakeEmptyArray(adt.AsStore(rt)).Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to create empty array") + + st := ConstructState(from, to, emptyArrCid) + rt.StateCreate(st) + + return nil +} + +// Resolves an address to a canonical ID address and requires it to address an account actor. +// The account actor constructor checks that the embedded address is associated with an appropriate key. +// An alternative (more expensive) would be to send a message to the actor to fetch its key. 
+func (pca *Actor) resolveAccount(rt runtime.Runtime, raw addr.Address) (addr.Address, error) { + resolved, ok := rt.ResolveAddress(raw) + if !ok { + return addr.Undef, exitcode.ErrNotFound.Wrapf("failed to resolve address %v", raw) + } + + codeCID, ok := rt.GetActorCodeCID(resolved) + if !ok { + return addr.Undef, exitcode.ErrForbidden.Wrapf("no code for address %v", resolved) + } + if codeCID != builtin.AccountActorCodeID { + return addr.Undef, exitcode.ErrForbidden.Wrapf("actor %v must be an account (%v), was %v", raw, + builtin.AccountActorCodeID, codeCID) + } + return resolved, nil +} + +//////////////////////////////////////////////////////////////////////////////// +// Payment Channel state operations +//////////////////////////////////////////////////////////////////////////////// + +type UpdateChannelStateParams struct { + Sv SignedVoucher + Secret []byte + Proof []byte +} + +// A voucher is sent by `From` to `To` off-chain in order to enable +// `To` to redeem payments on-chain in the future +type SignedVoucher struct { + // ChannelAddr is the address of the payment channel this signed voucher is valid for + ChannelAddr addr.Address + // TimeLockMin sets a min epoch before which the voucher cannot be redeemed + TimeLockMin abi.ChainEpoch + // TimeLockMax sets a max epoch beyond which the voucher cannot be redeemed + // TimeLockMax set to 0 means no timeout + TimeLockMax abi.ChainEpoch + // (optional) The SecretPreImage is used by `To` to validate + SecretPreimage []byte + // (optional) Extra can be specified by `From` to add a verification method to the voucher + Extra *ModVerifyParams + // Specifies which lane the Voucher merges into (will be created if does not exist) + Lane uint64 + // Nonce is set by `From` to prevent redemption of stale vouchers on a lane + Nonce uint64 + // Amount voucher can be redeemed for + Amount big.Int + // (optional) MinSettleHeight can extend channel MinSettleHeight if needed + MinSettleHeight abi.ChainEpoch + + // 
(optional) Set of lanes to be merged into `Lane` + Merges []Merge + + // Sender's signature over the voucher + Signature *crypto.Signature +} + +// Modular Verification method +type ModVerifyParams struct { + Actor addr.Address + Method abi.MethodNum + Data []byte +} + +type PaymentVerifyParams struct { + Extra []byte + Proof []byte +} + +func (pca Actor) UpdateChannelState(rt runtime.Runtime, params *UpdateChannelStateParams) *abi.EmptyValue { + var st State + rt.StateReadonly(&st) + + // both parties must sign voucher: one who submits it, the other explicitly signs it + rt.ValidateImmediateCallerIs(st.From, st.To) + var signer addr.Address + if rt.Caller() == st.From { + signer = st.To + } else { + signer = st.From + } + sv := params.Sv + + if sv.Signature == nil { + rt.Abortf(exitcode.ErrIllegalArgument, "voucher has no signature") + } + + vb, err := sv.SigningBytes() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "failed to serialize signedvoucher") + + err = rt.VerifySignature(*sv.Signature, signer, vb) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "voucher signature invalid") + + pchAddr := rt.Receiver() + if pchAddr != sv.ChannelAddr { + rt.Abortf(exitcode.ErrIllegalArgument, "voucher payment channel address %s does not match receiver %s", sv.ChannelAddr, pchAddr) + } + + if rt.CurrEpoch() < sv.TimeLockMin { + rt.Abortf(exitcode.ErrIllegalArgument, "cannot use this voucher yet!") + } + + if sv.TimeLockMax != 0 && rt.CurrEpoch() > sv.TimeLockMax { + rt.Abortf(exitcode.ErrIllegalArgument, "this voucher has expired!") + } + + if sv.Amount.Sign() < 0 { + rt.Abortf(exitcode.ErrIllegalArgument, "voucher amount must be non-negative, was %v", sv.Amount) + } + + if len(sv.SecretPreimage) > 0 { + hashedSecret := rt.HashBlake2b(params.Secret) + if !bytes.Equal(hashedSecret[:], sv.SecretPreimage) { + rt.Abortf(exitcode.ErrIllegalArgument, "incorrect secret!") + } + } + + if sv.Extra != nil { + + code := rt.Send( + sv.Extra.Actor, + 
sv.Extra.Method, + &PaymentVerifyParams{ + sv.Extra.Data, + params.Proof, + }, + abi.NewTokenAmount(0), + &builtin.Discard{}, + ) + builtin.RequireSuccess(rt, code, "spend voucher verification failed") + } + + rt.StateTransaction(&st, func() { + laneFound := true + + lstates, err := adt.AsArray(adt.AsStore(rt), st.LaneStates) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load lanes") + + // Find the voucher lane, creating if necessary. + laneId := sv.Lane + laneState := findLane(rt, lstates, sv.Lane) + + if laneState == nil { + laneState = &LaneState{ + Redeemed: big.Zero(), + Nonce: 0, + } + laneFound = false + } + + if laneFound { + if laneState.Nonce >= sv.Nonce { + rt.Abortf(exitcode.ErrIllegalArgument, "voucher has an outdated nonce, existing nonce: %d, voucher nonce: %d, cannot redeem", + laneState.Nonce, sv.Nonce) + } + } + + // The next section actually calculates the payment amounts to update the payment channel state + // 1. (optional) sum already redeemed value of all merging lanes + redeemedFromOthers := big.Zero() + for _, merge := range sv.Merges { + if merge.Lane == sv.Lane { + rt.Abortf(exitcode.ErrIllegalArgument, "voucher cannot merge lanes into its own lane") + } + + otherls := findLane(rt, lstates, merge.Lane) + if otherls == nil { + rt.Abortf(exitcode.ErrIllegalArgument, "voucher specifies invalid merge lane %v", merge.Lane) + return // makes linters happy + } + + if otherls.Nonce >= merge.Nonce { + rt.Abortf(exitcode.ErrIllegalArgument, "merged lane in voucher has outdated nonce, cannot redeem") + } + + redeemedFromOthers = big.Add(redeemedFromOthers, otherls.Redeemed) + otherls.Nonce = merge.Nonce + err = lstates.Set(merge.Lane, otherls) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to store lane %d", merge.Lane) + } + + // 2. 
To prevent double counting, remove already redeemed amounts (from + // voucher or other lanes) from the voucher amount + laneState.Nonce = sv.Nonce + balanceDelta := big.Sub(sv.Amount, big.Add(redeemedFromOthers, laneState.Redeemed)) + // 3. set new redeemed value for merged-into lane + laneState.Redeemed = sv.Amount + + newSendBalance := big.Add(st.ToSend, balanceDelta) + + // 4. check operation validity + if newSendBalance.LessThan(big.Zero()) { + rt.Abortf(exitcode.ErrIllegalArgument, "voucher would leave channel balance negative") + } + if newSendBalance.GreaterThan(rt.CurrentBalance()) { + rt.Abortf(exitcode.ErrIllegalArgument, "not enough funds in channel to cover voucher") + } + + // 5. add new redemption ToSend + st.ToSend = newSendBalance + + // update channel settlingAt and MinSettleHeight if delayed by voucher + if sv.MinSettleHeight != 0 { + if st.SettlingAt != 0 && st.SettlingAt < sv.MinSettleHeight { + st.SettlingAt = sv.MinSettleHeight + } + if st.MinSettleHeight < sv.MinSettleHeight { + st.MinSettleHeight = sv.MinSettleHeight + } + } + + err = lstates.Set(laneId, laneState) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to store lane", laneId) + + st.LaneStates, err = lstates.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to save lanes") + }) + return nil +} + +func (pca Actor) Settle(rt runtime.Runtime, _ *abi.EmptyValue) *abi.EmptyValue { + var st State + rt.StateTransaction(&st, func() { + rt.ValidateImmediateCallerIs(st.From, st.To) + + if st.SettlingAt != 0 { + rt.Abortf(exitcode.ErrIllegalState, "channel already settling") + } + + st.SettlingAt = rt.CurrEpoch() + SettleDelay + if st.SettlingAt < st.MinSettleHeight { + st.SettlingAt = st.MinSettleHeight + } + }) + return nil +} + +func (pca Actor) Collect(rt runtime.Runtime, _ *abi.EmptyValue) *abi.EmptyValue { + var st State + rt.StateReadonly(&st) + rt.ValidateImmediateCallerIs(st.From, st.To) + + if st.SettlingAt == 0 || rt.CurrEpoch() < 
st.SettlingAt { + rt.Abortf(exitcode.ErrForbidden, "payment channel not settling or settled") + } + + // send ToSend to "To" + codeTo := rt.Send( + st.To, + builtin.MethodSend, + nil, + st.ToSend, + &builtin.Discard{}, + ) + builtin.RequireSuccess(rt, codeTo, "Failed to send funds to `To`") + + // the remaining balance will be returned to "From" upon deletion. + rt.DeleteActor(st.From) + + return nil +} + +func (t *SignedVoucher) SigningBytes() ([]byte, error) { + osv := *t + osv.Signature = nil + + buf := new(bytes.Buffer) + if err := osv.MarshalCBOR(buf); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// Returns the insertion index for a lane ID, with the matching lane state if found, or nil. +func findLane(rt runtime.Runtime, ls *adt.Array, id uint64) *LaneState { + if id > MaxLane { + rt.Abortf(exitcode.ErrIllegalArgument, "maximum lane ID is 2^63-1") + } + + var out LaneState + found, err := ls.Get(id, &out) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load lane %d", id) + + if !found { + return nil + } + + return &out +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/paych/paych_state.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/paych/paych_state.go new file mode 100644 index 0000000000..cb8b1afb16 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/paych/paych_state.go @@ -0,0 +1,53 @@ +package paych + +import ( + addr "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + "github.com/ipfs/go-cid" +) + +// A given payment channel actor is established by From +// to enable off-chain microtransactions to To to be reconciled +// and tallied on chain. 
+type State struct { + // Channel owner, who has funded the actor + From addr.Address + // Recipient of payouts from channel + To addr.Address + + // Amount successfully redeemed through the payment channel, paid out on `Collect()` + ToSend abi.TokenAmount + + // Height at which the channel can be `Collected` + SettlingAt abi.ChainEpoch + // Height before which the channel `ToSend` cannot be collected + MinSettleHeight abi.ChainEpoch + + // Collections of lane states for the channel, maintained in ID order. + LaneStates cid.Cid // AMT +} + +// The Lane state tracks the latest (highest) voucher nonce used to merge the lane +// as well as the amount it has already redeemed. +type LaneState struct { + Redeemed big.Int + Nonce uint64 +} + +// Specifies which `Lane`s to be merged with what `Nonce` on channelUpdate +type Merge struct { + Lane uint64 + Nonce uint64 +} + +func ConstructState(from addr.Address, to addr.Address, emptyArrCid cid.Cid) *State { + return &State{ + From: from, + To: to, + ToSend: big.Zero(), + SettlingAt: 0, + MinSettleHeight: 0, + LaneStates: emptyArrCid, + } +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/power/cbor_gen.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/power/cbor_gen.go new file mode 100644 index 0000000000..26f6c127c6 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/power/cbor_gen.go @@ -0,0 +1,1448 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package power + +import ( + "fmt" + "io" + + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + smoothing "github.com/filecoin-project/specs-actors/actors/util/smoothing" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +var lengthBufState = []byte{144} + +func (t *State) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufState); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.TotalRawBytePower (big.Int) (struct) + if err := t.TotalRawBytePower.MarshalCBOR(w); err != nil { + return err + } + + // t.TotalBytesCommitted (big.Int) (struct) + if err := t.TotalBytesCommitted.MarshalCBOR(w); err != nil { + return err + } + + // t.TotalQualityAdjPower (big.Int) (struct) + if err := t.TotalQualityAdjPower.MarshalCBOR(w); err != nil { + return err + } + + // t.TotalQABytesCommitted (big.Int) (struct) + if err := t.TotalQABytesCommitted.MarshalCBOR(w); err != nil { + return err + } + + // t.TotalPledgeCollateral (big.Int) (struct) + if err := t.TotalPledgeCollateral.MarshalCBOR(w); err != nil { + return err + } + + // t.ThisEpochRawBytePower (big.Int) (struct) + if err := t.ThisEpochRawBytePower.MarshalCBOR(w); err != nil { + return err + } + + // t.ThisEpochQualityAdjPower (big.Int) (struct) + if err := t.ThisEpochQualityAdjPower.MarshalCBOR(w); err != nil { + return err + } + + // t.ThisEpochPledgeCollateral (big.Int) (struct) + if err := t.ThisEpochPledgeCollateral.MarshalCBOR(w); err != nil { + return err + } + + // t.ThisEpochQAPowerSmoothed (smoothing.FilterEstimate) (struct) + if err := t.ThisEpochQAPowerSmoothed.MarshalCBOR(w); err != nil { + return err + } + + // t.MinerCount (int64) (int64) + if t.MinerCount >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.MinerCount)); err != nil { + return err + } + } else { + 
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.MinerCount-1)); err != nil { + return err + } + } + + // t.MinerAboveMinPowerCount (int64) (int64) + if t.MinerAboveMinPowerCount >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.MinerAboveMinPowerCount)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.MinerAboveMinPowerCount-1)); err != nil { + return err + } + } + + // t.CronEventQueue (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.CronEventQueue); err != nil { + return xerrors.Errorf("failed to write cid field t.CronEventQueue: %w", err) + } + + // t.FirstCronEpoch (abi.ChainEpoch) (int64) + if t.FirstCronEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.FirstCronEpoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.FirstCronEpoch-1)); err != nil { + return err + } + } + + // t.LastProcessedCronEpoch (abi.ChainEpoch) (int64) + if t.LastProcessedCronEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.LastProcessedCronEpoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.LastProcessedCronEpoch-1)); err != nil { + return err + } + } + + // t.Claims (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Claims); err != nil { + return xerrors.Errorf("failed to write cid field t.Claims: %w", err) + } + + // t.ProofValidationBatch (cid.Cid) (struct) + + if t.ProofValidationBatch == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.ProofValidationBatch); err != nil { + return xerrors.Errorf("failed to write cid field t.ProofValidationBatch: %w", err) + } + } + + return nil +} + +func (t 
*State) UnmarshalCBOR(r io.Reader) error { + *t = State{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 16 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.TotalRawBytePower (big.Int) (struct) + + { + + if err := t.TotalRawBytePower.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.TotalRawBytePower: %w", err) + } + + } + // t.TotalBytesCommitted (big.Int) (struct) + + { + + if err := t.TotalBytesCommitted.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.TotalBytesCommitted: %w", err) + } + + } + // t.TotalQualityAdjPower (big.Int) (struct) + + { + + if err := t.TotalQualityAdjPower.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.TotalQualityAdjPower: %w", err) + } + + } + // t.TotalQABytesCommitted (big.Int) (struct) + + { + + if err := t.TotalQABytesCommitted.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.TotalQABytesCommitted: %w", err) + } + + } + // t.TotalPledgeCollateral (big.Int) (struct) + + { + + if err := t.TotalPledgeCollateral.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.TotalPledgeCollateral: %w", err) + } + + } + // t.ThisEpochRawBytePower (big.Int) (struct) + + { + + if err := t.ThisEpochRawBytePower.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ThisEpochRawBytePower: %w", err) + } + + } + // t.ThisEpochQualityAdjPower (big.Int) (struct) + + { + + if err := t.ThisEpochQualityAdjPower.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ThisEpochQualityAdjPower: %w", err) + } + + } + // t.ThisEpochPledgeCollateral (big.Int) (struct) + + { + + if err := t.ThisEpochPledgeCollateral.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling 
t.ThisEpochPledgeCollateral: %w", err) + } + + } + // t.ThisEpochQAPowerSmoothed (smoothing.FilterEstimate) (struct) + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.ThisEpochQAPowerSmoothed = new(smoothing.FilterEstimate) + if err := t.ThisEpochQAPowerSmoothed.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ThisEpochQAPowerSmoothed pointer: %w", err) + } + } + + } + // t.MinerCount (int64) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.MinerCount = int64(extraI) + } + // t.MinerAboveMinPowerCount (int64) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.MinerAboveMinPowerCount = int64(extraI) + } + // t.CronEventQueue (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.CronEventQueue: %w", err) + } + + t.CronEventQueue = c + + } + // t.FirstCronEpoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case 
cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.FirstCronEpoch = abi.ChainEpoch(extraI) + } + // t.LastProcessedCronEpoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.LastProcessedCronEpoch = abi.ChainEpoch(extraI) + } + // t.Claims (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Claims: %w", err) + } + + t.Claims = c + + } + // t.ProofValidationBatch (cid.Cid) (struct) + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProofValidationBatch: %w", err) + } + + t.ProofValidationBatch = &c + } + + } + return nil +} + +var lengthBufClaim = []byte{130} + +func (t *Claim) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufClaim); err != nil { + return err + } + + // t.RawBytePower (big.Int) (struct) + if err := t.RawBytePower.MarshalCBOR(w); err != nil { + return err + } + + // t.QualityAdjPower (big.Int) (struct) + if err := t.QualityAdjPower.MarshalCBOR(w); err != nil { + return err + } + 
return nil +} + +func (t *Claim) UnmarshalCBOR(r io.Reader) error { + *t = Claim{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.RawBytePower (big.Int) (struct) + + { + + if err := t.RawBytePower.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.RawBytePower: %w", err) + } + + } + // t.QualityAdjPower (big.Int) (struct) + + { + + if err := t.QualityAdjPower.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.QualityAdjPower: %w", err) + } + + } + return nil +} + +var lengthBufCronEvent = []byte{130} + +func (t *CronEvent) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufCronEvent); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.MinerAddr (address.Address) (struct) + if err := t.MinerAddr.MarshalCBOR(w); err != nil { + return err + } + + // t.CallbackPayload ([]uint8) (slice) + if len(t.CallbackPayload) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.CallbackPayload was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.CallbackPayload))); err != nil { + return err + } + + if _, err := w.Write(t.CallbackPayload[:]); err != nil { + return err + } + return nil +} + +func (t *CronEvent) UnmarshalCBOR(r io.Reader) error { + *t = CronEvent{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // 
t.MinerAddr (address.Address) (struct) + + { + + if err := t.MinerAddr.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.MinerAddr: %w", err) + } + + } + // t.CallbackPayload ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.CallbackPayload: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.CallbackPayload = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.CallbackPayload[:]); err != nil { + return err + } + return nil +} + +var lengthBufCreateMinerParams = []byte{133} + +func (t *CreateMinerParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufCreateMinerParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Owner (address.Address) (struct) + if err := t.Owner.MarshalCBOR(w); err != nil { + return err + } + + // t.Worker (address.Address) (struct) + if err := t.Worker.MarshalCBOR(w); err != nil { + return err + } + + // t.SealProofType (abi.RegisteredSealProof) (int64) + if t.SealProofType >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealProofType)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealProofType-1)); err != nil { + return err + } + } + + // t.Peer ([]uint8) (slice) + if len(t.Peer) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Peer was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Peer))); err != nil { + return err + } + + if _, err := w.Write(t.Peer[:]); err != nil { + return err + } + + // t.Multiaddrs ([][]uint8) (slice) + if len(t.Multiaddrs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in 
field t.Multiaddrs was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Multiaddrs))); err != nil { + return err + } + for _, v := range t.Multiaddrs { + if len(v) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field v was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(v))); err != nil { + return err + } + + if _, err := w.Write(v[:]); err != nil { + return err + } + } + return nil +} + +func (t *CreateMinerParams) UnmarshalCBOR(r io.Reader) error { + *t = CreateMinerParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 5 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Owner (address.Address) (struct) + + { + + if err := t.Owner.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Owner: %w", err) + } + + } + // t.Worker (address.Address) (struct) + + { + + if err := t.Worker.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Worker: %w", err) + } + + } + // t.SealProofType (abi.RegisteredSealProof) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SealProofType = abi.RegisteredSealProof(extraI) + } + // t.Peer ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + 
return fmt.Errorf("t.Peer: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Peer = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Peer[:]); err != nil { + return err + } + // t.Multiaddrs ([][]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Multiaddrs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Multiaddrs = make([][]uint8, extra) + } + + for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Multiaddrs[i]: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Multiaddrs[i] = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Multiaddrs[i][:]); err != nil { + return err + } + } + } + + return nil +} + +var lengthBufEnrollCronEventParams = []byte{130} + +func (t *EnrollCronEventParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufEnrollCronEventParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.EventEpoch (abi.ChainEpoch) (int64) + if t.EventEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EventEpoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EventEpoch-1)); err != nil { + return err + } + } + + // t.Payload ([]uint8) (slice) + if len(t.Payload) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Payload was too long") + } + + if 
err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Payload))); err != nil { + return err + } + + if _, err := w.Write(t.Payload[:]); err != nil { + return err + } + return nil +} + +func (t *EnrollCronEventParams) UnmarshalCBOR(r io.Reader) error { + *t = EnrollCronEventParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.EventEpoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.EventEpoch = abi.ChainEpoch(extraI) + } + // t.Payload ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Payload: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Payload = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Payload[:]); err != nil { + return err + } + return nil +} + +var lengthBufUpdateClaimedPowerParams = []byte{130} + +func (t *UpdateClaimedPowerParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufUpdateClaimedPowerParams); err != nil { + return err + } + + // t.RawByteDelta (big.Int) (struct) + if err := 
t.RawByteDelta.MarshalCBOR(w); err != nil { + return err + } + + // t.QualityAdjustedDelta (big.Int) (struct) + if err := t.QualityAdjustedDelta.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *UpdateClaimedPowerParams) UnmarshalCBOR(r io.Reader) error { + *t = UpdateClaimedPowerParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.RawByteDelta (big.Int) (struct) + + { + + if err := t.RawByteDelta.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.RawByteDelta: %w", err) + } + + } + // t.QualityAdjustedDelta (big.Int) (struct) + + { + + if err := t.QualityAdjustedDelta.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.QualityAdjustedDelta: %w", err) + } + + } + return nil +} + +var lengthBufCreateMinerReturn = []byte{130} + +func (t *CreateMinerReturn) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufCreateMinerReturn); err != nil { + return err + } + + // t.IDAddress (address.Address) (struct) + if err := t.IDAddress.MarshalCBOR(w); err != nil { + return err + } + + // t.RobustAddress (address.Address) (struct) + if err := t.RobustAddress.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *CreateMinerReturn) UnmarshalCBOR(r io.Reader) error { + *t = CreateMinerReturn{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.IDAddress 
(address.Address) (struct) + + { + + if err := t.IDAddress.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.IDAddress: %w", err) + } + + } + // t.RobustAddress (address.Address) (struct) + + { + + if err := t.RobustAddress.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.RobustAddress: %w", err) + } + + } + return nil +} + +var lengthBufCurrentTotalPowerReturn = []byte{132} + +func (t *CurrentTotalPowerReturn) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufCurrentTotalPowerReturn); err != nil { + return err + } + + // t.RawBytePower (big.Int) (struct) + if err := t.RawBytePower.MarshalCBOR(w); err != nil { + return err + } + + // t.QualityAdjPower (big.Int) (struct) + if err := t.QualityAdjPower.MarshalCBOR(w); err != nil { + return err + } + + // t.PledgeCollateral (big.Int) (struct) + if err := t.PledgeCollateral.MarshalCBOR(w); err != nil { + return err + } + + // t.QualityAdjPowerSmoothed (smoothing.FilterEstimate) (struct) + if err := t.QualityAdjPowerSmoothed.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *CurrentTotalPowerReturn) UnmarshalCBOR(r io.Reader) error { + *t = CurrentTotalPowerReturn{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.RawBytePower (big.Int) (struct) + + { + + if err := t.RawBytePower.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.RawBytePower: %w", err) + } + + } + // t.QualityAdjPower (big.Int) (struct) + + { + + if err := t.QualityAdjPower.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.QualityAdjPower: %w", err) + } + + } + // t.PledgeCollateral 
(big.Int) (struct) + + { + + if err := t.PledgeCollateral.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.PledgeCollateral: %w", err) + } + + } + // t.QualityAdjPowerSmoothed (smoothing.FilterEstimate) (struct) + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.QualityAdjPowerSmoothed = new(smoothing.FilterEstimate) + if err := t.QualityAdjPowerSmoothed.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.QualityAdjPowerSmoothed pointer: %w", err) + } + } + + } + return nil +} + +var lengthBufMinerConstructorParams = []byte{134} + +func (t *MinerConstructorParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufMinerConstructorParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.OwnerAddr (address.Address) (struct) + if err := t.OwnerAddr.MarshalCBOR(w); err != nil { + return err + } + + // t.WorkerAddr (address.Address) (struct) + if err := t.WorkerAddr.MarshalCBOR(w); err != nil { + return err + } + + // t.ControlAddrs ([]address.Address) (slice) + if len(t.ControlAddrs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.ControlAddrs was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.ControlAddrs))); err != nil { + return err + } + for _, v := range t.ControlAddrs { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + // t.SealProofType (abi.RegisteredSealProof) (int64) + if t.SealProofType >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SealProofType)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SealProofType-1)); err != nil { + return err + } + } + + // t.PeerId ([]uint8) (slice) + if len(t.PeerId) > 
cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.PeerId was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.PeerId))); err != nil { + return err + } + + if _, err := w.Write(t.PeerId[:]); err != nil { + return err + } + + // t.Multiaddrs ([][]uint8) (slice) + if len(t.Multiaddrs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Multiaddrs was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Multiaddrs))); err != nil { + return err + } + for _, v := range t.Multiaddrs { + if len(v) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field v was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(v))); err != nil { + return err + } + + if _, err := w.Write(v[:]); err != nil { + return err + } + } + return nil +} + +func (t *MinerConstructorParams) UnmarshalCBOR(r io.Reader) error { + *t = MinerConstructorParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 6 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.OwnerAddr (address.Address) (struct) + + { + + if err := t.OwnerAddr.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.OwnerAddr: %w", err) + } + + } + // t.WorkerAddr (address.Address) (struct) + + { + + if err := t.WorkerAddr.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.WorkerAddr: %w", err) + } + + } + // t.ControlAddrs ([]address.Address) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.ControlAddrs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return 
fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.ControlAddrs = make([]address.Address, extra) + } + + for i := 0; i < int(extra); i++ { + + var v address.Address + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.ControlAddrs[i] = v + } + + // t.SealProofType (abi.RegisteredSealProof) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SealProofType = abi.RegisteredSealProof(extraI) + } + // t.PeerId ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.PeerId: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.PeerId = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.PeerId[:]); err != nil { + return err + } + // t.Multiaddrs ([][]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Multiaddrs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Multiaddrs = make([][]uint8, extra) + } + + for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Multiaddrs[i]: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return 
fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Multiaddrs[i] = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Multiaddrs[i][:]); err != nil { + return err + } + } + } + + return nil +} + +var lengthBufSectorStorageWeightDesc = []byte{132} + +func (t *SectorStorageWeightDesc) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufSectorStorageWeightDesc); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.SectorSize (abi.SectorSize) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorSize)); err != nil { + return err + } + + // t.Duration (abi.ChainEpoch) (int64) + if t.Duration >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Duration)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Duration-1)); err != nil { + return err + } + } + + // t.DealWeight (big.Int) (struct) + if err := t.DealWeight.MarshalCBOR(w); err != nil { + return err + } + + // t.VerifiedDealWeight (big.Int) (struct) + if err := t.VerifiedDealWeight.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *SectorStorageWeightDesc) UnmarshalCBOR(r io.Reader) error { + *t = SectorStorageWeightDesc{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.SectorSize (abi.SectorSize) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorSize = abi.SectorSize(extra) + + } + // t.Duration 
(abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Duration = abi.ChainEpoch(extraI) + } + // t.DealWeight (big.Int) (struct) + + { + + if err := t.DealWeight.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.DealWeight: %w", err) + } + + } + // t.VerifiedDealWeight (big.Int) (struct) + + { + + if err := t.VerifiedDealWeight.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.VerifiedDealWeight: %w", err) + } + + } + return nil +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/power/policy.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/power/policy.go new file mode 100644 index 0000000000..4b01cfe028 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/power/policy.go @@ -0,0 +1,18 @@ +package power + +import ( + abi "github.com/filecoin-project/go-state-types/abi" +) + +// Minimum number of registered miners for the minimum miner size limit to effectively limit consensus power. +const ConsensusMinerMinMiners = 3 + +// Minimum power of an individual miner to meet the threshold for leader election. +var ConsensusMinerMinPower = abi.NewStoragePower(1 << 40) // PARAM_FINISH + +// Maximum number of prove commits a miner can submit in one epoch +// +// We bound this to 200 to limit the number of prove partitions we may need to update in a given epoch to 200. +// +// To support onboarding 1EiB/year, we need to allow at least 32 prove commits per epoch. 
+const MaxMinerProveCommitsPerEpoch = 200 diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/power/power_actor.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/power/power_actor.go new file mode 100644 index 0000000000..0c9b9ea84a --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/power/power_actor.go @@ -0,0 +1,503 @@ +package power + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + addr "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/exitcode" + rtt "github.com/filecoin-project/go-state-types/rt" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/specs-actors/actors/builtin" + initact "github.com/filecoin-project/specs-actors/actors/builtin/init" + "github.com/filecoin-project/specs-actors/actors/runtime" + "github.com/filecoin-project/specs-actors/actors/runtime/proof" + . 
"github.com/filecoin-project/specs-actors/actors/util" + "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/specs-actors/actors/util/smoothing" +) + +type Runtime = runtime.Runtime + +type SectorTermination int64 + +const ( + ErrTooManyProveCommits = exitcode.FirstActorSpecificExitCode + iota +) + +type Actor struct{} + +func (a Actor) Exports() []interface{} { + return []interface{}{ + builtin.MethodConstructor: a.Constructor, + 2: a.CreateMiner, + 3: a.UpdateClaimedPower, + 4: a.EnrollCronEvent, + 5: a.OnEpochTickEnd, + 6: a.UpdatePledgeTotal, + 7: a.OnConsensusFault, + 8: a.SubmitPoRepForBulkVerify, + 9: a.CurrentTotalPower, + } +} + +func (a Actor) Code() cid.Cid { + return builtin.StoragePowerActorCodeID +} + +func (a Actor) IsSingleton() bool { + return true +} + +func (a Actor) State() cbor.Er { + return new(State) +} + +var _ runtime.VMActor = Actor{} + +// Storage miner actor constructor params are defined here so the power actor can send them to the init actor +// to instantiate miners. 
+type MinerConstructorParams struct { + OwnerAddr addr.Address + WorkerAddr addr.Address + ControlAddrs []addr.Address + SealProofType abi.RegisteredSealProof + PeerId abi.PeerID + Multiaddrs []abi.Multiaddrs +} + +type SectorStorageWeightDesc struct { + SectorSize abi.SectorSize + Duration abi.ChainEpoch + DealWeight abi.DealWeight + VerifiedDealWeight abi.DealWeight +} + +//////////////////////////////////////////////////////////////////////////////// +// Actor methods +//////////////////////////////////////////////////////////////////////////////// + +func (a Actor) Constructor(rt Runtime, _ *abi.EmptyValue) *abi.EmptyValue { + rt.ValidateImmediateCallerIs(builtin.SystemActorAddr) + + emptyMap, err := adt.MakeEmptyMap(adt.AsStore(rt)).Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to construct state") + emptyMMapCid, err := adt.MakeEmptyMultimap(adt.AsStore(rt)).Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to construct state") + + st := ConstructState(emptyMap, emptyMMapCid) + rt.StateCreate(st) + return nil +} + +type CreateMinerParams struct { + Owner addr.Address + Worker addr.Address + SealProofType abi.RegisteredSealProof + Peer abi.PeerID + Multiaddrs []abi.Multiaddrs +} + +type CreateMinerReturn struct { + IDAddress addr.Address // The canonical ID-based address for the actor. + RobustAddress addr.Address // A more expensive but re-org-safe address for the newly created actor. +} + +func (a Actor) CreateMiner(rt Runtime, params *CreateMinerParams) *CreateMinerReturn { + rt.ValidateImmediateCallerType(builtin.CallerTypesSignable...) 
+ + ctorParams := MinerConstructorParams{ + OwnerAddr: params.Owner, + WorkerAddr: params.Worker, + SealProofType: params.SealProofType, + PeerId: params.Peer, + Multiaddrs: params.Multiaddrs, + } + ctorParamBuf := new(bytes.Buffer) + err := ctorParams.MarshalCBOR(ctorParamBuf) + builtin.RequireNoErr(rt, err, exitcode.ErrSerialization, "failed to serialize miner constructor params %v", ctorParams) + + var addresses initact.ExecReturn + code := rt.Send( + builtin.InitActorAddr, + builtin.MethodsInit.Exec, + &initact.ExecParams{ + CodeCID: builtin.StorageMinerActorCodeID, + ConstructorParams: ctorParamBuf.Bytes(), + }, + rt.ValueReceived(), // Pass on any value to the new actor. + &addresses, + ) + builtin.RequireSuccess(rt, code, "failed to init new actor") + + var st State + rt.StateTransaction(&st, func() { + claims, err := adt.AsMap(adt.AsStore(rt), st.Claims) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load claims") + + err = setClaim(claims, addresses.IDAddress, &Claim{abi.NewStoragePower(0), abi.NewStoragePower(0)}) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to put power in claimed table while creating miner") + + st.MinerCount += 1 + + st.Claims, err = claims.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush claims") + }) + return &CreateMinerReturn{ + IDAddress: addresses.IDAddress, + RobustAddress: addresses.RobustAddress, + } +} + +type UpdateClaimedPowerParams struct { + RawByteDelta abi.StoragePower + QualityAdjustedDelta abi.StoragePower +} + +// Adds or removes claimed power for the calling actor. +// May only be invoked by a miner actor. 
+func (a Actor) UpdateClaimedPower(rt Runtime, params *UpdateClaimedPowerParams) *abi.EmptyValue { + rt.ValidateImmediateCallerType(builtin.StorageMinerActorCodeID) + minerAddr := rt.Caller() + var st State + rt.StateTransaction(&st, func() { + claims, err := adt.AsMap(adt.AsStore(rt), st.Claims) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load claims") + + err = st.addToClaim(claims, minerAddr, params.RawByteDelta, params.QualityAdjustedDelta) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to update power raw %s, qa %s", params.RawByteDelta, params.QualityAdjustedDelta) + + st.Claims, err = claims.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush claims") + }) + return nil +} + +type EnrollCronEventParams struct { + EventEpoch abi.ChainEpoch + Payload []byte +} + +func (a Actor) EnrollCronEvent(rt Runtime, params *EnrollCronEventParams) *abi.EmptyValue { + rt.ValidateImmediateCallerType(builtin.StorageMinerActorCodeID) + minerAddr := rt.Caller() + minerEvent := CronEvent{ + MinerAddr: minerAddr, + CallbackPayload: params.Payload, + } + + // Ensure it is not possible to enter a large negative number which would cause problems in cron processing. + if params.EventEpoch < 0 { + rt.Abortf(exitcode.ErrIllegalArgument, "cron event epoch %d cannot be less than zero", params.EventEpoch) + } + + var st State + rt.StateTransaction(&st, func() { + events, err := adt.AsMultimap(adt.AsStore(rt), st.CronEventQueue) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load cron events") + + err = st.appendCronEvent(events, params.EventEpoch, &minerEvent) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to enroll cron event") + + st.CronEventQueue, err = events.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush cron events") + }) + return nil +} + +// Called by Cron. 
+func (a Actor) OnEpochTickEnd(rt Runtime, _ *abi.EmptyValue) *abi.EmptyValue { + rt.ValidateImmediateCallerIs(builtin.CronActorAddr) + + a.processDeferredCronEvents(rt) + a.processBatchProofVerifies(rt) + + var st State + rt.StateTransaction(&st, func() { + // update next epoch's power and pledge values + // this must come before the next epoch's rewards are calculated + // so that next epoch reward reflects power added this epoch + rawBytePower, qaPower := CurrentTotalPower(&st) + st.ThisEpochPledgeCollateral = st.TotalPledgeCollateral + st.ThisEpochQualityAdjPower = qaPower + st.ThisEpochRawBytePower = rawBytePower + delta := rt.CurrEpoch() - st.LastProcessedCronEpoch + st.updateSmoothedEstimate(delta) + + st.LastProcessedCronEpoch = rt.CurrEpoch() + }) + + // update network KPI in RewardActor + code := rt.Send( + builtin.RewardActorAddr, + builtin.MethodsReward.UpdateNetworkKPI, + &st.ThisEpochRawBytePower, + abi.NewTokenAmount(0), + &builtin.Discard{}, + ) + builtin.RequireSuccess(rt, code, "failed to update network KPI with Reward Actor") + + return nil +} + +func (a Actor) UpdatePledgeTotal(rt Runtime, pledgeDelta *abi.TokenAmount) *abi.EmptyValue { + rt.ValidateImmediateCallerType(builtin.StorageMinerActorCodeID) + var st State + rt.StateTransaction(&st, func() { + st.addPledgeTotal(*pledgeDelta) + }) + return nil +} + +func (a Actor) OnConsensusFault(rt Runtime, pledgeAmount *abi.TokenAmount) *abi.EmptyValue { + rt.ValidateImmediateCallerType(builtin.StorageMinerActorCodeID) + minerAddr := rt.Caller() + + var st State + rt.StateTransaction(&st, func() { + claims, err := adt.AsMap(adt.AsStore(rt), st.Claims) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load claims") + + claim, powerOk, err := getClaim(claims, minerAddr) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to read claimed power for fault") + if !powerOk { + rt.Abortf(exitcode.ErrNotFound, "miner %v not registered (already slashed?)", minerAddr) + } + 
Assert(claim.RawBytePower.GreaterThanEqual(big.Zero())) + Assert(claim.QualityAdjPower.GreaterThanEqual(big.Zero())) + err = st.addToClaim(claims, minerAddr, claim.RawBytePower.Neg(), claim.QualityAdjPower.Neg()) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "could not add to claim for %s after loading existing claim for this address", minerAddr) + + st.addPledgeTotal(pledgeAmount.Neg()) + + // delete miner actor claims + err = claims.Delete(abi.AddrKey(minerAddr)) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to remove miner %v", minerAddr) + + st.MinerCount -= 1 + + st.Claims, err = claims.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush claims") + }) + + return nil +} + +// GasOnSubmitVerifySeal is amount of gas charged for SubmitPoRepForBulkVerify +// This number is empirically determined +const GasOnSubmitVerifySeal = 34721049 + +func (a Actor) SubmitPoRepForBulkVerify(rt Runtime, sealInfo *proof.SealVerifyInfo) *abi.EmptyValue { + rt.ValidateImmediateCallerType(builtin.StorageMinerActorCodeID) + + minerAddr := rt.Caller() + + var st State + rt.StateTransaction(&st, func() { + store := adt.AsStore(rt) + var mmap *adt.Multimap + if st.ProofValidationBatch == nil { + mmap = adt.MakeEmptyMultimap(store) + } else { + var err error + mmap, err = adt.AsMultimap(adt.AsStore(rt), *st.ProofValidationBatch) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load proof batch set") + } + + arr, found, err := mmap.Get(abi.AddrKey(minerAddr)) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get get seal verify infos at addr %s", minerAddr) + if found && arr.Length() >= MaxMinerProveCommitsPerEpoch { + rt.Abortf(ErrTooManyProveCommits, "miner %s attempting to prove commit over %d sectors in epoch", minerAddr, MaxMinerProveCommitsPerEpoch) + } + + err = mmap.Add(abi.AddrKey(minerAddr), sealInfo) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to insert 
proof into batch") + + mmrc, err := mmap.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush proof batch") + + rt.ChargeGas("OnSubmitVerifySeal", GasOnSubmitVerifySeal, 0) + st.ProofValidationBatch = &mmrc + }) + + return nil +} + +type CurrentTotalPowerReturn struct { + RawBytePower abi.StoragePower + QualityAdjPower abi.StoragePower + PledgeCollateral abi.TokenAmount + QualityAdjPowerSmoothed *smoothing.FilterEstimate +} + +// Returns the total power and pledge recorded by the power actor. +// The returned values are frozen during the cron tick before this epoch +// so that this method returns consistent values while processing all messages +// of an epoch. +func (a Actor) CurrentTotalPower(rt Runtime, _ *abi.EmptyValue) *CurrentTotalPowerReturn { + rt.ValidateImmediateCallerAcceptAny() + var st State + rt.StateReadonly(&st) + + return &CurrentTotalPowerReturn{ + RawBytePower: st.ThisEpochRawBytePower, + QualityAdjPower: st.ThisEpochQualityAdjPower, + PledgeCollateral: st.ThisEpochPledgeCollateral, + QualityAdjPowerSmoothed: st.ThisEpochQAPowerSmoothed, + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Method utility functions +//////////////////////////////////////////////////////////////////////////////// + +func (a Actor) processBatchProofVerifies(rt Runtime) { + var st State + + var miners []address.Address + verifies := make(map[address.Address][]proof.SealVerifyInfo) + + rt.StateTransaction(&st, func() { + store := adt.AsStore(rt) + if st.ProofValidationBatch == nil { + return + } + mmap, err := adt.AsMultimap(store, *st.ProofValidationBatch) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load proofs validation batch") + + err = mmap.ForAll(func(k string, arr *adt.Array) error { + a, err := address.NewFromBytes([]byte(k)) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to parse address key") + + miners = append(miners, a) + + var infos 
[]proof.SealVerifyInfo + var svi proof.SealVerifyInfo + err = arr.ForEach(&svi, func(i int64) error { + infos = append(infos, svi) + return nil + }) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to iterate over proof verify array for miner %s", a) + + verifies[a] = infos + return nil + }) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to iterate proof batch") + + st.ProofValidationBatch = nil + }) + + res, err := rt.BatchVerifySeals(verifies) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to batch verify") + + for _, m := range miners { + vres, ok := res[m] + if !ok { + rt.Abortf(exitcode.ErrNotFound, "batch verify seals syscall implemented incorrectly") + } + + verifs := verifies[m] + + seen := map[abi.SectorNumber]struct{}{} + var successful []abi.SectorNumber + for i, r := range vres { + if r { + snum := verifs[i].SectorID.Number + + if _, exists := seen[snum]; exists { + // filter-out duplicates + continue + } + + seen[snum] = struct{}{} + successful = append(successful, snum) + } + } + + // The exit code is explicitly ignored + _ = rt.Send( + m, + builtin.MethodsMiner.ConfirmSectorProofsValid, + &builtin.ConfirmSectorProofsParams{Sectors: successful}, + abi.NewTokenAmount(0), + &builtin.Discard{}, + ) + } +} + +func (a Actor) processDeferredCronEvents(rt Runtime) { + rtEpoch := rt.CurrEpoch() + + var cronEvents []CronEvent + var st State + rt.StateTransaction(&st, func() { + events, err := adt.AsMultimap(adt.AsStore(rt), st.CronEventQueue) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load cron events") + + for epoch := st.FirstCronEpoch; epoch <= rtEpoch; epoch++ { + epochEvents, err := loadCronEvents(events, epoch) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load cron events at %v", epoch) + + cronEvents = append(cronEvents, epochEvents...) 
+ + if len(epochEvents) > 0 { + err = events.RemoveAll(epochKey(epoch)) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to clear cron events at %v", epoch) + } + } + + st.FirstCronEpoch = rtEpoch + 1 + + st.CronEventQueue, err = events.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush events") + }) + failedMinerCrons := make([]addr.Address, 0) + for _, event := range cronEvents { + code := rt.Send( + event.MinerAddr, + builtin.MethodsMiner.OnDeferredCronEvent, + runtime.CBORBytes(event.CallbackPayload), + abi.NewTokenAmount(0), + &builtin.Discard{}, + ) + // If a callback fails, this actor continues to invoke other callbacks + // and persists state removing the failed event from the event queue. It won't be tried again. + // Failures are unexpected here but will result in removal of miner power + // A log message would really help here. + if code != exitcode.Ok { + rt.Log(rtt.WARN, "OnDeferredCronEvent failed for miner %s: exitcode %d", event.MinerAddr, code) + failedMinerCrons = append(failedMinerCrons, event.MinerAddr) + } + } + rt.StateTransaction(&st, func() { + claims, err := adt.AsMap(adt.AsStore(rt), st.Claims) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load claims") + + // Remove power and leave miner frozen + for _, minerAddr := range failedMinerCrons { + claim, found, err := getClaim(claims, minerAddr) + if err != nil { + rt.Log(rtt.ERROR, "failed to get claim for miner %s after failing OnDeferredCronEvent: %s", minerAddr, err) + continue + } + if !found { + rt.Log(rtt.WARN, "miner OnDeferredCronEvent failed for miner %s with no power", minerAddr) + continue + } + + // zero out miner power + err = st.addToClaim(claims, minerAddr, claim.RawBytePower.Neg(), claim.QualityAdjPower.Neg()) + if err != nil { + rt.Log(rtt.WARN, "failed to remove (%d, %d) power for miner %s after to failed cron", claim.RawBytePower, claim.QualityAdjPower, minerAddr) + continue + } + } + + st.Claims, err = 
claims.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush claims") + }) +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/power/power_state.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/power/power_state.go new file mode 100644 index 0000000000..c3cd25eb34 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/power/power_state.go @@ -0,0 +1,267 @@ +package power + +import ( + "fmt" + "reflect" + + addr "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + cid "github.com/ipfs/go-cid" + errors "github.com/pkg/errors" + "golang.org/x/xerrors" + + . "github.com/filecoin-project/specs-actors/actors/util" + "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/specs-actors/actors/util/smoothing" +) + +// genesis power in bytes = 750,000 GiB +var InitialQAPowerEstimatePosition = big.Mul(big.NewInt(750_000), big.NewInt(1<<30)) + +// max chain throughput in bytes per epoch = 120 ProveCommits / epoch = 3,840 GiB +var InitialQAPowerEstimateVelocity = big.Mul(big.NewInt(3_840), big.NewInt(1<<30)) + +type State struct { + TotalRawBytePower abi.StoragePower + // TotalBytesCommitted includes claims from miners below min power threshold + TotalBytesCommitted abi.StoragePower + TotalQualityAdjPower abi.StoragePower + // TotalQABytesCommitted includes claims from miners below min power threshold + TotalQABytesCommitted abi.StoragePower + TotalPledgeCollateral abi.TokenAmount + + // These fields are set once per epoch in the previous cron tick and used + // for consistent values across a single epoch's state transition. 
+ ThisEpochRawBytePower abi.StoragePower + ThisEpochQualityAdjPower abi.StoragePower + ThisEpochPledgeCollateral abi.TokenAmount + ThisEpochQAPowerSmoothed *smoothing.FilterEstimate + + MinerCount int64 + // Number of miners having proven the minimum consensus power. + MinerAboveMinPowerCount int64 + + // A queue of events to be triggered by cron, indexed by epoch. + CronEventQueue cid.Cid // Multimap, (HAMT[ChainEpoch]AMT[CronEvent]) + + // First epoch in which a cron task may be stored. + // Cron will iterate every epoch between this and the current epoch inclusively to find tasks to execute. + FirstCronEpoch abi.ChainEpoch + + // Last epoch power cron tick has been processed. + LastProcessedCronEpoch abi.ChainEpoch + + // Claimed power for each miner. + Claims cid.Cid // Map, HAMT[address]Claim + + ProofValidationBatch *cid.Cid +} + +type Claim struct { + // Sum of raw byte power for a miner's sectors. + RawBytePower abi.StoragePower + + // Sum of quality adjusted power for a miner's sectors. 
+ QualityAdjPower abi.StoragePower +} + +type CronEvent struct { + MinerAddr addr.Address + CallbackPayload []byte +} + +func ConstructState(emptyMapCid, emptyMMapCid cid.Cid) *State { + return &State{ + TotalRawBytePower: abi.NewStoragePower(0), + TotalBytesCommitted: abi.NewStoragePower(0), + TotalQualityAdjPower: abi.NewStoragePower(0), + TotalQABytesCommitted: abi.NewStoragePower(0), + TotalPledgeCollateral: abi.NewTokenAmount(0), + ThisEpochRawBytePower: abi.NewStoragePower(0), + ThisEpochQualityAdjPower: abi.NewStoragePower(0), + ThisEpochPledgeCollateral: abi.NewTokenAmount(0), + ThisEpochQAPowerSmoothed: smoothing.NewEstimate(InitialQAPowerEstimatePosition, InitialQAPowerEstimateVelocity), + FirstCronEpoch: 0, + LastProcessedCronEpoch: abi.ChainEpoch(-1), + CronEventQueue: emptyMMapCid, + Claims: emptyMapCid, + MinerCount: 0, + MinerAboveMinPowerCount: 0, + } +} + +// MinerNominalPowerMeetsConsensusMinimum is used to validate Election PoSt +// winners outside the chain state. If the miner has over a threshold of power +// the miner meets the minimum. If the network is below a threshold of +// miners and has power > zero the miner meets the minimum. 
+func (st *State) MinerNominalPowerMeetsConsensusMinimum(s adt.Store, miner addr.Address) (bool, error) { //nolint:deadcode,unused + claims, err := adt.AsMap(s, st.Claims) + if err != nil { + return false, xerrors.Errorf("failed to load claims: %w", err) + } + + claim, ok, err := getClaim(claims, miner) + if err != nil { + return false, err + } + if !ok { + return false, errors.Errorf("no claim for actor %v", miner) + } + + minerNominalPower := claim.QualityAdjPower + + // if miner is larger than min power requirement, we're set + if minerNominalPower.GreaterThanEqual(ConsensusMinerMinPower) { + return true, nil + } + + // otherwise, if ConsensusMinerMinMiners miners meet min power requirement, return false + if st.MinerAboveMinPowerCount >= ConsensusMinerMinMiners { + return false, nil + } + + // If fewer than ConsensusMinerMinMiners over threshold miner can win a block with non-zero power + return minerNominalPower.GreaterThanEqual(abi.NewStoragePower(0)), nil +} + +// Parameters may be negative to subtract. 
+func (st *State) AddToClaim(s adt.Store, miner addr.Address, power abi.StoragePower, qapower abi.StoragePower) error { + claims, err := adt.AsMap(s, st.Claims) + if err != nil { + return xerrors.Errorf("failed to load claims: %w", err) + } + + if err := st.addToClaim(claims, miner, power, qapower); err != nil { + return xerrors.Errorf("failed to add claim: %w", err) + } + + st.Claims, err = claims.Root() + if err != nil { + return xerrors.Errorf("failed to flush claims: %w", err) + } + + return nil +} + +func (st *State) addToClaim(claims *adt.Map, miner addr.Address, power abi.StoragePower, qapower abi.StoragePower) error { + oldClaim, ok, err := getClaim(claims, miner) + if err != nil { + return fmt.Errorf("failed to get claim: %w", err) + } + if !ok { + return exitcode.ErrNotFound.Wrapf("no claim for actor %v", miner) + } + + // TotalBytes always update directly + st.TotalQABytesCommitted = big.Add(st.TotalQABytesCommitted, qapower) + st.TotalBytesCommitted = big.Add(st.TotalBytesCommitted, power) + + newClaim := Claim{ + RawBytePower: big.Add(oldClaim.RawBytePower, power), + QualityAdjPower: big.Add(oldClaim.QualityAdjPower, qapower), + } + + prevBelow := oldClaim.QualityAdjPower.LessThan(ConsensusMinerMinPower) + stillBelow := newClaim.QualityAdjPower.LessThan(ConsensusMinerMinPower) + + if prevBelow && !stillBelow { + // just passed min miner size + st.MinerAboveMinPowerCount++ + st.TotalQualityAdjPower = big.Add(st.TotalQualityAdjPower, newClaim.QualityAdjPower) + st.TotalRawBytePower = big.Add(st.TotalRawBytePower, newClaim.RawBytePower) + } else if !prevBelow && stillBelow { + // just went below min miner size + st.MinerAboveMinPowerCount-- + st.TotalQualityAdjPower = big.Sub(st.TotalQualityAdjPower, oldClaim.QualityAdjPower) + st.TotalRawBytePower = big.Sub(st.TotalRawBytePower, oldClaim.RawBytePower) + } else if !prevBelow && !stillBelow { + // Was above the threshold, still above + st.TotalQualityAdjPower = big.Add(st.TotalQualityAdjPower, qapower) + 
st.TotalRawBytePower = big.Add(st.TotalRawBytePower, power) + } + + AssertMsg(newClaim.RawBytePower.GreaterThanEqual(big.Zero()), "negative claimed raw byte power: %v", newClaim.RawBytePower) + AssertMsg(newClaim.QualityAdjPower.GreaterThanEqual(big.Zero()), "negative claimed quality adjusted power: %v", newClaim.QualityAdjPower) + AssertMsg(st.MinerAboveMinPowerCount >= 0, "negative number of miners larger than min: %v", st.MinerAboveMinPowerCount) + return setClaim(claims, miner, &newClaim) +} + +func getClaim(claims *adt.Map, a addr.Address) (*Claim, bool, error) { + var out Claim + found, err := claims.Get(abi.AddrKey(a), &out) + if err != nil { + return nil, false, errors.Wrapf(err, "failed to get claim for address %v", a) + } + if !found { + return nil, false, nil + } + return &out, true, nil +} + +func (st *State) addPledgeTotal(amount abi.TokenAmount) { + st.TotalPledgeCollateral = big.Add(st.TotalPledgeCollateral, amount) + AssertMsg(st.TotalPledgeCollateral.GreaterThanEqual(big.Zero()), "pledged amount cannot be negative") +} + +func (st *State) appendCronEvent(events *adt.Multimap, epoch abi.ChainEpoch, event *CronEvent) error { + // if event is in past, alter FirstCronEpoch so it will be found. 
+ if epoch < st.FirstCronEpoch { + st.FirstCronEpoch = epoch + } + + if err := events.Add(epochKey(epoch), event); err != nil { + return xerrors.Errorf("failed to store cron event at epoch %v for miner %v: %w", epoch, event, err) + } + + return nil +} + +func (st *State) updateSmoothedEstimate(delta abi.ChainEpoch) { + filterQAPower := smoothing.LoadFilter(st.ThisEpochQAPowerSmoothed, smoothing.DefaultAlpha, smoothing.DefaultBeta) + st.ThisEpochQAPowerSmoothed = filterQAPower.NextEstimate(st.ThisEpochQualityAdjPower, delta) +} + +func loadCronEvents(mmap *adt.Multimap, epoch abi.ChainEpoch) ([]CronEvent, error) { + var events []CronEvent + var ev CronEvent + err := mmap.ForEach(epochKey(epoch), &ev, func(i int64) error { + events = append(events, ev) + return nil + }) + return events, err +} + +func setClaim(claims *adt.Map, a addr.Address, claim *Claim) error { + Assert(claim.RawBytePower.GreaterThanEqual(big.Zero())) + Assert(claim.QualityAdjPower.GreaterThanEqual(big.Zero())) + + if err := claims.Put(abi.AddrKey(a), claim); err != nil { + return xerrors.Errorf("failed to put claim with address %s power %v: %w", a, claim, err) + } + + return nil +} + +// CurrentTotalPower returns current power values accounting for minimum miner +// and minimum power +func CurrentTotalPower(st *State) (abi.StoragePower, abi.StoragePower) { + if st.MinerAboveMinPowerCount < ConsensusMinerMinMiners { + return st.TotalBytesCommitted, st.TotalQABytesCommitted + } + return st.TotalRawBytePower, st.TotalQualityAdjPower +} + +func epochKey(e abi.ChainEpoch) abi.Keyer { + return abi.IntKey(int64(e)) +} + +func init() { + // Check that ChainEpoch is indeed a signed integer to confirm that epochKey is making the right interpretation. 
+ var e abi.ChainEpoch + if reflect.TypeOf(e).Kind() != reflect.Int64 { + panic("incorrect chain epoch encoding") + } + +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/reward/cbor_gen.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/reward/cbor_gen.go new file mode 100644 index 0000000000..778d5b78af --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/reward/cbor_gen.go @@ -0,0 +1,431 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package reward + +import ( + "fmt" + "io" + + abi "github.com/filecoin-project/go-state-types/abi" + smoothing "github.com/filecoin-project/specs-actors/actors/util/smoothing" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +var lengthBufState = []byte{137} + +func (t *State) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufState); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.CumsumBaseline (big.Int) (struct) + if err := t.CumsumBaseline.MarshalCBOR(w); err != nil { + return err + } + + // t.CumsumRealized (big.Int) (struct) + if err := t.CumsumRealized.MarshalCBOR(w); err != nil { + return err + } + + // t.EffectiveNetworkTime (abi.ChainEpoch) (int64) + if t.EffectiveNetworkTime >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EffectiveNetworkTime)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EffectiveNetworkTime-1)); err != nil { + return err + } + } + + // t.EffectiveBaselinePower (big.Int) (struct) + if err := t.EffectiveBaselinePower.MarshalCBOR(w); err != nil { + return err + } + + // t.ThisEpochReward (big.Int) (struct) + if err := t.ThisEpochReward.MarshalCBOR(w); err != nil { + return err + } + + // t.ThisEpochRewardSmoothed 
(smoothing.FilterEstimate) (struct) + if err := t.ThisEpochRewardSmoothed.MarshalCBOR(w); err != nil { + return err + } + + // t.ThisEpochBaselinePower (big.Int) (struct) + if err := t.ThisEpochBaselinePower.MarshalCBOR(w); err != nil { + return err + } + + // t.Epoch (abi.ChainEpoch) (int64) + if t.Epoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil { + return err + } + } + + // t.TotalMined (big.Int) (struct) + if err := t.TotalMined.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *State) UnmarshalCBOR(r io.Reader) error { + *t = State{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 9 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.CumsumBaseline (big.Int) (struct) + + { + + if err := t.CumsumBaseline.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.CumsumBaseline: %w", err) + } + + } + // t.CumsumRealized (big.Int) (struct) + + { + + if err := t.CumsumRealized.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.CumsumRealized: %w", err) + } + + } + // t.EffectiveNetworkTime (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + 
t.EffectiveNetworkTime = abi.ChainEpoch(extraI) + } + // t.EffectiveBaselinePower (big.Int) (struct) + + { + + if err := t.EffectiveBaselinePower.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.EffectiveBaselinePower: %w", err) + } + + } + // t.ThisEpochReward (big.Int) (struct) + + { + + if err := t.ThisEpochReward.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ThisEpochReward: %w", err) + } + + } + // t.ThisEpochRewardSmoothed (smoothing.FilterEstimate) (struct) + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.ThisEpochRewardSmoothed = new(smoothing.FilterEstimate) + if err := t.ThisEpochRewardSmoothed.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ThisEpochRewardSmoothed pointer: %w", err) + } + } + + } + // t.ThisEpochBaselinePower (big.Int) (struct) + + { + + if err := t.ThisEpochBaselinePower.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ThisEpochBaselinePower: %w", err) + } + + } + // t.Epoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Epoch = abi.ChainEpoch(extraI) + } + // t.TotalMined (big.Int) (struct) + + { + + if err := t.TotalMined.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.TotalMined: %w", err) + } + + } + return nil +} + +var lengthBufAwardBlockRewardParams = []byte{132} + +func (t *AwardBlockRewardParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := 
w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufAwardBlockRewardParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Miner (address.Address) (struct) + if err := t.Miner.MarshalCBOR(w); err != nil { + return err + } + + // t.Penalty (big.Int) (struct) + if err := t.Penalty.MarshalCBOR(w); err != nil { + return err + } + + // t.GasReward (big.Int) (struct) + if err := t.GasReward.MarshalCBOR(w); err != nil { + return err + } + + // t.WinCount (int64) (int64) + if t.WinCount >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.WinCount)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.WinCount-1)); err != nil { + return err + } + } + return nil +} + +func (t *AwardBlockRewardParams) UnmarshalCBOR(r io.Reader) error { + *t = AwardBlockRewardParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Miner (address.Address) (struct) + + { + + if err := t.Miner.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Miner: %w", err) + } + + } + // t.Penalty (big.Int) (struct) + + { + + if err := t.Penalty.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Penalty: %w", err) + } + + } + // t.GasReward (big.Int) (struct) + + { + + if err := t.GasReward.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.GasReward: %w", err) + } + + } + // t.WinCount (int64) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return 
fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.WinCount = int64(extraI) + } + return nil +} + +var lengthBufThisEpochRewardReturn = []byte{131} + +func (t *ThisEpochRewardReturn) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufThisEpochRewardReturn); err != nil { + return err + } + + // t.ThisEpochReward (big.Int) (struct) + if err := t.ThisEpochReward.MarshalCBOR(w); err != nil { + return err + } + + // t.ThisEpochRewardSmoothed (smoothing.FilterEstimate) (struct) + if err := t.ThisEpochRewardSmoothed.MarshalCBOR(w); err != nil { + return err + } + + // t.ThisEpochBaselinePower (big.Int) (struct) + if err := t.ThisEpochBaselinePower.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *ThisEpochRewardReturn) UnmarshalCBOR(r io.Reader) error { + *t = ThisEpochRewardReturn{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.ThisEpochReward (big.Int) (struct) + + { + + if err := t.ThisEpochReward.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ThisEpochReward: %w", err) + } + + } + // t.ThisEpochRewardSmoothed (smoothing.FilterEstimate) (struct) + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.ThisEpochRewardSmoothed = new(smoothing.FilterEstimate) + if err := t.ThisEpochRewardSmoothed.UnmarshalCBOR(br); err != nil { + return 
xerrors.Errorf("unmarshaling t.ThisEpochRewardSmoothed pointer: %w", err) + } + } + + } + // t.ThisEpochBaselinePower (big.Int) (struct) + + { + + if err := t.ThisEpochBaselinePower.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ThisEpochBaselinePower: %w", err) + } + + } + return nil +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/reward/reward_actor.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/reward/reward_actor.go new file mode 100644 index 0000000000..773a394770 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/reward/reward_actor.go @@ -0,0 +1,188 @@ +package reward + +import ( + "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/exitcode" + rtt "github.com/filecoin-project/go-state-types/rt" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/specs-actors/actors/runtime" + . 
"github.com/filecoin-project/specs-actors/actors/util" + "github.com/filecoin-project/specs-actors/actors/util/smoothing" +) + +type Actor struct{} + +func (a Actor) Exports() []interface{} { + return []interface{}{ + builtin.MethodConstructor: a.Constructor, + 2: a.AwardBlockReward, + 3: a.ThisEpochReward, + 4: a.UpdateNetworkKPI, + } +} + +func (a Actor) Code() cid.Cid { + return builtin.RewardActorCodeID +} + +func (a Actor) IsSingleton() bool { + return true +} + +func (a Actor) State() cbor.Er { + return new(State) +} + +var _ runtime.VMActor = Actor{} + +func (a Actor) Constructor(rt runtime.Runtime, currRealizedPower *abi.StoragePower) *abi.EmptyValue { + rt.ValidateImmediateCallerIs(builtin.SystemActorAddr) + + if currRealizedPower == nil { + rt.Abortf(exitcode.ErrIllegalArgument, "arugment should not be nil") + return nil // linter does not understand abort exiting + } + st := ConstructState(*currRealizedPower) + rt.StateCreate(st) + return nil +} + +type AwardBlockRewardParams struct { + Miner address.Address + Penalty abi.TokenAmount // penalty for including bad messages in a block, >= 0 + GasReward abi.TokenAmount // gas reward from all gas fees in a block, >= 0 + WinCount int64 // number of reward units won, > 0 +} + +// Awards a reward to a block producer. +// This method is called only by the system actor, implicitly, as the last message in the evaluation of a block. +// The system actor thus computes the parameters and attached value. +// +// The reward includes two components: +// - the epoch block reward, computed and paid from the reward actor's balance, +// - the block gas reward, expected to be transferred to the reward actor with this invocation. 
+// +// The reward is reduced before the residual is credited to the block producer, by: +// - a penalty amount, provided as a parameter, which is burnt, +func (a Actor) AwardBlockReward(rt runtime.Runtime, params *AwardBlockRewardParams) *abi.EmptyValue { + rt.ValidateImmediateCallerIs(builtin.SystemActorAddr) + priorBalance := rt.CurrentBalance() + if params.Penalty.LessThan(big.Zero()) { + rt.Abortf(exitcode.ErrIllegalArgument, "negative penalty %v", params.Penalty) + } + if params.GasReward.LessThan(big.Zero()) { + rt.Abortf(exitcode.ErrIllegalArgument, "negative gas reward %v", params.GasReward) + } + if priorBalance.LessThan(params.GasReward) { + rt.Abortf(exitcode.ErrIllegalState, "actor current balance %v insufficient to pay gas reward %v", + priorBalance, params.GasReward) + } + if params.WinCount <= 0 { + rt.Abortf(exitcode.ErrIllegalArgument, "invalid win count %d", params.WinCount) + } + + minerAddr, ok := rt.ResolveAddress(params.Miner) + if !ok { + rt.Abortf(exitcode.ErrNotFound, "failed to resolve given owner address") + } + + penalty := abi.NewTokenAmount(0) + totalReward := big.Zero() + var st State + rt.StateTransaction(&st, func() { + blockReward := big.Mul(st.ThisEpochReward, big.NewInt(params.WinCount)) + blockReward = big.Div(blockReward, big.NewInt(builtin.ExpectedLeadersPerEpoch)) + totalReward = big.Add(blockReward, params.GasReward) + currBalance := rt.CurrentBalance() + if totalReward.GreaterThan(currBalance) { + rt.Log(rtt.WARN, "reward actor balance %d below totalReward expected %d, paying out rest of balance", currBalance, totalReward) + totalReward = currBalance + + blockReward = big.Sub(totalReward, params.GasReward) + // Since we have already asserted the balance is greater than gas reward blockReward is >= 0 + AssertMsg(blockReward.GreaterThanEqual(big.Zero()), "programming error, block reward is %v below zero", blockReward) + } + st.TotalMined = big.Add(st.TotalMined, blockReward) + }) + + // Cap the penalty at the total reward 
value. + penalty = big.Min(params.Penalty, totalReward) + + // Reduce the payable reward by the penalty. + rewardPayable := big.Sub(totalReward, penalty) + + AssertMsg(big.Add(rewardPayable, penalty).LessThanEqual(priorBalance), + "reward payable %v + penalty %v exceeds balance %v", rewardPayable, penalty, priorBalance) + + // if this fails, we can assume the miner is responsible and avoid failing here. + code := rt.Send(minerAddr, builtin.MethodsMiner.AddLockedFund, &rewardPayable, rewardPayable, &builtin.Discard{}) + if !code.IsSuccess() { + rt.Log(rtt.ERROR, "failed to send AddLockedFund call to the miner actor with funds: %v, code: %v", rewardPayable, code) + code := rt.Send(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, rewardPayable, &builtin.Discard{}) + if !code.IsSuccess() { + rt.Log(rtt.ERROR, "failed to send unsent reward to the burnt funds actor, code: %v", code) + } + } + + // Burn the penalty amount. + if penalty.GreaterThan(abi.NewTokenAmount(0)) { + code = rt.Send(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, penalty, &builtin.Discard{}) + builtin.RequireSuccess(rt, code, "failed to send penalty to burnt funds actor") + } + + return nil +} + +type ThisEpochRewardReturn struct { + ThisEpochReward abi.TokenAmount + ThisEpochRewardSmoothed *smoothing.FilterEstimate + ThisEpochBaselinePower abi.StoragePower +} + +// The award value used for the current epoch, updated at the end of an epoch +// through cron tick. In the case previous epochs were null blocks this +// is the reward value as calculated at the last non-null epoch. 
+func (a Actor) ThisEpochReward(rt runtime.Runtime, _ *abi.EmptyValue) *ThisEpochRewardReturn { + rt.ValidateImmediateCallerAcceptAny() + + var st State + rt.StateReadonly(&st) + return &ThisEpochRewardReturn{ + ThisEpochReward: st.ThisEpochReward, + ThisEpochBaselinePower: st.ThisEpochBaselinePower, + ThisEpochRewardSmoothed: st.ThisEpochRewardSmoothed, + } +} + +// Called at the end of each epoch by the power actor (in turn by its cron hook). +// This is only invoked for non-empty tipsets, but catches up any number of null +// epochs to compute the next epoch reward. +func (a Actor) UpdateNetworkKPI(rt runtime.Runtime, currRealizedPower *abi.StoragePower) *abi.EmptyValue { + rt.ValidateImmediateCallerIs(builtin.StoragePowerActorAddr) + if currRealizedPower == nil { + rt.Abortf(exitcode.ErrIllegalArgument, "arugment should not be nil") + } + networkVersion := rt.NetworkVersion() + + var st State + rt.StateTransaction(&st, func() { + prev := st.Epoch + // if there were null runs catch up the computation until + // st.Epoch == rt.CurrEpoch() + for st.Epoch < rt.CurrEpoch() { + // Update to next epoch to process null rounds + st.updateToNextEpoch(*currRealizedPower, networkVersion) + } + + st.updateToNextEpochWithReward(*currRealizedPower, networkVersion) + // only update smoothed estimates after updating reward and epoch + st.updateSmoothedEstimates(st.Epoch - prev) + }) + return nil +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/reward/reward_state.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/reward/reward_state.go new file mode 100644 index 0000000000..b230939393 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/reward/reward_state.go @@ -0,0 +1,107 @@ +package reward + +import ( + abi "github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" + + 
"github.com/filecoin-project/specs-actors/actors/util/smoothing" +) + +// A quantity of space * time (in byte-epochs) representing power committed to the network for some duration. +type Spacetime = big.Int + +// 36.266260308195979333 FIL +// https://www.wolframalpha.com/input/?i=IntegerPart%5B330%2C000%2C000+*+%281+-+Exp%5B-Log%5B2%5D+%2F+%286+*+%281+year+%2F+30+seconds%29%29%5D%29+*+10%5E18%5D +const InitialRewardPositionEstimateStr = "36266260308195979333" + +var InitialRewardPositionEstimate = big.MustFromString(InitialRewardPositionEstimateStr) + +// -1.0982489*10^-7 FIL per epoch. Change of simple minted tokens between epochs 0 and 1 +// https://www.wolframalpha.com/input/?i=IntegerPart%5B%28Exp%5B-Log%5B2%5D+%2F+%286+*+%281+year+%2F+30+seconds%29%29%5D+-+1%29+*+10%5E18%5D +var InitialRewardVelocityEstimate = abi.NewTokenAmount(-109897758509) + +type State struct { + // CumsumBaseline is a target CumsumRealized needs to reach for EffectiveNetworkTime to increase + // CumsumBaseline and CumsumRealized are expressed in byte-epochs. + CumsumBaseline Spacetime + + // CumsumRealized is cumulative sum of network power capped by BalinePower(epoch) + CumsumRealized Spacetime + + // EffectiveNetworkTime is ceiling of real effective network time `theta` based on + // CumsumBaselinePower(theta) == CumsumRealizedPower + // Theta captures the notion of how much the network has progressed in its baseline + // and in advancing network time. + EffectiveNetworkTime abi.ChainEpoch + + // EffectiveBaselinePower is the baseline power at the EffectiveNetworkTime epoch + EffectiveBaselinePower abi.StoragePower + + // The reward to be paid in per WinCount to block producers. + // The actual reward total paid out depends on the number of winners in any round. + // This value is recomputed every non-null epoch and used in the next non-null epoch. 
+ ThisEpochReward abi.TokenAmount + // Smoothed ThisEpochReward + ThisEpochRewardSmoothed *smoothing.FilterEstimate + + // The baseline power the network is targeting at st.Epoch + ThisEpochBaselinePower abi.StoragePower + + // Epoch tracks for which epoch the Reward was computed + Epoch abi.ChainEpoch + + // TotalMined tracks the total FIL awared to block miners + TotalMined abi.TokenAmount +} + +func ConstructState(currRealizedPower abi.StoragePower) *State { + st := &State{ + CumsumBaseline: big.Zero(), + CumsumRealized: big.Zero(), + EffectiveNetworkTime: 0, + EffectiveBaselinePower: BaselineInitialValueV0, + + ThisEpochReward: big.Zero(), + ThisEpochBaselinePower: InitBaselinePower(), + Epoch: -1, + + ThisEpochRewardSmoothed: smoothing.NewEstimate(InitialRewardPositionEstimate, InitialRewardVelocityEstimate), + TotalMined: big.Zero(), + } + + st.updateToNextEpochWithReward(currRealizedPower, network.Version0) + + return st +} + +// Takes in current realized power and updates internal state +// Used for update of internal state during null rounds +func (st *State) updateToNextEpoch(currRealizedPower abi.StoragePower, nv network.Version) { + st.Epoch++ + st.ThisEpochBaselinePower = BaselinePowerFromPrev(st.ThisEpochBaselinePower, nv) + cappedRealizedPower := big.Min(st.ThisEpochBaselinePower, currRealizedPower) + st.CumsumRealized = big.Add(st.CumsumRealized, cappedRealizedPower) + + for st.CumsumRealized.GreaterThan(st.CumsumBaseline) { + st.EffectiveNetworkTime++ + st.EffectiveBaselinePower = BaselinePowerFromPrev(st.EffectiveBaselinePower, nv) + st.CumsumBaseline = big.Add(st.CumsumBaseline, st.EffectiveBaselinePower) + } +} + +// Takes in a current realized power for a reward epoch and computes +// and updates reward state to track reward for the next epoch +func (st *State) updateToNextEpochWithReward(currRealizedPower abi.StoragePower, nv network.Version) { + prevRewardTheta := computeRTheta(st.EffectiveNetworkTime, st.EffectiveBaselinePower, 
st.CumsumRealized, st.CumsumBaseline) + st.updateToNextEpoch(currRealizedPower, nv) + currRewardTheta := computeRTheta(st.EffectiveNetworkTime, st.EffectiveBaselinePower, st.CumsumRealized, st.CumsumBaseline) + + st.ThisEpochReward = computeReward(st.Epoch, prevRewardTheta, currRewardTheta) + +} + +func (st *State) updateSmoothedEstimates(delta abi.ChainEpoch) { + filterReward := smoothing.LoadFilter(st.ThisEpochRewardSmoothed, smoothing.DefaultAlpha, smoothing.DefaultBeta) + st.ThisEpochRewardSmoothed = filterReward.NextEstimate(st.ThisEpochReward, delta) +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/shared.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/shared.go new file mode 100644 index 0000000000..40eab91c90 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/shared.go @@ -0,0 +1,95 @@ +package builtin + +import ( + "fmt" + "io" + + addr "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + exitcode "github.com/filecoin-project/go-state-types/exitcode" + + runtime "github.com/filecoin-project/specs-actors/actors/runtime" +) + +///// Code shared by multiple built-in actors. ///// + +// Aborts with an ErrIllegalArgument if predicate is not true. +func RequireParam(rt runtime.Runtime, predicate bool, msg string, args ...interface{}) { + if !predicate { + rt.Abortf(exitcode.ErrIllegalArgument, msg, args...) + } +} + +// Propagates a failed send by aborting the current method with the same exit code. +func RequireSuccess(rt runtime.Runtime, e exitcode.ExitCode, msg string, args ...interface{}) { + if !e.IsSuccess() { + rt.Abortf(e, msg, args...) + } +} + +// Aborts with a formatted message if err is not nil. +// The provided message will be suffixed by ": %s" and the provided args suffixed by the err. 
+func RequireNoErr(rt runtime.Runtime, err error, defaultExitCode exitcode.ExitCode, msg string, args ...interface{}) { + if err != nil { + newMsg := msg + ": %s" + newArgs := append(args, err) + code := exitcode.Unwrap(err, defaultExitCode) + rt.Abortf(code, newMsg, newArgs...) + } +} + +func RequestMinerControlAddrs(rt runtime.Runtime, minerAddr addr.Address) (ownerAddr addr.Address, workerAddr addr.Address, controlAddrs []addr.Address) { + var addrs MinerAddrs + code := rt.Send(minerAddr, MethodsMiner.ControlAddresses, nil, abi.NewTokenAmount(0), &addrs) + RequireSuccess(rt, code, "failed fetching control addresses") + + return addrs.Owner, addrs.Worker, addrs.ControlAddrs +} + +// This type duplicates the Miner.ControlAddresses return type, to work around a circular dependency between actors. +type MinerAddrs struct { + Owner addr.Address + Worker addr.Address + ControlAddrs []addr.Address +} + +type ConfirmSectorProofsParams struct { + Sectors []abi.SectorNumber +} + +// ResolveToIDAddr resolves the given address to it's ID address form. +// If an ID address for the given address dosen't exist yet, it tries to create one by sending a zero balance to the given address. 
+func ResolveToIDAddr(rt runtime.Runtime, address addr.Address) (addr.Address, error) { + // if we are able to resolve it to an ID address, return the resolved address + idAddr, found := rt.ResolveAddress(address) + if found { + return idAddr, nil + } + + // send 0 balance to the account so an ID address for it is created and then try to resolve + code := rt.Send(address, MethodSend, nil, abi.NewTokenAmount(0), &Discard{}) + if !code.IsSuccess() { + return address, code.Wrapf("failed to send zero balance to address %v", address) + } + + // now try to resolve it to an ID address -> fail if not possible + idAddr, found = rt.ResolveAddress(address) + if !found { + return address, fmt.Errorf("failed to resolve address %v to ID address even after sending zero balance", address) + } + + return idAddr, nil +} + +// Discard is a helper +type Discard struct{} + +func (d *Discard) MarshalCBOR(_ io.Writer) error { + // serialization is a noop + return nil +} + +func (d *Discard) UnmarshalCBOR(_ io.Reader) error { + // deserialization is a noop + return nil +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/singletons.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/singletons.go new file mode 100644 index 0000000000..87715a9434 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/singletons.go @@ -0,0 +1,29 @@ +package builtin + +import ( + addr "github.com/filecoin-project/go-address" + + autil "github.com/filecoin-project/specs-actors/actors/util" +) + +// Addresses for singleton system actors. +var ( + // Distinguished AccountActor that is the source of system implicit messages. 
+ SystemActorAddr = mustMakeAddress(0) + InitActorAddr = mustMakeAddress(1) + RewardActorAddr = mustMakeAddress(2) + CronActorAddr = mustMakeAddress(3) + StoragePowerActorAddr = mustMakeAddress(4) + StorageMarketActorAddr = mustMakeAddress(5) + VerifiedRegistryActorAddr = mustMakeAddress(6) + // Distinguished AccountActor that is the destination of all burnt funds. + BurntFundsActorAddr = mustMakeAddress(99) +) + +const FirstNonSingletonActorId = 100 + +func mustMakeAddress(id uint64) addr.Address { + address, err := addr.NewIDAddress(id) + autil.AssertNoError(err) + return address +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/verifreg/cbor_gen.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/verifreg/cbor_gen.go new file mode 100644 index 0000000000..bc7fec65bc --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/verifreg/cbor_gen.go @@ -0,0 +1,348 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package verifreg + +import ( + "fmt" + "io" + + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +var lengthBufState = []byte{131} + +func (t *State) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufState); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.RootKey (address.Address) (struct) + if err := t.RootKey.MarshalCBOR(w); err != nil { + return err + } + + // t.Verifiers (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Verifiers); err != nil { + return xerrors.Errorf("failed to write cid field t.Verifiers: %w", err) + } + + // t.VerifiedClients (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.VerifiedClients); err != nil { + return xerrors.Errorf("failed to write cid field t.VerifiedClients: %w", err) + } + + return nil +} + +func (t *State) UnmarshalCBOR(r io.Reader) error { + *t = State{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.RootKey (address.Address) (struct) + + { + + if err := t.RootKey.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.RootKey: %w", err) + } + + } + // t.Verifiers (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Verifiers: %w", err) + } + + t.Verifiers = c + + } + // t.VerifiedClients (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.VerifiedClients: %w", err) + } + + t.VerifiedClients = c + + } + return nil +} + +var lengthBufAddVerifierParams = []byte{130} + +func (t *AddVerifierParams) 
MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufAddVerifierParams); err != nil { + return err + } + + // t.Address (address.Address) (struct) + if err := t.Address.MarshalCBOR(w); err != nil { + return err + } + + // t.Allowance (big.Int) (struct) + if err := t.Allowance.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *AddVerifierParams) UnmarshalCBOR(r io.Reader) error { + *t = AddVerifierParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Address (address.Address) (struct) + + { + + if err := t.Address.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Address: %w", err) + } + + } + // t.Allowance (big.Int) (struct) + + { + + if err := t.Allowance.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Allowance: %w", err) + } + + } + return nil +} + +var lengthBufAddVerifiedClientParams = []byte{130} + +func (t *AddVerifiedClientParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufAddVerifiedClientParams); err != nil { + return err + } + + // t.Address (address.Address) (struct) + if err := t.Address.MarshalCBOR(w); err != nil { + return err + } + + // t.Allowance (big.Int) (struct) + if err := t.Allowance.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *AddVerifiedClientParams) UnmarshalCBOR(r io.Reader) error { + *t = AddVerifiedClientParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + 
return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Address (address.Address) (struct) + + { + + if err := t.Address.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Address: %w", err) + } + + } + // t.Allowance (big.Int) (struct) + + { + + if err := t.Allowance.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Allowance: %w", err) + } + + } + return nil +} + +var lengthBufUseBytesParams = []byte{130} + +func (t *UseBytesParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufUseBytesParams); err != nil { + return err + } + + // t.Address (address.Address) (struct) + if err := t.Address.MarshalCBOR(w); err != nil { + return err + } + + // t.DealSize (big.Int) (struct) + if err := t.DealSize.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *UseBytesParams) UnmarshalCBOR(r io.Reader) error { + *t = UseBytesParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Address (address.Address) (struct) + + { + + if err := t.Address.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Address: %w", err) + } + + } + // t.DealSize (big.Int) (struct) + + { + + if err := t.DealSize.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.DealSize: %w", err) + } + + } + return nil +} + +var lengthBufRestoreBytesParams = []byte{130} + +func (t *RestoreBytesParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufRestoreBytesParams); err 
!= nil { + return err + } + + // t.Address (address.Address) (struct) + if err := t.Address.MarshalCBOR(w); err != nil { + return err + } + + // t.DealSize (big.Int) (struct) + if err := t.DealSize.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *RestoreBytesParams) UnmarshalCBOR(r io.Reader) error { + *t = RestoreBytesParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Address (address.Address) (struct) + + { + + if err := t.Address.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Address: %w", err) + } + + } + // t.DealSize (big.Int) (struct) + + { + + if err := t.DealSize.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.DealSize: %w", err) + } + + } + return nil +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/verifreg/verified_registry_actor.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/verifreg/verified_registry_actor.go new file mode 100644 index 0000000000..07f4a30240 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/verifreg/verified_registry_actor.go @@ -0,0 +1,301 @@ +package verifreg + +import ( + addr "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/specs-actors/actors/runtime" + . 
"github.com/filecoin-project/specs-actors/actors/util" + "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +type Actor struct{} + +func (a Actor) Exports() []interface{} { + return []interface{}{ + builtin.MethodConstructor: a.Constructor, + 2: a.AddVerifier, + 3: a.RemoveVerifier, + 4: a.AddVerifiedClient, + 5: a.UseBytes, + 6: a.RestoreBytes, + } +} + +func (a Actor) Code() cid.Cid { + return builtin.VerifiedRegistryActorCodeID +} + +func (a Actor) IsSingleton() bool { + return true +} + +func (a Actor) State() cbor.Er { + return new(State) +} + +var _ runtime.VMActor = Actor{} + +//////////////////////////////////////////////////////////////////////////////// +// Actor methods +//////////////////////////////////////////////////////////////////////////////// + +func (a Actor) Constructor(rt runtime.Runtime, rootKey *addr.Address) *abi.EmptyValue { + rt.ValidateImmediateCallerIs(builtin.SystemActorAddr) + + // root should be an ID address + idAddr, ok := rt.ResolveAddress(*rootKey) + builtin.RequireParam(rt, ok, "root should be an ID address") + + emptyMap, err := adt.MakeEmptyMap(adt.AsStore(rt)).Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to create state") + + st := ConstructState(emptyMap, idAddr) + rt.StateCreate(st) + return nil +} + +type AddVerifierParams struct { + Address addr.Address + Allowance DataCap +} + +func (a Actor) AddVerifier(rt runtime.Runtime, params *AddVerifierParams) *abi.EmptyValue { + if params.Allowance.LessThan(MinVerifiedDealSize) { + rt.Abortf(exitcode.ErrIllegalArgument, "Allowance %d below MinVerifiedDealSize for add verifier %v", params.Allowance, params.Address) + } + + var st State + rt.StateReadonly(&st) + rt.ValidateImmediateCallerIs(st.RootKey) + + // TODO We need to resolve the verifier address to an ID address before making this comparison. 
+ // https://github.com/filecoin-project/specs-actors/issues/556 + if params.Address == st.RootKey { + rt.Abortf(exitcode.ErrIllegalArgument, "Rootkey cannot be added as verifier") + } + rt.StateTransaction(&st, func() { + verifiers, err := adt.AsMap(adt.AsStore(rt), st.Verifiers) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load verifiers") + + verifiedClients, err := adt.AsMap(adt.AsStore(rt), st.VerifiedClients) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load verified clients") + + // A verified client cannot become a verifier + found, err := verifiedClients.Get(abi.AddrKey(params.Address), nil) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed get verified client state for %v", params.Address) + if found { + rt.Abortf(exitcode.ErrIllegalArgument, "verified client %v cannot become a verifier", params.Address) + } + + err = verifiers.Put(abi.AddrKey(params.Address), ¶ms.Allowance) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to add verifier") + + st.Verifiers, err = verifiers.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush verifiers") + }) + + return nil +} + +func (a Actor) RemoveVerifier(rt runtime.Runtime, verifierAddr *addr.Address) *abi.EmptyValue { + var st State + rt.StateReadonly(&st) + rt.ValidateImmediateCallerIs(st.RootKey) + + rt.StateTransaction(&st, func() { + verifiers, err := adt.AsMap(adt.AsStore(rt), st.Verifiers) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load verifiers") + + err = verifiers.Delete(abi.AddrKey(*verifierAddr)) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to remove verifier") + + st.Verifiers, err = verifiers.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush verifiers") + }) + + return nil +} + +type AddVerifiedClientParams struct { + Address addr.Address + Allowance DataCap +} + +func (a Actor) AddVerifiedClient(rt runtime.Runtime, 
params *AddVerifiedClientParams) *abi.EmptyValue { + if params.Allowance.LessThan(MinVerifiedDealSize) { + rt.Abortf(exitcode.ErrIllegalArgument, "allowance %d below MinVerifiedDealSize for add verified client %v", params.Allowance, params.Address) + } + rt.ValidateImmediateCallerAcceptAny() + + var st State + rt.StateReadonly(&st) + // TODO We need to resolve the client address to an ID address before making this comparison. + // https://github.com/filecoin-project/specs-actors/issues/556 + if st.RootKey == params.Address { + rt.Abortf(exitcode.ErrIllegalArgument, "Rootkey cannot be added as a verified client") + } + + rt.StateTransaction(&st, func() { + verifiers, err := adt.AsMap(adt.AsStore(rt), st.Verifiers) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load verifiers") + + verifiedClients, err := adt.AsMap(adt.AsStore(rt), st.VerifiedClients) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load verified clients") + + // Validate caller is one of the verifiers. + verifierAddr := rt.Caller() + var verifierCap DataCap + found, err := verifiers.Get(abi.AddrKey(verifierAddr), &verifierCap) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get verifier %v", verifierAddr) + if !found { + rt.Abortf(exitcode.ErrNotFound, "no such verifier %v", verifierAddr) + } + + // Validate client to be added isn't a verifier + found, err = verifiers.Get(abi.AddrKey(params.Address), nil) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get verifier") + if found { + rt.Abortf(exitcode.ErrIllegalArgument, "verifier %v cannot be added as a verified client", params.Address) + } + + // Compute new verifier cap and update. 
+ if verifierCap.LessThan(params.Allowance) { + rt.Abortf(exitcode.ErrIllegalArgument, "add more DataCap (%d) for VerifiedClient than allocated %d", params.Allowance, verifierCap) + } + newVerifierCap := big.Sub(verifierCap, params.Allowance) + + err = verifiers.Put(abi.AddrKey(verifierAddr), &newVerifierCap) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to update new verifier cap (%d) for %v", newVerifierCap, verifierAddr) + + // This is a one-time, upfront allocation. + // This allowance cannot be changed by calls to AddVerifiedClient as long as the client has not been removed. + // If parties need more allowance, they need to create a new verified client or use up the the current allowance + // and then create a new verified client. + found, err = verifiedClients.Get(abi.AddrKey(params.Address), nil) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get verified client %v", params.Address) + if found { + rt.Abortf(exitcode.ErrIllegalArgument, "verified client already exists: %v", params.Address) + } + + err = verifiedClients.Put(abi.AddrKey(params.Address), ¶ms.Allowance) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to add verified client %v with cap %d", params.Address, params.Allowance) + + st.Verifiers, err = verifiers.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush verifiers") + + st.VerifiedClients, err = verifiedClients.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush verified clients") + }) + + return nil +} + +type UseBytesParams struct { + Address addr.Address // Address of verified client. + DealSize abi.StoragePower // Number of bytes to use. +} + +// Called by StorageMarketActor during PublishStorageDeals. +// Do not allow partially verified deals (DealSize must be greater than equal to allowed cap). +// Delete VerifiedClient if remaining DataCap is smaller than minimum VerifiedDealSize. 
+func (a Actor) UseBytes(rt runtime.Runtime, params *UseBytesParams) *abi.EmptyValue { + rt.ValidateImmediateCallerIs(builtin.StorageMarketActorAddr) + + if params.DealSize.LessThan(MinVerifiedDealSize) { + rt.Abortf(exitcode.ErrIllegalArgument, "VerifiedDealSize: %d below minimum in UseBytes", params.DealSize) + } + + var st State + rt.StateTransaction(&st, func() { + verifiedClients, err := adt.AsMap(adt.AsStore(rt), st.VerifiedClients) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load verified clients") + + var vcCap DataCap + found, err := verifiedClients.Get(abi.AddrKey(params.Address), &vcCap) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get verified client %v", params.Address) + if !found { + rt.Abortf(exitcode.ErrNotFound, "no such verified client %v", params.Address) + } + Assert(vcCap.GreaterThanEqual(big.Zero())) + + if params.DealSize.GreaterThan(vcCap) { + rt.Abortf(exitcode.ErrIllegalArgument, "DealSize %d exceeds allowable cap: %d for VerifiedClient %v", params.DealSize, vcCap, params.Address) + } + + newVcCap := big.Sub(vcCap, params.DealSize) + if newVcCap.LessThan(MinVerifiedDealSize) { + // Delete entry if remaining DataCap is less than MinVerifiedDealSize. + // Will be restored later if the deal did not get activated with a ProvenSector. 
+ err = verifiedClients.Delete(abi.AddrKey(params.Address)) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to delete verified client %v", params.Address) + } else { + err = verifiedClients.Put(abi.AddrKey(params.Address), &newVcCap) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to update verified client %v with %v", params.Address, newVcCap) + } + + st.VerifiedClients, err = verifiedClients.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush verified clients") + }) + + return nil +} + +type RestoreBytesParams struct { + Address addr.Address + DealSize abi.StoragePower +} + +// Called by HandleInitTimeoutDeals from StorageMarketActor when a VerifiedDeal fails to init. +// Restore allowable cap for the client, creating new entry if the client has been deleted. +func (a Actor) RestoreBytes(rt runtime.Runtime, params *RestoreBytesParams) *abi.EmptyValue { + rt.ValidateImmediateCallerIs(builtin.StorageMarketActorAddr) + + if params.DealSize.LessThan(MinVerifiedDealSize) { + rt.Abortf(exitcode.ErrIllegalArgument, "Below minimum VerifiedDealSize requested in RestoreBytes: %d", params.DealSize) + } + + var st State + rt.StateReadonly(&st) + // TODO We need to resolve the client address to an ID address before making this comparison. 
+ // https://github.com/filecoin-project/specs-actors/issues/556 + if st.RootKey == params.Address { + rt.Abortf(exitcode.ErrIllegalArgument, "Cannot restore allowance for Rootkey") + } + + rt.StateTransaction(&st, func() { + verifiedClients, err := adt.AsMap(adt.AsStore(rt), st.VerifiedClients) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load verified clients") + + verifiers, err := adt.AsMap(adt.AsStore(rt), st.Verifiers) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load verifiers") + + // validate we are NOT attempting to do this for a verifier + found, err := verifiers.Get(abi.AddrKey(params.Address), nil) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed tp get verifier") + if found { + rt.Abortf(exitcode.ErrIllegalArgument, "cannot restore allowance for a verifier") + } + + var vcCap DataCap + found, err = verifiedClients.Get(abi.AddrKey(params.Address), &vcCap) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get verified client %v", params.Address) + if !found { + vcCap = big.Zero() + } + + newVcCap := big.Add(vcCap, params.DealSize) + err = verifiedClients.Put(abi.AddrKey(params.Address), &newVcCap) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to put verified client %v with %v", params.Address, newVcCap) + + st.VerifiedClients, err = verifiedClients.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load verifiers") + }) + + return nil +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/builtin/verifreg/verified_registry_state.go b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/verifreg/verified_registry_state.go new file mode 100644 index 0000000000..fb55b98360 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/builtin/verifreg/verified_registry_state.go @@ -0,0 +1,36 @@ +package verifreg + +import ( + addr "github.com/filecoin-project/go-address" + abi 
"github.com/filecoin-project/go-state-types/abi" + big "github.com/filecoin-project/go-state-types/big" + cid "github.com/ipfs/go-cid" +) + +// DataCap is an integer number of bytes. +// We can introduce policy changes and replace this in the future. +type DataCap = abi.StoragePower + +type State struct { + // Root key holder multisig. + // Authorize and remove verifiers. + RootKey addr.Address + + // Verifiers authorize VerifiedClients. + // Verifiers delegate their DataCap. + Verifiers cid.Cid // HAMT[addr.Address]DataCap + + // VerifiedClients can add VerifiedClientData, up to DataCap. + VerifiedClients cid.Cid // HAMT[addr.Address]DataCap +} + +var MinVerifiedDealSize abi.StoragePower = big.NewInt(1 << 20) // PARAM_FINISH + +// rootKeyAddress comes from genesis. +func ConstructState(emptyMapCid cid.Cid, rootKeyAddress addr.Address) *State { + return &State{ + RootKey: rootKeyAddress, + Verifiers: emptyMapCid, + VerifiedClients: emptyMapCid, + } +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/runtime/runtime.go b/vendor/github.com/filecoin-project/specs-actors/actors/runtime/runtime.go new file mode 100644 index 0000000000..d857a9c091 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/runtime/runtime.go @@ -0,0 +1,266 @@ +package runtime + +import ( + "bytes" + "context" + "io" + + "github.com/filecoin-project/go-address" + addr "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/go-state-types/rt" + cid "github.com/ipfs/go-cid" + + "github.com/filecoin-project/specs-actors/actors/runtime/proof" +) + +// Runtime is the VM's internal runtime object. +// this is everything that is accessible to actors, beyond parameters. 
+type Runtime interface { + // Information related to the current message being executed. + // When an actor invokes a method on another actor as a sub-call, these values reflect + // the sub-call context, rather than the top-level context. + Message + + // Provides a handle for the actor's state object. + StateHandle + + // Provides IPLD storage for actor state + Store + + // Provides the system call interface. + Syscalls + + // The network protocol version number at the current epoch. + NetworkVersion() network.Version + + // The current chain epoch number. The genesis block has epoch zero. + CurrEpoch() abi.ChainEpoch + + // Satisfies the requirement that every exported actor method must invoke at least one caller validation + // method before returning, without making any assertions about the caller. + ValidateImmediateCallerAcceptAny() + + // Validates that the immediate caller's address exactly matches one of a set of expected addresses, + // aborting if it does not. + // The caller address is always normalized to an ID address, so expected addresses must be + // ID addresses to have any expectation of passing validation. + ValidateImmediateCallerIs(addrs ...addr.Address) + + // Validates that the immediate caller is an actor with code CID matching one of a set of + // expected CIDs, aborting if it does not. + ValidateImmediateCallerType(types ...cid.Cid) + + // The balance of the receiver. Always >= zero. + CurrentBalance() abi.TokenAmount + + // Resolves an address of any protocol to an ID address (via the Init actor's table). + // This allows resolution of externally-provided SECP, BLS, or actor addresses to the canonical form. + // If the argument is an ID address it is returned directly. + ResolveAddress(address addr.Address) (addr.Address, bool) + + // Look up the code ID at an actor address. + // The address will be resolved as if via ResolveAddress, if necessary, so need not be an ID-address. 
+ GetActorCodeCID(addr addr.Address) (ret cid.Cid, ok bool) + + // GetRandomnessFromBeacon returns a (pseudo)random byte array drawing from a random beacon at a prior epoch. + // The beacon value is combined with the personalization tag, epoch number, and explicitly provided entropy. + // The personalization tag may be any int64 value. + // The epoch must be less than the current epoch. The epoch may be negative, in which case + // it addresses the beacon value from genesis block. + // The entropy may be any byte array, or nil. + GetRandomnessFromBeacon(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness + + // GetRandomnessFromTickets samples randomness from the ticket chain. Randomess + // sampled through this method is unique per potential fork, and as a + // result, processes relying on this randomness are tied to whichever fork + // they choose. + // See GetRandomnessFromBeacon for notes about the personalization tag, epoch, and entropy. + GetRandomnessFromTickets(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness + + // Sends a message to another actor, returning the exit code and return value envelope. + // If the invoked method does not return successfully, its state changes (and that of any messages it sent in turn) + // will be rolled back. + Send(toAddr addr.Address, methodNum abi.MethodNum, params cbor.Marshaler, value abi.TokenAmount, out cbor.Er) exitcode.ExitCode + + // Halts execution upon an error from which the receiver cannot recover. The caller will receive the exitcode and + // an empty return value. State changes made within this call will be rolled back. + // This method does not return. + // The provided exit code must be >= exitcode.FirstActorExitCode. + // The message and args are for diagnostic purposes and do not persist on chain. They should be suitable for + // passing to fmt.Errorf(msg, args...). 
+ Abortf(errExitCode exitcode.ExitCode, msg string, args ...interface{}) + + // Computes an address for a new actor. The returned address is intended to uniquely refer to + // the actor even in the event of a chain re-org (whereas an ID-address might refer to a + // different actor after messages are re-ordered). + // Always an ActorExec address. + NewActorAddress() addr.Address + + // Creates an actor with code `codeID` and address `address`, with empty state. + // May only be called by Init actor. + // Aborts if the provided address has previously been created. + CreateActor(codeId cid.Cid, address addr.Address) + + // Deletes the executing actor from the state tree, transferring any balance to beneficiary. + // Aborts if the beneficiary does not exist or is the calling actor. + // May only be called by the actor itself. + DeleteActor(beneficiary addr.Address) + + // Returns the total token supply in circulation at the beginning of the current epoch. + // The circulating supply is the sum of: + // - rewards emitted by the reward actor, + // - funds vested from lock-ups in the genesis state, + // less the sum of: + // - funds burnt, + // - pledge collateral locked in storage miner actors (recorded in the storage power actor) + // - deal collateral locked by the storage market actor + TotalFilCircSupply() abi.TokenAmount + + // Provides a Go context for use by HAMT, etc. + // The VM is intended to provide an idealised machine abstraction, with infinite storage etc, so this context + // should not be used by actor code directly. + Context() context.Context + + // Starts a new tracing span. The span must be End()ed explicitly by invoking or deferring EndSpan + StartSpan(name string) (EndSpan func()) + + // ChargeGas charges specified amount of `gas` for execution. + // `name` provides information about gas charging point + // `virtual` sets virtual amount of gas to charge, this amount is not counted + // toward execution cost. 
This functionality is used for observing global changes + // in total gas charged if amount of gas charged was to be changed. + ChargeGas(name string, gas int64, virtual int64) + + // Note events that may make debugging easier + Log(level rt.LogLevel, msg string, args ...interface{}) +} + +// Store defines the storage module exposed to actors. +type Store interface { + // Retrieves and deserializes an object from the store into `o`. Returns whether successful. + StoreGet(c cid.Cid, o cbor.Unmarshaler) bool + // Serializes and stores an object, returning its CID. + StorePut(x cbor.Marshaler) cid.Cid +} + +// Message contains information available to the actor about the executing message. +// These values are fixed for the duration of an invocation. +type Message interface { + // The address of the immediate calling actor. Always an ID-address. + // If an actor invokes its own method, Caller() == Receiver(). + Caller() addr.Address + + // The address of the actor receiving the message. Always an ID-address. + Receiver() addr.Address + + // The value attached to the message being processed, implicitly added to CurrentBalance() + // of Receiver() before method invocation. + // This value came from Caller(). + ValueReceived() abi.TokenAmount +} + +// Pure functions implemented as primitives by the runtime. +type Syscalls interface { + // Verifies that a signature is valid for an address and plaintext. + // If the address is a public-key type address, it is used directly. + // If it's an ID-address, the actor is looked up in state. It must be an account actor, and the + // public key is obtained from it's state. + VerifySignature(signature crypto.Signature, signer addr.Address, plaintext []byte) error + // Hashes input data using blake2b with 256 bit output. + HashBlake2b(data []byte) [32]byte + // Computes an unsealed sector CID (CommD) from its constituent piece CIDs (CommPs) and sizes. 
+ ComputeUnsealedSectorCID(reg abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) + // Verifies a sector seal proof. + VerifySeal(vi proof.SealVerifyInfo) error + + BatchVerifySeals(vis map[address.Address][]proof.SealVerifyInfo) (map[address.Address][]bool, error) + + // Verifies a proof of spacetime. + VerifyPoSt(vi proof.WindowPoStVerifyInfo) error + // Verifies that two block headers provide proof of a consensus fault: + // - both headers mined by the same actor + // - headers are different + // - first header is of the same or lower epoch as the second + // - the headers provide evidence of a fault (see the spec for the different fault types). + // The parameters are all serialized block headers. The third "extra" parameter is consulted only for + // the "parent grinding fault", in which case it must be the sibling of h1 (same parent tipset) and one of the + // blocks in an ancestor of h2. + // Returns nil and an error if the headers don't prove a fault. + VerifyConsensusFault(h1, h2, extra []byte) (*ConsensusFault, error) +} + +// StateHandle provides mutable, exclusive access to actor state. +type StateHandle interface { + // Create initializes the state object. + // This is only valid in a constructor function and when the state has not yet been initialized. + StateCreate(obj cbor.Marshaler) + + // Readonly loads a readonly copy of the state into the argument. + // + // Any modification to the state is illegal and will result in an abort. + StateReadonly(obj cbor.Unmarshaler) + + // Transaction loads a mutable version of the state into the `obj` argument and protects + // the execution from side effects (including message send). + // + // The second argument is a function which allows the caller to mutate the state. + // + // If the state is modified after this function returns, execution will abort. + // + // The gas cost of this method is that of a Store.Put of the mutated state object. 
+ // + // Note: the Go signature is not ideal due to lack of type system power. + // + // # Usage + // ```go + // var state SomeState + // ret := rt.StateTransaction(&state, func() (interface{}) { + // // make some changes + // st.ImLoaded = True + // return st.Thing, nil + // }) + // // state.ImLoaded = False // BAD!! state is readonly outside the lambda, it will panic + // ``` + StateTransaction(obj cbor.Er, f func()) +} + +// Result of checking two headers for a consensus fault. +type ConsensusFault struct { + // Address of the miner at fault (always an ID address). + Target addr.Address + // Epoch of the fault, which is the higher epoch of the two blocks causing it. + Epoch abi.ChainEpoch + // Type of fault. + Type ConsensusFaultType +} + +type ConsensusFaultType int64 + +const ( + //ConsensusFaultNone ConsensusFaultType = 0 + ConsensusFaultDoubleForkMining ConsensusFaultType = 1 + ConsensusFaultParentGrinding ConsensusFaultType = 2 + ConsensusFaultTimeOffsetMining ConsensusFaultType = 3 +) + +// Wraps already-serialized bytes as CBOR-marshalable. 
+type CBORBytes []byte + +func (b CBORBytes) MarshalCBOR(w io.Writer) error { + _, err := w.Write(b) + return err +} + +func (b *CBORBytes) UnmarshalCBOR(r io.Reader) error { + var c bytes.Buffer + _, err := c.ReadFrom(r) + *b = c.Bytes() + return err +} + +type VMActor = rt.VMActor diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/array.go b/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/array.go new file mode 100644 index 0000000000..8cd51db575 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/array.go @@ -0,0 +1,109 @@ +package adt + +import ( + "bytes" + + amt "github.com/filecoin-project/go-amt-ipld/v2" + "github.com/filecoin-project/go-state-types/cbor" + cid "github.com/ipfs/go-cid" + errors "github.com/pkg/errors" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +// Array stores a sparse sequence of values in an AMT. +type Array struct { + root *amt.Root + store Store +} + +// AsArray interprets a store as an AMT-based array with root `r`. +func AsArray(s Store, r cid.Cid) (*Array, error) { + root, err := amt.LoadAMT(s.Context(), s, r) + if err != nil { + return nil, xerrors.Errorf("failed to root: %w", err) + } + + return &Array{ + root: root, + store: s, + }, nil +} + +// Creates a new map backed by an empty HAMT and flushes it to the store. +func MakeEmptyArray(s Store) *Array { + root := amt.NewAMT(s) + return &Array{ + root: root, + store: s, + } +} + +// Returns the root CID of the underlying AMT. +func (a *Array) Root() (cid.Cid, error) { + return a.root.Flush(a.store.Context()) +} + +// Appends a value to the end of the array. Assumes continuous array. 
+// If the array isn't continuous use Set and a separate counter +func (a *Array) AppendContinuous(value cbor.Marshaler) error { + if err := a.root.Set(a.store.Context(), a.root.Count, value); err != nil { + return errors.Wrapf(err, "array append failed to set index %v value %v in root %v, ", a.root.Count, value, a.root) + } + return nil +} + +func (a *Array) Set(i uint64, value cbor.Marshaler) error { + if err := a.root.Set(a.store.Context(), i, value); err != nil { + return xerrors.Errorf("array set failed to set index %v in root %v: %w", i, a.root, err) + } + return nil +} + +func (a *Array) Delete(i uint64) error { + if err := a.root.Delete(a.store.Context(), i); err != nil { + return xerrors.Errorf("array delete failed to delete index %v in root %v: %w", i, a.root, err) + } + return nil +} + +func (a *Array) BatchDelete(ix []uint64) error { + if err := a.root.BatchDelete(a.store.Context(), ix); err != nil { + return xerrors.Errorf("array delete failed to batchdelete: %w", err) + } + return nil +} + +// Iterates all entries in the array, deserializing each value in turn into `out` and then calling a function. +// Iteration halts if the function returns an error. +// If the output parameter is nil, deserialization is skipped. +func (a *Array) ForEach(out cbor.Unmarshaler, fn func(i int64) error) error { + return a.root.ForEach(a.store.Context(), func(k uint64, val *cbg.Deferred) error { + if out != nil { + if deferred, ok := out.(*cbg.Deferred); ok { + // fast-path deferred -> deferred to avoid re-decoding. 
+ *deferred = *val + } else if err := out.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return err + } + } + return fn(int64(k)) + }) +} + +func (a *Array) Length() uint64 { + return a.root.Count +} + +// Get retrieves array element into the 'out' unmarshaler, returning a boolean +// indicating whether the element was found in the array +func (a *Array) Get(k uint64, out cbor.Unmarshaler) (bool, error) { + + if err := a.root.Get(a.store.Context(), k, out); err == nil { + return true, nil + } else if _, nf := err.(*amt.ErrNotFound); nf { + return false, nil + } else { + return false, err + } +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/balancetable.go b/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/balancetable.go new file mode 100644 index 0000000000..ba1a80b6d1 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/balancetable.go @@ -0,0 +1,102 @@ +package adt + +import ( + addr "github.com/filecoin-project/go-address" + cid "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" +) + +// A specialization of a map of addresses to (positive) token amounts. +// Absent keys implicitly have a balance of zero. +type BalanceTable Map + +// Interprets a store as balance table with root `r`. +func AsBalanceTable(s Store, r cid.Cid) (*BalanceTable, error) { + m, err := AsMap(s, r) + if err != nil { + return nil, err + } + + return &BalanceTable{ + root: m.root, + store: s, + }, nil +} + +// Returns the root cid of underlying HAMT. +func (t *BalanceTable) Root() (cid.Cid, error) { + return (*Map)(t).Root() +} + +// Gets the balance for a key, which is zero if they key has never been added to. 
+func (t *BalanceTable) Get(key addr.Address) (abi.TokenAmount, error) { + var value abi.TokenAmount + found, err := (*Map)(t).Get(abi.AddrKey(key), &value) + if !found || err != nil { + value = big.Zero() + } + + return value, err +} + +// Adds an amount to a balance, requiring the resulting balance to be non-negative. +func (t *BalanceTable) Add(key addr.Address, value abi.TokenAmount) error { + prev, err := t.Get(key) + if err != nil { + return err + } + sum := big.Add(prev, value) + sign := sum.Sign() + if sign < 0 { + return xerrors.Errorf("adding %v to balance %v would give negative: %v", value, prev, sum) + } else if sign == 0 && !prev.IsZero() { + return (*Map)(t).Delete(abi.AddrKey(key)) + } + return (*Map)(t).Put(abi.AddrKey(key), &sum) +} + +// Subtracts up to the specified amount from a balance, without reducing the balance below some minimum. +// Returns the amount subtracted. +func (t *BalanceTable) SubtractWithMinimum(key addr.Address, req abi.TokenAmount, floor abi.TokenAmount) (abi.TokenAmount, error) { + prev, err := t.Get(key) + if err != nil { + return big.Zero(), err + } + + available := big.Max(big.Zero(), big.Sub(prev, floor)) + sub := big.Min(available, req) + if sub.Sign() > 0 { + err = t.Add(key, sub.Neg()) + if err != nil { + return big.Zero(), err + } + } + return sub, nil +} + +// MustSubtract subtracts the given amount from the account's balance. 
+// Returns an error if the account has insufficient balance +func (t *BalanceTable) MustSubtract(key addr.Address, req abi.TokenAmount) error { + prev, err := t.Get(key) + if err != nil { + return err + } + if req.GreaterThan(prev) { + return xerrors.New("couldn't subtract the requested amount") + } + return t.Add(key, req.Neg()) +} + +// Returns the total balance held by this BalanceTable +func (t *BalanceTable) Total() (abi.TokenAmount, error) { + total := big.Zero() + var cur abi.TokenAmount + err := (*Map)(t).ForEach(&cur, func(key string) error { + total = big.Add(total, cur) + return nil + }) + return total, err +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/map.go b/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/map.go new file mode 100644 index 0000000000..5d9c573aaf --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/map.go @@ -0,0 +1,128 @@ +package adt + +import ( + "bytes" + + hamt "github.com/filecoin-project/go-hamt-ipld" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + cid "github.com/ipfs/go-cid" + "github.com/minio/sha256-simd" + errors "github.com/pkg/errors" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +// Branching factor of the HAMT. +// This value has been empirically chosen, but the optimal value for maps with different mutation profiles +// may differ, in which case we can expose it for configuration. +const hamtBitwidth = 5 + +// HamtOptions specifies all the options used to construct filecoin HAMTs. +var HamtOptions = []hamt.Option{ + hamt.UseTreeBitWidth(hamtBitwidth), + hamt.UseHashFunction(func(input []byte) []byte { + res := sha256.Sum256(input) + return res[:] + }), +} + +// Map stores key-value pairs in a HAMT. +type Map struct { + lastCid cid.Cid + root *hamt.Node + store Store +} + +// AsMap interprets a store as a HAMT-based map with root `r`. 
+func AsMap(s Store, r cid.Cid) (*Map, error) { + nd, err := hamt.LoadNode(s.Context(), s, r, HamtOptions...) + if err != nil { + return nil, xerrors.Errorf("failed to load hamt node: %w", err) + } + + return &Map{ + lastCid: r, + root: nd, + store: s, + }, nil +} + +// Creates a new map backed by an empty HAMT and flushes it to the store. +func MakeEmptyMap(s Store) *Map { + nd := hamt.NewNode(s, HamtOptions...) + return &Map{ + lastCid: cid.Undef, + root: nd, + store: s, + } +} + +// Returns the root cid of underlying HAMT. +func (m *Map) Root() (cid.Cid, error) { + if err := m.root.Flush(m.store.Context()); err != nil { + return cid.Undef, xerrors.Errorf("failed to flush map root: %w", err) + } + + c, err := m.store.Put(m.store.Context(), m.root) + if err != nil { + return cid.Undef, xerrors.Errorf("writing map root object: %w", err) + } + m.lastCid = c + + return c, nil +} + +// Put adds value `v` with key `k` to the hamt store. +func (m *Map) Put(k abi.Keyer, v cbor.Marshaler) error { + if err := m.root.Set(m.store.Context(), k.Key(), v); err != nil { + return errors.Wrapf(err, "map put failed set in node %v with key %v value %v", m.lastCid, k.Key(), v) + } + return nil +} + +// Get puts the value at `k` into `out`. +func (m *Map) Get(k abi.Keyer, out cbor.Unmarshaler) (bool, error) { + if err := m.root.Find(m.store.Context(), k.Key(), out); err != nil { + if err == hamt.ErrNotFound { + return false, nil + } + return false, errors.Wrapf(err, "map get failed find in node %v with key %v", m.lastCid, k.Key()) + } + return true, nil +} + +// Delete removes the value at `k` from the hamt store. +func (m *Map) Delete(k abi.Keyer) error { + if err := m.root.Delete(m.store.Context(), k.Key()); err != nil { + return errors.Wrapf(err, "map delete failed in node %v key %v", m.root, k.Key()) + } + + return nil +} + +// Iterates all entries in the map, deserializing each value in turn into `out` and then +// calling a function with the corresponding key. 
+// Iteration halts if the function returns an error. +// If the output parameter is nil, deserialization is skipped. +func (m *Map) ForEach(out cbor.Unmarshaler, fn func(key string) error) error { + return m.root.ForEach(m.store.Context(), func(k string, val interface{}) error { + if out != nil { + // Why doesn't hamt.ForEach() just return the value as bytes? + err := out.UnmarshalCBOR(bytes.NewReader(val.(*cbg.Deferred).Raw)) + if err != nil { + return err + } + } + return fn(k) + }) +} + +// Collects all the keys from the map into a slice of strings. +func (m *Map) CollectKeys() (out []string, err error) { + err = m.ForEach(nil, func(key string) error { + out = append(out, key) + return nil + }) + return +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/multimap.go b/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/multimap.go new file mode 100644 index 0000000000..747b3fcc16 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/multimap.go @@ -0,0 +1,123 @@ +package adt + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + cid "github.com/ipfs/go-cid" + errors "github.com/pkg/errors" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" +) + +// Multimap stores multiple values per key in a HAMT of AMTs. +// The order of insertion of values for each key is retained. +type Multimap struct { + mp *Map +} + +// Interprets a store as a HAMT-based map of AMTs with root `r`. +func AsMultimap(s Store, r cid.Cid) (*Multimap, error) { + m, err := AsMap(s, r) + if err != nil { + return nil, err + } + + return &Multimap{m}, nil +} + +// Creates a new map backed by an empty HAMT and flushes it to the store. +func MakeEmptyMultimap(s Store) *Multimap { + m := MakeEmptyMap(s) + return &Multimap{m} +} + +// Returns the root cid of the underlying HAMT. 
+func (mm *Multimap) Root() (cid.Cid, error) { + return mm.mp.Root() +} + +// Adds a value for a key. +func (mm *Multimap) Add(key abi.Keyer, value cbor.Marshaler) error { + // Load the array under key, or initialize a new empty one if not found. + array, found, err := mm.Get(key) + if err != nil { + return err + } + if !found { + array = MakeEmptyArray(mm.mp.store) + } + + // Append to the array. + if err = array.AppendContinuous(value); err != nil { + return errors.Wrapf(err, "failed to add multimap key %v value %v", key, value) + } + + c, err := array.Root() + if err != nil { + return xerrors.Errorf("failed to flush child array: %w", err) + } + + // Store the new array root under key. + newArrayRoot := cbg.CborCid(c) + err = mm.mp.Put(key, &newArrayRoot) + if err != nil { + return errors.Wrapf(err, "failed to store multimap values") + } + return nil +} + +// Removes all values for a key. +func (mm *Multimap) RemoveAll(key abi.Keyer) error { + err := mm.mp.Delete(key) + if err != nil { + return errors.Wrapf(err, "failed to delete multimap key %v root %v", key, mm.mp.root) + } + return nil +} + +// Iterates all entries for a key in the order they were inserted, deserializing each value in turn into `out` and then +// calling a function. +// Iteration halts if the function returns an error. +// If the output parameter is nil, deserialization is skipped. 
+func (mm *Multimap) ForEach(key abi.Keyer, out cbor.Unmarshaler, fn func(i int64) error) error { + array, found, err := mm.Get(key) + if err != nil { + return err + } + if found { + return array.ForEach(out, fn) + } + return nil +} + +func (mm *Multimap) ForAll(fn func(k string, arr *Array) error) error { + var arrRoot cbg.CborCid + if err := mm.mp.ForEach(&arrRoot, func(k string) error { + arr, err := AsArray(mm.mp.store, cid.Cid(arrRoot)) + if err != nil { + return err + } + + return fn(k, arr) + }); err != nil { + return err + } + + return nil +} + +func (mm *Multimap) Get(key abi.Keyer) (*Array, bool, error) { + var arrayRoot cbg.CborCid + found, err := mm.mp.Get(key, &arrayRoot) + if err != nil { + return nil, false, errors.Wrapf(err, "failed to load multimap key %v", key) + } + var array *Array + if found { + array, err = AsArray(mm.mp.store, cid.Cid(arrayRoot)) + if err != nil { + return nil, false, xerrors.Errorf("failed to load value %v as an array: %w", key, err) + } + } + return array, found, nil +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/set.go b/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/set.go new file mode 100644 index 0000000000..331c2e9a89 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/set.go @@ -0,0 +1,60 @@ +package adt + +import ( + "github.com/filecoin-project/go-state-types/abi" + cid "github.com/ipfs/go-cid" +) + +// Set interprets a Map as a set, storing keys (with empty values) in a HAMT. +type Set struct { + m *Map +} + +// AsSet interprets a store as a HAMT-based set with root `r`. +func AsSet(s Store, r cid.Cid) (*Set, error) { + m, err := AsMap(s, r) + if err != nil { + return nil, err + } + + return &Set{ + m: m, + }, nil +} + +// NewSet creates a new HAMT with root `r` and store `s`. +func MakeEmptySet(s Store) *Set { + m := MakeEmptyMap(s) + return &Set{m} +} + +// Root return the root cid of HAMT. 
+func (h *Set) Root() (cid.Cid, error) { + return h.m.Root() +} + +// Put adds `k` to the set. +func (h *Set) Put(k abi.Keyer) error { + return h.m.Put(k, nil) +} + +// Has returns true iff `k` is in the set. +func (h *Set) Has(k abi.Keyer) (bool, error) { + return h.m.Get(k, nil) +} + +// Delete removes `k` from the set. +func (h *Set) Delete(k abi.Keyer) error { + return h.m.Delete(k) +} + +// ForEach iterates over all values in the set, calling the callback for each value. +// Returning error from the callback stops the iteration. +func (h *Set) ForEach(cb func(k string) error) error { + return h.m.ForEach(nil, cb) +} + +// Collects all the keys from the set into a slice of strings. +func (h *Set) CollectKeys() (out []string, err error) { + return h.m.CollectKeys() +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/store.go b/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/store.go new file mode 100644 index 0000000000..c3a5e93f36 --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/store.go @@ -0,0 +1,69 @@ +package adt + +import ( + "context" + + "github.com/filecoin-project/go-state-types/cbor" + exitcode "github.com/filecoin-project/go-state-types/exitcode" + cid "github.com/ipfs/go-cid" + ipldcbor "github.com/ipfs/go-ipld-cbor" + + vmr "github.com/filecoin-project/specs-actors/actors/runtime" +) + +// Store defines an interface required to back the ADTs in this package. +type Store interface { + Context() context.Context + ipldcbor.IpldStore +} + +// Adapts a vanilla IPLD store as an ADT store. +func WrapStore(ctx context.Context, store ipldcbor.IpldStore) Store { + return &wstore{ + ctx: ctx, + IpldStore: store, + } +} + +type wstore struct { + ctx context.Context + ipldcbor.IpldStore +} + +var _ Store = &wstore{} + +func (s *wstore) Context() context.Context { + return s.ctx +} + +// Adapter for a Runtime as an ADT Store. + +// Adapts a Runtime as an ADT store. 
+func AsStore(rt vmr.Runtime) Store { + return rtStore{rt} +} + +type rtStore struct { + vmr.Runtime +} + +var _ Store = &rtStore{} + +func (r rtStore) Context() context.Context { + return r.Runtime.Context() +} + +func (r rtStore) Get(_ context.Context, c cid.Cid, out interface{}) error { + // The Go context is (un/fortunately?) dropped here. + // See https://github.com/filecoin-project/specs-actors/issues/140 + if !r.StoreGet(c, out.(cbor.Unmarshaler)) { + r.Abortf(exitcode.ErrNotFound, "not found") + } + return nil +} + +func (r rtStore) Put(_ context.Context, v interface{}) (cid.Cid, error) { + // The Go context is (un/fortunately?) dropped here. + // See https://github.com/filecoin-project/specs-actors/issues/140 + return r.StorePut(v.(cbor.Marshaler)), nil +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/util/assert.go b/vendor/github.com/filecoin-project/specs-actors/actors/util/assert.go new file mode 100644 index 0000000000..f0e03b848a --- /dev/null +++ b/vendor/github.com/filecoin-project/specs-actors/actors/util/assert.go @@ -0,0 +1,21 @@ +package util + +import "fmt" + +// Indicates a condition that should never happen. If encountered, execution will halt and the +// resulting state is undefined. 
+func AssertMsg(b bool, format string, a ...interface{}) { + if !b { + panic(fmt.Sprintf(format, a...)) + } +} + +func Assert(b bool) { + AssertMsg(b, "assertion failed") +} + +func AssertNoError(e error) { + if e != nil { + panic(e.Error()) + } +} diff --git a/vendor/github.com/gbrlsnchs/jwt/.editorconfig b/vendor/github.com/gbrlsnchs/jwt/.editorconfig new file mode 100644 index 0000000000..c6a5f9d7b9 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/.editorconfig @@ -0,0 +1,16 @@ +root = true + +[*] +end_of_line = lf +insert_final_newline = false +indent_size = 2 +indent_style = space + +[{README.md,*.go,Makefile}] +indent_style = tab + +[{README.md,*.go,COMMIT_EDITMSG}] +tab_width = 8 + +[Makefile] +indent_size = 4 diff --git a/vendor/github.com/gbrlsnchs/jwt/.github/FUNDING.yml b/vendor/github.com/gbrlsnchs/jwt/.github/FUNDING.yml new file mode 100644 index 0000000000..45a6d4e107 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/.github/FUNDING.yml @@ -0,0 +1,12 @@ +# These are supported funding model platforms + +github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] +patreon: gbrlsnchs +open_collective: # Replace with a single Open Collective username +ko_fi: # Replace with a single Ko-fi username +tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel +community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry +liberapay: # Replace with a single Liberapay username +issuehunt: # Replace with a single IssueHunt username +otechie: # Replace with a single Otechie username +custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] diff --git a/vendor/github.com/gbrlsnchs/jwt/.github/ISSUE_TEMPLATE/bug_report.md b/vendor/github.com/gbrlsnchs/jwt/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000000..360dfce75c --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,26 @@ +--- 
+name: Bug report +about: Create a report to help us improve +title: '' +labels: bug +assignees: gbrlsnchs + +--- + +**Describe the bug** + + +**To Reproduce** +Steps to reproduce the behavior: +1. + +**Expected behavior** + + +**Please complete the following information:** + - OS: + - Go version + - This package's version + +**Additional context** + diff --git a/vendor/github.com/gbrlsnchs/jwt/.github/ISSUE_TEMPLATE/feature_request.md b/vendor/github.com/gbrlsnchs/jwt/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000000..c50da1d30b --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: enhancement +assignees: gbrlsnchs + +--- + +**Is your feature request related to a problem? Please describe.** + + +**Describe the solution you'd like** + + +**Describe alternatives you've considered** + + +**Additional context** + diff --git a/vendor/github.com/gbrlsnchs/jwt/.github/workflows/main.yml b/vendor/github.com/gbrlsnchs/jwt/.github/workflows/main.yml new file mode 100644 index 0000000000..ee512284e9 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/.github/workflows/main.yml @@ -0,0 +1,44 @@ +name: 'Linux, macOS and Windows' +on: [push, pull_request] +jobs: + lint: + strategy: + matrix: + go: ['1.12', '1.13'] + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v1 + - name: Set up Go 1.12 + uses: actions/setup-go@v1 + with: + go-version: ${{ matrix.os }} + - name: Lint source code + env: + MAGEFILE_VERBOSE: true + run: | + GOBIN="$(pwd)/.bin" go install github.com/magefile/mage + ./.bin/mage install + ./.bin/mage lint + + test: + needs: lint + strategy: + matrix: + os: [macOS-latest, ubuntu-latest, windows-latest] + go: ['1.11', '1.12', '1.13'] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v1 + - name: Set up Go version ${{ matrix.go }} + uses: 
actions/setup-go@v1 + with: + go-version: ${{ matrix.go }} + - name: Run test + env: + GO111MODULE: on + MAGEFILE_VERBOSE: true + shell: bash + run: | + GOBIN="$(pwd)/.bin" go install github.com/magefile/mage + ./.bin/mage test diff --git a/vendor/github.com/gbrlsnchs/jwt/.gitignore b/vendor/github.com/gbrlsnchs/jwt/.gitignore new file mode 100644 index 0000000000..6d05b29cc7 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/.gitignore @@ -0,0 +1,3 @@ +tags +*.out +.bin/ diff --git a/vendor/github.com/gbrlsnchs/jwt/CHANGELOG.md b/vendor/github.com/gbrlsnchs/jwt/CHANGELOG.md new file mode 100644 index 0000000000..8efcf48f9b --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/CHANGELOG.md @@ -0,0 +1,156 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +## [Unreleased] +### Added +- Signing and verifying using [RSA-PSS](https://en.wikipedia.org/wiki/Probabilistic_signature_scheme). +- Signing and verifying using [Ed25519](https://ed25519.cr.yp.to/). +- `Audience` type for handling the `aud` claim [according to the RFC](https://tools.ietf.org/html/rfc7519#section-4.1.3). +- `Algorithm` interface. +- `RawToken` type. +- `Payload` and `Header` structs. +- `Resolver` interface. +- `jwtutil` package and a type that implements `Resolver` that dynamically resolves which algorithm to use. + +### Changed +- Improve performance by storing SHA hash functions in `sync.Pool`. +- Change signing/verifying methods constructors' names. +- Sign tokens with global function `Sign`. +- Verify tokens with global function `Verify`. + +### Fixed +- Allowing arbitrary payload. + +### Removed +- Support for `go1.10`. +- `Marshal` and `Unmarshal` functions. +- `Marshaler` and `Unmarshaler` interfaces. +- `Signer` interface. 
+ +## [2.0.0] - 2018-09-14 +### Added +- `Parse` and `ParseBytes` functions. +- `Marshal` and `Unmarshal` functions. +- `Marshaler` interface. +- `Unmarshaler` interface. +- Content type header parameter. + +### Changed +- Modify `Signer` signature. +- Add claims directly to `JWT` struct. +- Embed `header` to JWT. +- Add README texts, examples and usage. +- Rename `const.go` to `methods.go`. +- Add prefix `New` to signing methods constructors. +- Run `vgo` for testing (this enables testing the package against Go 1.10); + +### Removed +- `Sign` and `Verify` functions. +- Base64 encoding and deconding functions. +- `Options` struct. +- `Claims` struct. +- Functions that extract JWT from contexts and requests. + +## [1.1.0] - 2018-08-22 +### Changed +- Prevent expensive slice reallocation when signing a JWT. +- Refactor tests. + +### Fixed +- Signature of "none" algorithm. + +### Removed +- `internal` package. + +## [1.0.2] - 2018-07-19 +### Removed +- Makefile. +- Benchmark test (unused). + +## [1.0.1] - 2018-07-19 +### Fixed +- Wrap Travis CI Golang versions in quotes (for parsing issues, see [this](https://github.com/travis-ci/travis-ci/issues/9247)). + +## [1.0.0] - 2018-07-19 +### Added +- AppVeyor configuration file for running tests in Windows. +- `vgo` module file. + +### Changed +- `FromContext` now receives a context key as additional parameter. +- `FromContext` now tries to build a JWT if value in context is a string. +- Simplified Travis CI configuration file. +- Update README to explain the motivation to have created this library and its differences from other JWT libraries for Golang. + +## [0.5.0] - 2018-03-12 +### Added +- `FromContext` function to extract a JWT object from a context. +- `FromCookie` function to extract a JWT object from a cookie. + +### Changed +- Split tests into several files in order to organize them. + +### Fixed +- Example in README file. + +## [0.4.0] - 2018-02-16 +### Added +- Support for "none" method. 
+- Tests for "none" method. +- Missing JWTID claim. +- Plugable validation via validator functions. + +### Changed +- `(*JWT).JWTID` method name to `(*JWT).ID`. + +### Fixed +- Message in `ErrECDSASigLen`. + +### Removed +- Comments from custom errors, since they are self-explanatory. + +## [0.3.0] - 2018-02-13 +### Changed +- Package structure. + +### Removed +- Additional packages (`jwtcrypto` and `jwtutil`). + +## [0.2.0] - 2018-02-06 +### Added +- New test cases. +- Claims' timestamps validation. + +### Changed +- Tests organization. +- Use `time.After` and `time.Before` for validating timestamps. +- `jwtcrypto/none.None` now implements `jwtcrypto.Signer`. + +### Fixed +- Panicking when private or public keys are `nil`. + +## 0.1.0 - 2018-02-06 +### Added +- This changelog file. +- README file. +- MIT License. +- Travis CI configuration file. +- Makefile. +- Git ignore file. +- EditorConfig file. +- This package's source code, including examples and tests. +- Go dep files. + +[Unreleased]: https://github.com/gbrlsnchs/jwt/compare/v2.0.0...HEAD +[2.0.0]: https://github.com/gbrlsnchs/jwt/compare/v1.1.0...v2.0.0 +[1.1.0]: https://github.com/gbrlsnchs/jwt/compare/v1.0.2...v1.1.0 +[1.0.2]: https://github.com/gbrlsnchs/jwt/compare/v1.0.1...v1.0.2 +[1.0.1]: https://github.com/gbrlsnchs/jwt/compare/v1.0.0...v1.0.1 +[1.0.0]: https://github.com/gbrlsnchs/jwt/compare/v0.5.0...v1.0.0 +[0.5.0]: https://github.com/gbrlsnchs/jwt/compare/v0.4.0...v0.5.0 +[0.4.0]: https://github.com/gbrlsnchs/jwt/compare/v0.3.0...v0.4.0 +[0.3.0]: https://github.com/gbrlsnchs/jwt/compare/v0.2.0...v0.3.0 +[0.2.0]: https://github.com/gbrlsnchs/jwt/compare/v0.1.0...v0.2.0 diff --git a/vendor/github.com/gbrlsnchs/jwt/LICENSE b/vendor/github.com/gbrlsnchs/jwt/LICENSE new file mode 100644 index 0000000000..f276fda569 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Gabriel Sanches + +Permission is hereby granted, free of charge, to any 
person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/gbrlsnchs/jwt/README.md b/vendor/github.com/gbrlsnchs/jwt/README.md new file mode 100644 index 0000000000..8b1b07614e --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/README.md @@ -0,0 +1,257 @@ +# jwt (JSON Web Token for Go) +[![JWT compatible](https://jwt.io/img/badge.svg)](https://jwt.io) + +[![Github Actions Status](https://github.com/gbrlsnchs/jwt/workflows/Linux,%20macOS%20and%20Windows/badge.svg)](https://github.com/gbrlsnchs/jwt/actions) +[![Go Report Card](https://goreportcard.com/badge/github.com/gbrlsnchs/jwt)](https://goreportcard.com/report/github.com/gbrlsnchs/jwt) +[![GoDoc](https://godoc.org/github.com/gbrlsnchs/jwt?status.svg)](https://pkg.go.dev/github.com/gbrlsnchs/jwt/v3) +[![Version compatibility with Go 1.11 onward using modules](https://img.shields.io/badge/compatible%20with-go1.11+-5272b4.svg)](https://github.com/gbrlsnchs/jwt#installing) +[![Join the chat at 
https://gitter.im/gbrlsnchs/jwt](https://badges.gitter.im/gbrlsnchs/jwt.svg)](https://gitter.im/gbrlsnchs/jwt?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) + +## About +This package is a JWT signer, verifier and validator for [Go](https://golang.org) (or Golang). + +Although there are many JWT packages out there for Go, many lack support for some signing, verifying or validation methods and, when they don't, they're overcomplicated. This package tries to mimic the ease of use from [Node JWT library](https://github.com/auth0/node-jsonwebtoken)'s API while following the [Effective Go](https://golang.org/doc/effective_go.html) guidelines. + +Support for [JWE](https://tools.ietf.org/html/rfc7516) isn't provided (not yet but is in the roadmap, see #17). Instead, [JWS](https://tools.ietf.org/html/rfc7515) is used, narrowed down to the [JWT specification](https://tools.ietf.org/html/rfc7519). + +### Supported signing methods +| | SHA-256 | SHA-384 | SHA-512 | +|:-------:|:------------------:|:------------------:|:------------------:| +| HMAC | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | +| RSA | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | +| RSA-PSS | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | +| ECDSA | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | +| EdDSA | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_check_mark: | + +## Important +Branch `master` is unstable, **always** use tagged versions. That way it is possible to differentiate pre-release tags from production ones. +In other words, API changes all the time in `master`. It's a place for public experiment. Thus, make use of the latest stable version via Go modules. + +## Usage +Full documentation [here](https://pkg.go.dev/github.com/gbrlsnchs/jwt/v3). + +### Installing +#### Important +For Go 1.11, make sure the environment variable `GO111MODULE` is set as `on` when running the install command. 
+ +```sh +$ go get -u github.com/gbrlsnchs/jwt/v3 +``` + +### Signing +```go +import ( + "time" + + "github.com/gbrlsnchs/jwt/v3" +) + +type CustomPayload struct { + jwt.Payload + Foo string `json:"foo,omitempty"` + Bar int `json:"bar,omitempty"` +} + +var hs = jwt.NewHS256([]byte("secret")) + +func main() { + now := time.Now() + pl := CustomPayload{ + Payload: jwt.Payload{ + Issuer: "gbrlsnchs", + Subject: "someone", + Audience: jwt.Audience{"https://golang.org", "https://jwt.io"}, + ExpirationTime: jwt.NumericDate(now.Add(24 * 30 * 12 * time.Hour)), + NotBefore: jwt.NumericDate(now.Add(30 * time.Minute)), + IssuedAt: jwt.NumericDate(now), + JWTID: "foobar", + }, + Foo: "foo", + Bar: 1337, + } + + token, err := jwt.Sign(pl, hs) + if err != nil { + // ... + } + + // ... +} +``` + +### Verifying +```go +import "github.com/gbrlsnchs/jwt/v3" + +type CustomPayload struct { + jwt.Payload + Foo string `json:"foo,omitempty"` + Bar int `json:"bar,omitempty"` +} + +var hs = jwt.NewHS256([]byte("secret")) + +func main() { + // ... + + var pl CustomPayload + hd, err := jwt.Verify(token, hs, &pl) + if err != nil { + // ... + } + + // ... +} +``` + +### Other use case examples +
Setting "cty" and "kid" claims +

+ +The "cty" and "kid" claims can be set by passing options to the `jwt.Sign` function: +```go +import ( + "time" + + "github.com/gbrlsnchs/jwt/v3" +) + +var hs = jwt.NewHS256([]byte("secret")) + +func main() { + pl := jwt.Payload{ + Subject: "gbrlsnchs", + Issuer: "gsr.dev", + IssuedAt: jwt.NumericDate(time.Now()), + } + + token, err := jwt.Sign(pl, hs, jwt.ContentType("JWT"), jwt.KeyID("my_key")) + if err != nil { + // ... + } + + // ... +} +``` + +

+
+ +
Validating claims +

+ + +```go +import ( + "time" + + "github.com/gbrlsnchs/jwt/v3" +) + +type CustomPayload struct { + jwt.Payload + Foo string `json:"foo,omitempty"` + Bar int `json:"bar,omitempty"` +} + +var hs = jwt.NewHS256([]byte("secret")) + +func main() { + // ... + + var ( + now = time.Now() + aud = jwt.Audience{"https://golang.org"} + + // Validate claims "iat", "exp" and "aud". + iatValidator = jwt.IssuedAtValidator(now) + expValidator = jwt.ExpirationTimeValidator(now) + audValidator = jwt.AudienceValidator(aud) + + // Use jwt.ValidatePayload to build a jwt.VerifyOption. + // Validators are run in the order informed. + pl CustomPayload + validatePayload = jwt.ValidatePayload(&pl.Payload, iatValidator, expValidator, audValidator) + ) + + hd, err := jwt.Verify(token, hs, &pl, validatePayload) + if err != nil { + // ... + } + + // ... +} +``` + +

+
+ +
Validating "alg" before verifying +

+ +For validating the "alg" field in a JOSE header **before** verification, the `jwt.ValidateHeader` option must be passed to `jwt.Verify`. +```go +import "github.com/gbrlsnchs/jwt/v3" + +var hs = jwt.NewHS256([]byte("secret")) + +func main() { + // ... + + var pl jwt.Payload + if _, err := jwt.Verify(token, hs, &pl, jwt.ValidateHeader); err != nil { + // ... + } + + // ... +} +``` + +

+
+ +
Using an Algorithm resolver +

+ +```go +import ( + "errors" + + "github.com/gbrlsnchs/jwt/v3" + "github.com/gbrlsnchs/jwt/v3/jwtutil" +) + +var ( + // ... + + rs256 = jwt.NewRS256(jwt.RSAPublicKey(myRSAPublicKey)) + es256 = jwt.NewES256(jwt.ECDSAPublicKey(myECDSAPublicKey)) +) + +func main() { + rv := &jwtutil.Resolver{New: func(hd jwt.Header) (jwt.Algorithm, error) { + switch hd.KeyID { + case "foo": + return rs256, nil + case "bar": + return es256, nil + default: + return nil, errors.New(`invalid "kid"`) + } + }} + var pl jwt.Payload + if _, err := jwt.Verify(token, rv, &pl); err != nil { + // ... + } + + // ... +} +``` + +

+
+ +## Contributing +### How to help +- For bugs and opinions, please [open an issue](https://github.com/gbrlsnchs/jwt/issues/new) +- For pushing changes, please [open a pull request](https://github.com/gbrlsnchs/jwt/compare) diff --git a/vendor/github.com/gbrlsnchs/jwt/algorithm.go b/vendor/github.com/gbrlsnchs/jwt/algorithm.go new file mode 100644 index 0000000000..59c4ae82f1 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/algorithm.go @@ -0,0 +1,15 @@ +package jwt + +import ( + // Load all hashing functions needed. + _ "crypto/sha256" + _ "crypto/sha512" +) + +// Algorithm is an algorithm for both signing and verifying a JWT. +type Algorithm interface { + Name() string + Sign(headerPayload []byte) ([]byte, error) + Size() int + Verify(headerPayload, sig []byte) error +} diff --git a/vendor/github.com/gbrlsnchs/jwt/audience.go b/vendor/github.com/gbrlsnchs/jwt/audience.go new file mode 100644 index 0000000000..4fc3175a23 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/audience.go @@ -0,0 +1,43 @@ +package jwt + +import "encoding/json" + +// Audience is a special claim that may either be +// a single string or an array of strings, as per the RFC 7519. +type Audience []string + +// MarshalJSON implements a marshaling function for "aud" claim. +func (a Audience) MarshalJSON() ([]byte, error) { + switch len(a) { + case 0: + return json.Marshal("") // nil or empty slice returns an empty string + case 1: + return json.Marshal(a[0]) + default: + return json.Marshal([]string(a)) + } +} + +// UnmarshalJSON implements an unmarshaling function for "aud" claim. 
+func (a *Audience) UnmarshalJSON(b []byte) error { + var ( + v interface{} + err error + ) + if err = json.Unmarshal(b, &v); err != nil { + return err + } + switch vv := v.(type) { + case string: + aud := make(Audience, 1) + aud[0] = vv + *a = aud + case []interface{}: + aud := make(Audience, len(vv)) + for i := range vv { + aud[i] = vv[i].(string) + } + *a = aud + } + return nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/audience_test.go b/vendor/github.com/gbrlsnchs/jwt/audience_test.go new file mode 100644 index 0000000000..dd4b945d79 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/audience_test.go @@ -0,0 +1,91 @@ +package jwt_test + +import ( + "encoding/json" + "testing" + + "github.com/gbrlsnchs/jwt/v3" + "github.com/google/go-cmp/cmp" +) + +func TestAudienceMarshal(t *testing.T) { + t.Run("omitempty", func(t *testing.T) { + var ( + b []byte + err error + v = struct { + Audience jwt.Audience `json:"aud,omitempty"` + }{} + ) + if b, err = json.Marshal(v); err != nil { + t.Fatal(err) + } + checkAudMarshal(t, b, "{}") + + }) + + testCases := []struct { + aud jwt.Audience + expected string + }{ + {jwt.Audience{"foo"}, `"foo"`}, + {jwt.Audience{"foo", "bar"}, `["foo","bar"]`}, + {nil, `""`}, + {jwt.Audience{}, `""`}, + {jwt.Audience{""}, `""`}, + } + for _, tc := range testCases { + t.Run(tc.expected, func(t *testing.T) { + var ( + b []byte + err error + ) + if tc.aud != nil { + if b, err = tc.aud.MarshalJSON(); err != nil { + t.Fatal(err) + } + checkAudMarshal(t, b, tc.expected) + } + if b, err = json.Marshal(tc.aud); err != nil { + t.Fatal(err) + } + checkAudMarshal(t, b, tc.expected) + }) + } +} + +func TestAudienceUnmarshal(t *testing.T) { + testCases := []struct { + jstr []byte + expected jwt.Audience + }{ + {[]byte(`"foo"`), jwt.Audience{"foo"}}, + {[]byte(`["foo","bar"]`), jwt.Audience{"foo", "bar"}}, + {[]byte("[]"), jwt.Audience{}}, + } + for _, tc := range testCases { + t.Run(string(tc.jstr), func(t *testing.T) { + var aud jwt.Audience + if 
err := aud.UnmarshalJSON(tc.jstr); err != nil { + t.Fatal(err) + } + checkAudUnmarshal(t, aud, tc.expected) + if err := json.Unmarshal(tc.jstr, &aud); err != nil { + t.Fatal(err) + } + checkAudUnmarshal(t, aud, tc.expected) + }) + } +} + +func checkAudMarshal(t *testing.T, got []byte, want string) { + if string(got) != want { + t.Errorf("jwt.Audience.Marshal mismatch (-want +got):\n%s", cmp.Diff(want, got)) + } +} + +func checkAudUnmarshal(t *testing.T, got, want jwt.Audience) { + if !cmp.Equal(got, want) { + t.Errorf("jwt.Audience.Unmarshal mismatch (-want +got):\n%s", cmp.Diff(want, got)) + } +} diff --git a/vendor/github.com/gbrlsnchs/jwt/bench_test.go b/vendor/github.com/gbrlsnchs/jwt/bench_test.go new file mode 100644 index 0000000000..572d6434a3 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/bench_test.go @@ -0,0 +1,80 @@ +package jwt_test + +import ( + "testing" + "time" + + "github.com/gbrlsnchs/jwt/v3" +) + +var ( + benchHS256 = jwt.NewHS256([]byte("secret")) + benchRecv []byte +) + +func BenchmarkSign(b *testing.B) { + now := time.Now() + var ( + token []byte + err error + pl = jwt.Payload{ + Issuer: "gbrlsnchs", + Subject: "someone", + Audience: jwt.Audience{"https://golang.org", "https://jwt.io"}, + ExpirationTime: jwt.NumericDate(now.Add(24 * 30 * 12 * time.Hour)), + NotBefore: jwt.NumericDate(now.Add(30 * time.Minute)), + IssuedAt: jwt.NumericDate(now), + JWTID: "foobar", + } + ) + b.Run("Default", func(b *testing.B) { + b.ReportAllocs() + for n := 0; n < b.N; n++ { + token, err = jwt.Sign(pl, benchHS256) + if err != nil { + b.Fatal(err) + } + } + }) + b.Run(`With "kid"`, func(b *testing.B) { + b.ReportAllocs() + for n := 0; n < b.N; n++ { + token, err = jwt.Sign(pl, benchHS256, jwt.KeyID("kid")) + if err != nil { + b.Fatal(err) + } + } + }) + b.Run(`With "cty" and "kid"`, func(b *testing.B) { + b.ReportAllocs() + for n := 0; n < b.N; n++ { + token, err = jwt.Sign(pl, benchHS256, jwt.ContentType("cty"), jwt.KeyID("kid")) + if err != nil { + 
b.Fatal(err) + } + } + }) + + benchRecv = token + +} + +func BenchmarkVerify(b *testing.B) { + var ( + token = []byte( + "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9." + + "eyJpc3MiOiJnYnJsc25jaHMiLCJzdWIiOiJzb21lb25lIiwiYXVkIjpbImh0dHBzOi8vZ29sYW5nLm9yZyIsImh0dHBzOi8vand0LmlvIl0sImV4cCI6MTU5MzM5MTE4MiwibmJmIjoxNTYyMjg4OTgyLCJpYXQiOjE1NjIyODcxODIsImp0aSI6ImZvb2JhciJ9." + + "bKevp7jmMbH9-Hy5g5OxLgq8tg13z9voH7lZ4m9y484", + ) + err error + ) + b.Run("Default", func(b *testing.B) { + b.ReportAllocs() + for n := 0; n < b.N; n++ { + var pl jwt.Payload + if _, err = jwt.Verify(token, benchHS256, &pl); err != nil { + b.Fatal(err) + } + } + }) +} diff --git a/vendor/github.com/gbrlsnchs/jwt/doc.go b/vendor/github.com/gbrlsnchs/jwt/doc.go new file mode 100644 index 0000000000..51632cd856 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/doc.go @@ -0,0 +1,2 @@ +// Package jwt is a JSON Web Token signer, verifier and validator. +package jwt diff --git a/vendor/github.com/gbrlsnchs/jwt/ecdsa_sha.go b/vendor/github.com/gbrlsnchs/jwt/ecdsa_sha.go new file mode 100644 index 0000000000..daf1b32c42 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/ecdsa_sha.go @@ -0,0 +1,153 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "math/big" + + "github.com/gbrlsnchs/jwt/v3/internal" +) + +var ( + // ErrECDSANilPrivKey is the error for trying to sign a JWT with a nil private key. + ErrECDSANilPrivKey = internal.NewError("jwt: ECDSA private key is nil") + // ErrECDSANilPubKey is the error for trying to verify a JWT with a nil public key. + ErrECDSANilPubKey = internal.NewError("jwt: ECDSA public key is nil") + // ErrECDSAVerification is the error for an invalid ECDSA signature. + ErrECDSAVerification = internal.NewError("jwt: ECDSA verification failed") + + _ Algorithm = new(ECDSASHA) +) + +// ECDSAPrivateKey is an option to set a private key to the ECDSA-SHA algorithm. 
+func ECDSAPrivateKey(priv *ecdsa.PrivateKey) func(*ECDSASHA) { + return func(es *ECDSASHA) { + es.priv = priv + } +} + +// ECDSAPublicKey is an option to set a public key to the ECDSA-SHA algorithm. +func ECDSAPublicKey(pub *ecdsa.PublicKey) func(*ECDSASHA) { + return func(es *ECDSASHA) { + es.pub = pub + } +} + +func byteSize(bitSize int) int { + byteSize := bitSize / 8 + if bitSize%8 > 0 { + return byteSize + 1 + } + return byteSize +} + +// ECDSASHA is an algorithm that uses ECDSA to sign SHA hashes. +type ECDSASHA struct { + name string + priv *ecdsa.PrivateKey + pub *ecdsa.PublicKey + sha crypto.Hash + size int + + pool *hashPool +} + +func newECDSASHA(name string, opts []func(*ECDSASHA), sha crypto.Hash) *ECDSASHA { + es := ECDSASHA{ + name: name, + sha: sha, + pool: newHashPool(sha.New), + } + for _, opt := range opts { + if opt != nil { + opt(&es) + } + } + if es.pub == nil { + if es.priv == nil { + panic(ErrECDSANilPrivKey) + } + es.pub = &es.priv.PublicKey + } + es.size = byteSize(es.pub.Params().BitSize) * 2 + return &es +} + +// NewES256 creates a new algorithm using ECDSA and SHA-256. +func NewES256(opts ...func(*ECDSASHA)) *ECDSASHA { + return newECDSASHA("ES256", opts, crypto.SHA256) +} + +// NewES384 creates a new algorithm using ECDSA and SHA-384. +func NewES384(opts ...func(*ECDSASHA)) *ECDSASHA { + return newECDSASHA("ES384", opts, crypto.SHA384) +} + +// NewES512 creates a new algorithm using ECDSA and SHA-512. +func NewES512(opts ...func(*ECDSASHA)) *ECDSASHA { + return newECDSASHA("ES512", opts, crypto.SHA512) +} + +// Name returns the algorithm's name. +func (es *ECDSASHA) Name() string { + return es.name +} + +// Sign signs headerPayload using the ECDSA-SHA algorithm. +func (es *ECDSASHA) Sign(headerPayload []byte) ([]byte, error) { + if es.priv == nil { + return nil, ErrECDSANilPrivKey + } + return es.sign(headerPayload) +} + +// Size returns the signature's byte size. 
+func (es *ECDSASHA) Size() int { + return es.size +} + +// Verify verifies a signature based on headerPayload using ECDSA-SHA. +func (es *ECDSASHA) Verify(headerPayload, sig []byte) (err error) { + if es.pub == nil { + return ErrECDSANilPubKey + } + if sig, err = internal.DecodeToBytes(sig); err != nil { + return err + } + byteSize := byteSize(es.pub.Params().BitSize) + if len(sig) != byteSize*2 { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:byteSize]) + s := big.NewInt(0).SetBytes(sig[byteSize:]) + sum, err := es.pool.sign(headerPayload) + if err != nil { + return err + } + if !ecdsa.Verify(es.pub, sum, r, s) { + return ErrECDSAVerification + } + return nil +} + +func (es *ECDSASHA) sign(headerPayload []byte) ([]byte, error) { + sum, err := es.pool.sign(headerPayload) + if err != nil { + return nil, err + } + r, s, err := ecdsa.Sign(rand.Reader, es.priv, sum) + if err != nil { + return nil, err + } + byteSize := byteSize(es.priv.Params().BitSize) + rbytes := r.Bytes() + rsig := make([]byte, byteSize) + copy(rsig[byteSize-len(rbytes):], rbytes) + + sbytes := s.Bytes() + ssig := make([]byte, byteSize) + copy(ssig[byteSize-len(sbytes):], sbytes) + return append(rsig, ssig...), nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/ecdsa_sha_test.go b/vendor/github.com/gbrlsnchs/jwt/ecdsa_sha_test.go new file mode 100644 index 0000000000..2e9373687e --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/ecdsa_sha_test.go @@ -0,0 +1,256 @@ +package jwt_test + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + + "testing" + + "github.com/gbrlsnchs/jwt/v3" + "github.com/gbrlsnchs/jwt/v3/internal" + "github.com/google/go-cmp/cmp" +) + +var ( + es256PrivateKey1, es256PublicKey1 = genECDSAKeys(elliptic.P256()) + es256PrivateKey2, es256PublicKey2 = genECDSAKeys(elliptic.P256()) + + es384PrivateKey1, es384PublicKey1 = genECDSAKeys(elliptic.P384()) + es384PrivateKey2, es384PublicKey2 = genECDSAKeys(elliptic.P384()) + + es512PrivateKey1, 
es512PublicKey1 = genECDSAKeys(elliptic.P521()) + es512PrivateKey2, es512PublicKey2 = genECDSAKeys(elliptic.P521()) + + ecdsaTestCases = []testCase{ + { + alg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES256(jwt.ECDSAPublicKey(es256PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES384(jwt.ECDSAPublicKey(es256PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES256(jwt.ECDSAPublicKey(es256PublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "ES256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "ES256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES384(jwt.ECDSAPublicKey(es384PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey1)), + payload: tp, + verifyAlg: 
jwt.NewES256(jwt.ECDSAPublicKey(es384PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES384(jwt.ECDSAPublicKey(es384PublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "ES384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "ES384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES512(jwt.ECDSAPublicKey(es512PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES384(jwt.ECDSAPublicKey(es512PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES512(jwt.ECDSAPublicKey(es512PublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "ES512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: 
jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "ES512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + } +) + +func TestNewECDSASHA(t *testing.T) { + testCases := []struct { + builder func(...func(*jwt.ECDSASHA)) *jwt.ECDSASHA + opts func(*jwt.ECDSASHA) + err error + }{ + {jwt.NewES256, nil, jwt.ErrECDSANilPrivKey}, + {jwt.NewES256, jwt.ECDSAPrivateKey(nil), jwt.ErrECDSANilPrivKey}, + {jwt.NewES256, jwt.ECDSAPrivateKey(es256PrivateKey1), nil}, + {jwt.NewES256, jwt.ECDSAPublicKey(es256PublicKey1), nil}, + {jwt.NewES384, nil, jwt.ErrECDSANilPrivKey}, + {jwt.NewES384, jwt.ECDSAPrivateKey(nil), jwt.ErrECDSANilPrivKey}, + {jwt.NewES384, jwt.ECDSAPrivateKey(es384PrivateKey1), nil}, + {jwt.NewES384, jwt.ECDSAPublicKey(es384PublicKey1), nil}, + {jwt.NewES512, nil, jwt.ErrECDSANilPrivKey}, + {jwt.NewES512, jwt.ECDSAPrivateKey(nil), jwt.ErrECDSANilPrivKey}, + {jwt.NewES512, jwt.ECDSAPrivateKey(es512PrivateKey1), nil}, + {jwt.NewES512, jwt.ECDSAPublicKey(es512PublicKey1), nil}, + } + for _, tc := range testCases { + funcName := funcName(tc.builder) + t.Run(funcName, func(t *testing.T) { + defer func() { + if r := recover(); r != nil { + err, ok := r.(error) + if !ok { + t.Fatal("r is not an error") + } + if want, got := tc.err, err; !internal.ErrorIs(got, want) { + t.Fatalf("jwt.%s err mismatch (-want +got):\n%s", funcName, cmp.Diff(want, got)) + } + } + }() + _ = tc.builder(tc.opts) + if tc.err != nil { + t.Fatalf("jwt.%s didn't panicked", funcName) + } + }) + } +} + +func genECDSAKeys(c elliptic.Curve) 
(*ecdsa.PrivateKey, *ecdsa.PublicKey) { + priv, err := ecdsa.GenerateKey(c, rand.Reader) + if err != nil { + panic(err) + } + return priv, &priv.PublicKey +} diff --git a/vendor/github.com/gbrlsnchs/jwt/ed25519.go b/vendor/github.com/gbrlsnchs/jwt/ed25519.go new file mode 100644 index 0000000000..8aa85011d0 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/ed25519.go @@ -0,0 +1,89 @@ +// +build go1.13 + +package jwt + +import ( + "crypto/ed25519" + + "github.com/gbrlsnchs/jwt/v3/internal" +) + +var ( + // ErrEd25519NilPrivKey is the error for trying to sign a JWT with a nil private key. + ErrEd25519NilPrivKey = internal.NewError("jwt: Ed25519 private key is nil") + // ErrEd25519NilPubKey is the error for trying to verify a JWT with a nil public key. + ErrEd25519NilPubKey = internal.NewError("jwt: Ed25519 public key is nil") + // ErrEd25519Verification is the error for when verification with Ed25519 fails. + ErrEd25519Verification = internal.NewError("jwt: Ed25519 verification failed") + + _ Algorithm = new(Ed25519) +) + +// Ed25519PrivateKey is an option to set a private key to the Ed25519 algorithm. +func Ed25519PrivateKey(priv ed25519.PrivateKey) func(*Ed25519) { + return func(ed *Ed25519) { + ed.priv = priv + } +} + +// Ed25519PublicKey is an option to set a public key to the Ed25519 algorithm. +func Ed25519PublicKey(pub ed25519.PublicKey) func(*Ed25519) { + return func(ed *Ed25519) { + ed.pub = pub + } +} + +// Ed25519 is an algorithm that uses EdDSA to sign SHA-512 hashes. +type Ed25519 struct { + priv ed25519.PrivateKey + pub ed25519.PublicKey +} + +// NewEd25519 creates a new algorithm using EdDSA and SHA-512. +func NewEd25519(opts ...func(*Ed25519)) *Ed25519 { + var ed Ed25519 + for _, opt := range opts { + if opt != nil { + opt(&ed) + } + } + if ed.pub == nil { + if len(ed.priv) == 0 { + panic(ErrEd25519NilPrivKey) + } + ed.pub = ed.priv.Public().(ed25519.PublicKey) + } + return &ed +} + +// Name returns the algorithm's name. 
+func (*Ed25519) Name() string { + return "Ed25519" +} + +// Sign signs headerPayload using the Ed25519 algorithm. +func (ed *Ed25519) Sign(headerPayload []byte) ([]byte, error) { + if ed.priv == nil { + return nil, ErrEd25519NilPrivKey + } + return ed25519.Sign(ed.priv, headerPayload), nil +} + +// Size returns the signature byte size. +func (*Ed25519) Size() int { + return ed25519.SignatureSize +} + +// Verify verifies a payload and a signature. +func (ed *Ed25519) Verify(payload, sig []byte) (err error) { + if ed.pub == nil { + return ErrEd25519NilPubKey + } + if sig, err = internal.DecodeToBytes(sig); err != nil { + return err + } + if !ed25519.Verify(ed.pub, payload, sig) { + return ErrEd25519Verification + } + return nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/ed25519_go1_12.go b/vendor/github.com/gbrlsnchs/jwt/ed25519_go1_12.go new file mode 100644 index 0000000000..317c5a8ff4 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/ed25519_go1_12.go @@ -0,0 +1,88 @@ +// +build !go1.13 + +package jwt + +import ( + "github.com/gbrlsnchs/jwt/v3/internal" + "golang.org/x/crypto/ed25519" +) + +var ( + // ErrEd25519NilPrivKey is the error for trying to sign a JWT with a nil private key. + ErrEd25519NilPrivKey = internal.NewError("jwt: Ed25519 private key is nil") + // ErrEd25519NilPubKey is the error for trying to verify a JWT with a nil public key. + ErrEd25519NilPubKey = internal.NewError("jwt: Ed25519 public key is nil") + // ErrEd25519Verification is the error for when verification with Ed25519 fails. + ErrEd25519Verification = internal.NewError("jwt: Ed25519 verification failed") + + _ Algorithm = new(Ed25519) +) + +// Ed25519PrivateKey is an option to set a private key to the Ed25519 algorithm. +func Ed25519PrivateKey(priv ed25519.PrivateKey) func(*Ed25519) { + return func(ed *Ed25519) { + ed.priv = priv + } +} + +// Ed25519PublicKey is an option to set a public key to the Ed25519 algorithm. 
+func Ed25519PublicKey(pub ed25519.PublicKey) func(*Ed25519) { + return func(ed *Ed25519) { + ed.pub = pub + } +} + +// Ed25519 is an algorithm that uses EdDSA to sign SHA-512 hashes. +type Ed25519 struct { + priv ed25519.PrivateKey + pub ed25519.PublicKey +} + +// NewEd25519 creates a new algorithm using EdDSA and SHA-512. +func NewEd25519(opts ...func(*Ed25519)) *Ed25519 { + var ed Ed25519 + for _, opt := range opts { + if opt != nil { + opt(&ed) + } + } + if ed.pub == nil { + if len(ed.priv) == 0 { + panic(ErrEd25519NilPrivKey) + } + ed.pub = ed.priv.Public().(ed25519.PublicKey) + } + return &ed +} + +// Name returns the algorithm's name. +func (*Ed25519) Name() string { + return "Ed25519" +} + +// Sign signs headerPayload using the Ed25519 algorithm. +func (ed *Ed25519) Sign(headerPayload []byte) ([]byte, error) { + if ed.priv == nil { + return nil, ErrEd25519NilPrivKey + } + return ed25519.Sign(ed.priv, headerPayload), nil +} + +// Size returns the signature byte size. +func (*Ed25519) Size() int { + return ed25519.SignatureSize +} + +// Verify verifies a payload and a signature. 
+func (ed *Ed25519) Verify(payload, sig []byte) (err error) { + if ed.pub == nil { + return ErrEd25519NilPubKey + } + if sig, err = internal.DecodeToBytes(sig); err != nil { + return err + } + if !ed25519.Verify(ed.pub, payload, sig) { + return ErrEd25519Verification + } + return nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/ed25519_test.go b/vendor/github.com/gbrlsnchs/jwt/ed25519_test.go new file mode 100644 index 0000000000..6c114218b7 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/ed25519_test.go @@ -0,0 +1,98 @@ +package jwt_test + +import ( + "testing" + + "github.com/gbrlsnchs/jwt/v3" + "github.com/gbrlsnchs/jwt/v3/internal" + "github.com/google/go-cmp/cmp" +) + +var ( + ed25519PrivateKey1, ed25519PublicKey1 = internal.GenerateEd25519Keys() + ed25519PrivateKey2, ed25519PublicKey2 = internal.GenerateEd25519Keys() + + ed25519TestCases = []testCase{ + { + alg: jwt.NewEd25519(jwt.Ed25519PrivateKey(ed25519PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewEd25519(jwt.Ed25519PrivateKey(ed25519PrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "Ed25519", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewEd25519(jwt.Ed25519PrivateKey(ed25519PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewEd25519(jwt.Ed25519PublicKey(ed25519PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "Ed25519", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewEd25519(jwt.Ed25519PrivateKey(ed25519PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewEd25519(jwt.Ed25519PrivateKey(ed25519PrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "Ed25519", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrEd25519Verification, + }, + { + alg: jwt.NewEd25519(jwt.Ed25519PrivateKey(ed25519PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewEd25519(jwt.Ed25519PublicKey(ed25519PublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "Ed25519", + Type: "JWT", + }, + 
wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrEd25519Verification, + }, + } +) + +func TestNewEd25519(t *testing.T) { + testCases := []struct { + builder func(...func(*jwt.Ed25519)) *jwt.Ed25519 + opts func(*jwt.Ed25519) + err error + }{ + {jwt.NewEd25519, nil, jwt.ErrEd25519NilPrivKey}, + {jwt.NewEd25519, jwt.Ed25519PrivateKey(nil), jwt.ErrEd25519NilPrivKey}, + {jwt.NewEd25519, jwt.Ed25519PrivateKey(ed25519PrivateKey1), nil}, + {jwt.NewEd25519, jwt.Ed25519PublicKey(ed25519PublicKey1), nil}, + } + for _, tc := range testCases { + funcName := funcName(tc.builder) + t.Run(funcName, func(t *testing.T) { + defer func() { + if r := recover(); r != nil { + err, ok := r.(error) + if !ok { + t.Fatal("r is not an error") + } + if want, got := tc.err, err; !internal.ErrorIs(got, want) { + t.Fatalf("jwt.%s err mismatch (-want +got):\n%s", funcName, cmp.Diff(want, got)) + } + } + }() + _ = tc.builder(tc.opts) + if tc.err != nil { + t.Fatalf("jwt.%s didn't panicked", funcName) + } + }) + } +} diff --git a/vendor/github.com/gbrlsnchs/jwt/go.mod b/vendor/github.com/gbrlsnchs/jwt/go.mod new file mode 100644 index 0000000000..b8795d0aa6 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/go.mod @@ -0,0 +1,12 @@ +module github.com/gbrlsnchs/jwt/v3 + +go 1.10 + +require ( + github.com/google/go-cmp v0.4.0 + github.com/magefile/mage v1.9.0 + golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad + golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac + golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 +) diff --git a/vendor/github.com/gbrlsnchs/jwt/go.sum b/vendor/github.com/gbrlsnchs/jwt/go.sum new file mode 100644 index 0000000000..aa91e463f5 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/go.sum @@ -0,0 +1,25 @@ +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 
+github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/magefile/mage v1.9.0 h1:t3AU2wNwehMCW97vuqQLtw6puppWXHO+O2MHo5a50XE= +github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad h1:5E5raQxcv+6CZ11RrBYQe5WRbUIWpScjh0kvHZkZIrQ= +golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac h1:8R1esu+8QioDxo4E4mX6bFztO+dMTM49DNAaWfO5OeY= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e h1:1xWUkZQQ9Z9UuZgNaIR6OQOE7rUFglXUUBZlO+dGg6I= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/gbrlsnchs/jwt/hash_pool.go b/vendor/github.com/gbrlsnchs/jwt/hash_pool.go new file mode 100644 index 0000000000..89aa4cb9a0 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/hash_pool.go @@ -0,0 +1,27 @@ +package jwt + +import ( + "hash" + "sync" +) + +type hashPool struct { + *sync.Pool +} + +func newHashPool(hfunc func() hash.Hash) *hashPool { + return &hashPool{&sync.Pool{New: func() interface{} { return hfunc() }}} +} + +func (hp *hashPool) sign(headerPayload []byte) ([]byte, error) { + hh := hp.Pool.Get().(hash.Hash) + defer func() { + hh.Reset() + hp.Pool.Put(hh) + }() + + if _, err := hh.Write(headerPayload); err != nil { + return nil, err + } + return hh.Sum(nil), nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/header.go b/vendor/github.com/gbrlsnchs/jwt/header.go new file mode 100644 index 0000000000..ae4fb6dfb4 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/header.go @@ -0,0 +1,11 @@ +package jwt + +// Header is a JOSE header narrowed down to the JWT specification from RFC 7519. +// +// Parameters are ordered according to the RFC 7515. 
+type Header struct { + Algorithm string `json:"alg,omitempty"` + ContentType string `json:"cty,omitempty"` + KeyID string `json:"kid,omitempty"` + Type string `json:"typ,omitempty"` +} diff --git a/vendor/github.com/gbrlsnchs/jwt/hmac_sha.go b/vendor/github.com/gbrlsnchs/jwt/hmac_sha.go new file mode 100644 index 0000000000..a9d814a2fe --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/hmac_sha.go @@ -0,0 +1,88 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "hash" + + "github.com/gbrlsnchs/jwt/v3/internal" +) + +var ( + // ErrHMACMissingKey is the error for trying to sign or verify a JWT with an empty key. + ErrHMACMissingKey = internal.NewError("jwt: HMAC key is empty") + // ErrHMACVerification is the error for an invalid signature. + ErrHMACVerification = internal.NewError("jwt: HMAC verification failed") + + _ Algorithm = new(HMACSHA) +) + +// HMACSHA is an algorithm that uses HMAC to sign SHA hashes. +type HMACSHA struct { + name string + key []byte + sha crypto.Hash + size int + pool *hashPool +} + +func newHMACSHA(name string, key []byte, sha crypto.Hash) *HMACSHA { + if len(key) == 0 { + panic(ErrHMACMissingKey) + } + return &HMACSHA{ + name: name, // cache name + key: key, + sha: sha, + size: sha.Size(), // cache size + pool: newHashPool(func() hash.Hash { return hmac.New(sha.New, key) }), + } +} + +// NewHS256 creates a new algorithm using HMAC and SHA-256. +func NewHS256(key []byte) *HMACSHA { + return newHMACSHA("HS256", key, crypto.SHA256) +} + +// NewHS384 creates a new algorithm using HMAC and SHA-384. +func NewHS384(key []byte) *HMACSHA { + return newHMACSHA("HS384", key, crypto.SHA384) +} + +// NewHS512 creates a new algorithm using HMAC and SHA-512. +func NewHS512(key []byte) *HMACSHA { + return newHMACSHA("HS512", key, crypto.SHA512) +} + +// Name returns the algorithm's name. +func (hs *HMACSHA) Name() string { + return hs.name +} + +// Sign signs headerPayload using the HMAC-SHA algorithm. 
+func (hs *HMACSHA) Sign(headerPayload []byte) ([]byte, error) { + if string(hs.key) == "" { + return nil, ErrHMACMissingKey + } + return hs.pool.sign(headerPayload) +} + +// Size returns the signature's byte size. +func (hs *HMACSHA) Size() int { + return hs.size +} + +// Verify verifies a signature based on headerPayload using HMAC-SHA. +func (hs *HMACSHA) Verify(headerPayload, sig []byte) (err error) { + if sig, err = internal.DecodeToBytes(sig); err != nil { + return err + } + sig2, err := hs.Sign(headerPayload) + if err != nil { + return err + } + if !hmac.Equal(sig, sig2) { + return ErrHMACVerification + } + return nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/hmac_sha_test.go b/vendor/github.com/gbrlsnchs/jwt/hmac_sha_test.go new file mode 100644 index 0000000000..c94ed404ca --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/hmac_sha_test.go @@ -0,0 +1,173 @@ +package jwt_test + +import ( + "reflect" + "runtime" + "strings" + "testing" + + "github.com/gbrlsnchs/jwt/v3" + "github.com/gbrlsnchs/jwt/v3/internal" + "github.com/google/go-cmp/cmp" +) + +var ( + hmacKey1 = []byte("secret") + hmacKey2 = []byte("terces") + + hmacTestCases = []testCase{ + { + alg: jwt.NewHS256(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS256(hmacKey1), + wantHeader: jwt.Header{ + Algorithm: "HS256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewHS256(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS256(hmacKey2), + wantHeader: jwt.Header{ + Algorithm: "HS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrHMACVerification, + }, + { + alg: jwt.NewHS256(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS384(hmacKey1), + wantHeader: jwt.Header{ + Algorithm: "HS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrHMACVerification, + }, + { + alg: jwt.NewHS384(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS384(hmacKey1), + wantHeader: jwt.Header{ + 
Algorithm: "HS384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewHS384(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS384(hmacKey2), + wantHeader: jwt.Header{ + Algorithm: "HS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrHMACVerification, + }, + { + alg: jwt.NewHS384(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS256(hmacKey1), + wantHeader: jwt.Header{ + Algorithm: "HS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrHMACVerification, + }, + { + alg: jwt.NewHS512(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS512(hmacKey1), + wantHeader: jwt.Header{ + Algorithm: "HS512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewHS512(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS512(hmacKey2), + wantHeader: jwt.Header{ + Algorithm: "HS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrHMACVerification, + }, + { + alg: jwt.NewHS512(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS256(hmacKey1), + wantHeader: jwt.Header{ + Algorithm: "HS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrHMACVerification, + }, + } +) + +func TestNewHMACSHA(t *testing.T) { + testCases := []struct { + builder func([]byte) *jwt.HMACSHA + key []byte + err error + }{ + {jwt.NewHS256, nil, jwt.ErrHMACMissingKey}, + {jwt.NewHS256, []byte(""), jwt.ErrHMACMissingKey}, + {jwt.NewHS256, []byte("a"), nil}, + {jwt.NewHS384, nil, jwt.ErrHMACMissingKey}, + {jwt.NewHS384, []byte(""), jwt.ErrHMACMissingKey}, + {jwt.NewHS384, []byte("a"), nil}, + {jwt.NewHS512, nil, jwt.ErrHMACMissingKey}, + {jwt.NewHS512, []byte(""), jwt.ErrHMACMissingKey}, + {jwt.NewHS512, []byte("a"), nil}, + } + for _, tc := range testCases { + funcName := funcName(tc.builder) + t.Run(funcName, func(t *testing.T) { + defer func() { + if r := recover(); r != nil { + err, ok 
:= r.(error) + if !ok { + t.Fatal("r is not an error") + } + if want, got := tc.err, err; !internal.ErrorIs(got, want) { + t.Fatalf("jwt.%s err mismatch (-want +got):\n%s", funcName, cmp.Diff(want, got)) + } + } + }() + _ = tc.builder(tc.key) + if tc.err != nil { + t.Fatalf("jwt.%s didn't panicked", funcName) + } + }) + } +} + +func funcName(fn interface{}) string { + return strings.Split( + runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name(), + ".", + )[2] +} diff --git a/vendor/github.com/gbrlsnchs/jwt/internal/decode.go b/vendor/github.com/gbrlsnchs/jwt/internal/decode.go new file mode 100644 index 0000000000..f03525e501 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/internal/decode.go @@ -0,0 +1,25 @@ +package internal + +import ( + "encoding/base64" + "encoding/json" +) + +// Decode decodes a Base64 encoded JSON object using the proper encoding for JWTs. +func Decode(enc []byte, v interface{}) error { + dec, err := DecodeToBytes(enc) + if err != nil { + return err + } + return json.Unmarshal(dec, v) +} + +// DecodeToBytes decodes a Base64 string using the proper encoding for JWTs. 
+func DecodeToBytes(enc []byte) ([]byte, error) { + encoding := base64.RawURLEncoding + dec := make([]byte, encoding.DecodedLen(len(enc))) + if _, err := encoding.Decode(dec, enc); err != nil { + return nil, err + } + return dec, nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/internal/decode_test.go b/vendor/github.com/gbrlsnchs/jwt/internal/decode_test.go new file mode 100644 index 0000000000..0d6299f1c9 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/internal/decode_test.go @@ -0,0 +1,53 @@ +package internal_test + +import ( + "encoding/base64" + "testing" + + "github.com/gbrlsnchs/jwt/v3/internal" + "github.com/google/go-cmp/cmp" +) + +var ( + stdEnc = base64.StdEncoding + rawURLEnc = base64.RawURLEncoding +) + +type decodeTest struct { + X string `json:"x,omitempty"` +} + +func TestDecode(t *testing.T) { + testCases := []struct { + encoding *base64.Encoding + json string + expected string + errors bool + }{ + {rawURLEnc, "{}", "", false}, + {rawURLEnc, `{"x":"test"}`, "test", false}, + {stdEnc, "{}", "", true}, + {stdEnc, `{"x":"test"}`, "test", false}, // the output is the same as with RawURLEncoding + {nil, "{}", "", true}, + {nil, `{"x":"test"}`, "", true}, + } + for _, tc := range testCases { + t.Run(tc.json, func(t *testing.T) { + b64 := tc.json + if tc.encoding != nil { + b64 = tc.encoding.EncodeToString([]byte(tc.json)) + } + t.Logf("b64: %s", b64) + var ( + dt decodeTest + err = internal.Decode([]byte(b64), &dt) + ) + if want, got := tc.errors, internal.ErrorAs(err, new(base64.CorruptInputError)); got != want { + t.Fatalf("want %t, got %t: %v", want, got, err) + } + if want, got := tc.expected, dt.X; got != want { + t.Errorf("internal.Decode mismatch (-want +got):\n%s", cmp.Diff(want, got)) + } + }) + } +} diff --git a/vendor/github.com/gbrlsnchs/jwt/internal/ed25519.go b/vendor/github.com/gbrlsnchs/jwt/internal/ed25519.go new file mode 100644 index 0000000000..7cc0bc2f42 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/internal/ed25519.go 
@@ -0,0 +1,17 @@ +// +build go1.13 + +package internal + +import ( + "crypto/ed25519" + "crypto/rand" +) + +// GenerateEd25519Keys generates a pair of keys for testing purposes. +func GenerateEd25519Keys() (ed25519.PrivateKey, ed25519.PublicKey) { + pub, priv, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + panic(err) + } + return priv, pub +} diff --git a/vendor/github.com/gbrlsnchs/jwt/internal/ed25519_go1_12.go b/vendor/github.com/gbrlsnchs/jwt/internal/ed25519_go1_12.go new file mode 100644 index 0000000000..3436a4c64b --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/internal/ed25519_go1_12.go @@ -0,0 +1,18 @@ +// +build !go1.13 + +package internal + +import ( + "crypto/rand" + + "golang.org/x/crypto/ed25519" +) + +// GenerateEd25519Keys generates a pair of keys for testing purposes. +func GenerateEd25519Keys() (ed25519.PrivateKey, ed25519.PublicKey) { + pub, priv, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + panic(err) + } + return priv, pub +} diff --git a/vendor/github.com/gbrlsnchs/jwt/internal/epoch.go b/vendor/github.com/gbrlsnchs/jwt/internal/epoch.go new file mode 100644 index 0000000000..494c09e6bb --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/internal/epoch.go @@ -0,0 +1,6 @@ +package internal + +import "time" + +// Epoch is 01/01/1970. +var Epoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) diff --git a/vendor/github.com/gbrlsnchs/jwt/internal/errors.go b/vendor/github.com/gbrlsnchs/jwt/internal/errors.go new file mode 100644 index 0000000000..f000802e44 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/internal/errors.go @@ -0,0 +1,20 @@ +// +build go1.13 + +package internal + +import ( + "errors" + "fmt" +) + +// Errorf wraps fmt.Errorf. +func Errorf(format string, a ...interface{}) error { return fmt.Errorf(format, a...) } + +// ErrorAs wraps errors.As. +func ErrorAs(err error, target interface{}) bool { return errors.As(err, target) } + +// ErrorIs wraps errors.Is. 
+func ErrorIs(err, target error) bool { return errors.Is(err, target) } + +// NewError wraps errors.New. +func NewError(text string) error { return errors.New(text) } diff --git a/vendor/github.com/gbrlsnchs/jwt/internal/errors_go1_12.go b/vendor/github.com/gbrlsnchs/jwt/internal/errors_go1_12.go new file mode 100644 index 0000000000..9aa55db330 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/internal/errors_go1_12.go @@ -0,0 +1,17 @@ +// +build !go1.13 + +package internal + +import "golang.org/x/xerrors" + +// Errorf wraps xerrors.Errorf. +func Errorf(format string, a ...interface{}) error { return xerrors.Errorf(format, a...) } + +// ErrorAs wraps xerrors.As. +func ErrorAs(err error, target interface{}) bool { return xerrors.As(err, target) } + +// ErrorIs wraps xerrors.Is. +func ErrorIs(err, target error) bool { return xerrors.Is(err, target) } + +// NewError wraps xerrors.New. +func NewError(text string) error { return xerrors.New(text) } diff --git a/vendor/github.com/gbrlsnchs/jwt/json.go b/vendor/github.com/gbrlsnchs/jwt/json.go new file mode 100644 index 0000000000..cdbdb76f9f --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/json.go @@ -0,0 +1,14 @@ +package jwt + +import ( + "bytes" + "errors" +) + +// ErrNotJSONObject is the error for when a JWT payload is not a JSON object. +var ErrNotJSONObject = errors.New("jwt: payload is not a valid JSON object") + +func isJSONObject(payload []byte) bool { + payload = bytes.TrimSpace(payload) + return payload[0] == '{' && payload[len(payload)-1] == '}' +} diff --git a/vendor/github.com/gbrlsnchs/jwt/jwtutil/resolver.go b/vendor/github.com/gbrlsnchs/jwt/jwtutil/resolver.go new file mode 100644 index 0000000000..6fc77b0030 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/jwtutil/resolver.go @@ -0,0 +1,57 @@ +package jwtutil + +import ( + "github.com/gbrlsnchs/jwt/v3" + "github.com/gbrlsnchs/jwt/v3/internal" +) + +// Resolver is an Algorithm resolver. 
+type Resolver struct { + New func(jwt.Header) (jwt.Algorithm, error) + alg jwt.Algorithm +} + +// ErrNilAlg is the error for when an algorithm can't be resolved. +var ErrNilAlg = internal.NewError("algorithm is nil") + +// Name returns an Algorithm's name. +func (rv *Resolver) Name() string { + if rv.alg == nil { + return "" + } + return rv.alg.Name() +} + +// Resolve sets an Algorithm based on a JOSE Header. +func (rv *Resolver) Resolve(hd jwt.Header) error { + if rv.alg != nil { + return nil + } + if rv.New == nil { + return ErrNilAlg + } + alg, err := rv.New(hd) + if err != nil { + return err + } + if alg == nil { + return ErrNilAlg + } + rv.alg = alg + return nil + return nil +} + +// Sign signs headerPayload using the resolved Algorithm. +func (rv *Resolver) Sign(headerPayload []byte) ([]byte, error) { + return rv.alg.Sign(headerPayload) +} + +// Size returns an Algorithm's size. +func (rv *Resolver) Size() int { + return rv.alg.Size() +} + +// Verify resolves an Algorithm and verifies using it. 
+func (rv *Resolver) Verify(headerPayload, sig []byte) error { + return rv.alg.Verify(headerPayload, sig) +} diff --git a/vendor/github.com/gbrlsnchs/jwt/jwtutil/resolver_test.go b/vendor/github.com/gbrlsnchs/jwt/jwtutil/resolver_test.go new file mode 100644 index 0000000000..f5e7b0a68a --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/jwtutil/resolver_test.go @@ -0,0 +1,52 @@ +package jwtutil_test + +import ( + "errors" + "testing" + + "github.com/gbrlsnchs/jwt/v3" + "github.com/gbrlsnchs/jwt/v3/jwtutil" +) + +var hs256 = jwt.NewHS256([]byte("resolver")) + +func TestResolver(t *testing.T) { + testCases := []struct { + signer jwt.Algorithm + signOpts []jwt.SignOption + verifier jwt.Algorithm + }{ + { + signer: hs256, + verifier: &jwtutil.Resolver{ + New: func(hd jwt.Header) (jwt.Algorithm, error) { + return hs256, nil + }, + }, + }, + { + signer: hs256, + signOpts: []jwt.SignOption{jwt.KeyID("test")}, + verifier: &jwtutil.Resolver{ + New: func(hd jwt.Header) (jwt.Algorithm, error) { + if hd.KeyID != "test" { + return nil, errors.New(`wrong "kid"`) + } + return hs256, nil + }, + }, + }, + } + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + token, err := jwt.Sign(jwt.Payload{}, tc.signer, tc.signOpts...) + if err != nil { + t.Fatal(err) + } + var pl jwt.Payload + if _, err = jwt.Verify(token, tc.verifier, &pl); err != nil { + t.Fatal(err) + } + }) + } +} diff --git a/vendor/github.com/gbrlsnchs/jwt/magefile.go b/vendor/github.com/gbrlsnchs/jwt/magefile.go new file mode 100644 index 0000000000..ae4892c46a --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/magefile.go @@ -0,0 +1,109 @@ +// +build mage,go1.9 + +package main + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/magefile/mage/sh" +) + +const ( + // Linting tools. 
+ goplsImportPath = "golang.org/x/tools/gopls" + goimportsImportPath = "golang.org/x/tools/cmd/goimports" + golintImportPath = "golang.org/x/lint/golint" + + bin = ".bin" +) + +var ( + goCmd = os.Getenv("GOCMD") + goVersion = runtime.Version() + + cwd, _ = os.Getwd() +) + +func init() { + if goCmd == "" { + goCmd = "go" + } +} + +// Fix runs "goimports -w ." to fix all files. +func Fix() error { + root, err := findRoot(cwd) + if err != nil { + return err + } + + goimportsCmd := filepath.Join(root, bin, "goimports") + return sh.Run(goimportsCmd, "-w", root) +} + +// Install installs all development dependencies. +func Install() error { + root, err := findRoot(cwd) + if err != nil { + return err + } + deps := []string{ + goplsImportPath, // not used in CI + goimportsImportPath, + golintImportPath, + } + gobin := filepath.Join(root, bin) + for _, dep := range deps { + if err := sh.RunWith( + map[string]string{"GOBIN": gobin}, + goCmd, "install", dep, + ); err != nil { + return err + } + } + return nil +} + +// Lint lints using "golint" and "goimports". +func Lint() error { + root, err := findRoot(cwd) + if err != nil { + return err + } + goimportsCmd := filepath.Join(root, bin, "goimports") + goimportsDiff, err := sh.Output(goimportsCmd, "-d", root) + if err != nil { + return err + } + if goimportsDiff != "" { + return fmt.Errorf("\n%s", goimportsDiff) + } + golintCmd := filepath.Join(root, bin, "golint") + return sh.Run(golintCmd, "-set_exit_status", filepath.Join(root, "...")) +} + +// Test tests using "go test". +func Test() error { + root, err := findRoot(cwd) + if err != nil { + return err + } + flags := strings.Split(os.Getenv("TEST_FLAGS"), " ") + args := append([]string{"test"}, flags...) + return sh.Run(goCmd, append(args, filepath.Join(root, "..."))...) 
+} + +func findRoot(dir string) (string, error) { + matches, err := filepath.Glob(filepath.Join(dir, "go.mod")) + if err != nil { + return "", err + } + if matches != nil { + return dir, nil + } + return findRoot(filepath.Dir(dir)) +} diff --git a/vendor/github.com/gbrlsnchs/jwt/none.go b/vendor/github.com/gbrlsnchs/jwt/none.go new file mode 100644 index 0000000000..d1a4f1647d --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/none.go @@ -0,0 +1,20 @@ +package jwt + +var _ Algorithm = none{} + +type none struct{} + +// None returns a dull, unsecured algorithm. +func None() Algorithm { return none{} } + +// Name always returns "none". +func (none) Name() string { return "none" } + +// Sign always returns a nil byte slice and a nil error. +func (none) Sign(_ []byte) ([]byte, error) { return nil, nil } + +// Size always returns 0 and a nil error. +func (none) Size() int { return 0 } + +// Verify always returns a nil error. +func (none) Verify(_, _ []byte) error { return nil } diff --git a/vendor/github.com/gbrlsnchs/jwt/none_test.go b/vendor/github.com/gbrlsnchs/jwt/none_test.go new file mode 100644 index 0000000000..30619ec668 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/none_test.go @@ -0,0 +1 @@ +package jwt_test diff --git a/vendor/github.com/gbrlsnchs/jwt/payload.go b/vendor/github.com/gbrlsnchs/jwt/payload.go new file mode 100644 index 0000000000..c8329a6ba4 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/payload.go @@ -0,0 +1,12 @@ +package jwt + +// Payload is a JWT payload according to the RFC 7519. 
+type Payload struct { + Issuer string `json:"iss,omitempty"` + Subject string `json:"sub,omitempty"` + Audience Audience `json:"aud,omitempty"` + ExpirationTime *Time `json:"exp,omitempty"` + NotBefore *Time `json:"nbf,omitempty"` + IssuedAt *Time `json:"iat,omitempty"` + JWTID string `json:"jti,omitempty"` +} diff --git a/vendor/github.com/gbrlsnchs/jwt/raw_token.go b/vendor/github.com/gbrlsnchs/jwt/raw_token.go new file mode 100644 index 0000000000..2f30303da0 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/raw_token.go @@ -0,0 +1,59 @@ +package jwt + +import ( + "encoding/json" + + "github.com/gbrlsnchs/jwt/v3/internal" +) + +// ErrMalformed indicates a token doesn't have a valid format, as per the RFC 7519. +var ErrMalformed = internal.NewError("jwt: malformed token") + +// RawToken is a representation of a parsed JWT string. +type RawToken struct { + token []byte + sep1, sep2 int + + hd Header + alg Algorithm + + pl *Payload + vds []Validator +} + +func (rt *RawToken) header() []byte { return rt.token[:rt.sep1] } +func (rt *RawToken) headerPayload() []byte { return rt.token[:rt.sep2] } +func (rt *RawToken) payload() []byte { return rt.token[rt.sep1+1 : rt.sep2] } +func (rt *RawToken) sig() []byte { return rt.token[rt.sep2+1:] } + +func (rt *RawToken) setToken(token []byte, sep1, sep2 int) { + rt.sep1 = sep1 + rt.sep2 = sep1 + 1 + sep2 + rt.token = token +} + +func (rt *RawToken) decode(payload interface{}) (err error) { + pb, err := internal.DecodeToBytes(rt.payload()) + if err != nil { + return err + } + if !isJSONObject(pb) { + return ErrNotJSONObject + } + if err = json.Unmarshal(pb, payload); err != nil { + return err + } + for _, vd := range rt.vds { + if err = vd(rt.pl); err != nil { + return err + } + } + return nil +} + +func (rt *RawToken) decodeHeader() error { + if err := internal.Decode(rt.header(), &rt.hd); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/resolver.go 
b/vendor/github.com/gbrlsnchs/jwt/resolver.go new file mode 100644 index 0000000000..08f7767310 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/resolver.go @@ -0,0 +1,7 @@ +package jwt + +// Resolver is an Algorithm that needs to set some variables +// based on a Header before performing signing and verification. +type Resolver interface { + Resolve(Header) error +} diff --git a/vendor/github.com/gbrlsnchs/jwt/rsa_sha.go b/vendor/github.com/gbrlsnchs/jwt/rsa_sha.go new file mode 100644 index 0000000000..e8fbcd16c7 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/rsa_sha.go @@ -0,0 +1,150 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + + "github.com/gbrlsnchs/jwt/v3/internal" +) + +var ( + // ErrRSANilPrivKey is the error for trying to sign a JWT with a nil private key. + ErrRSANilPrivKey = internal.NewError("jwt: RSA private key is nil") + // ErrRSANilPubKey is the error for trying to verify a JWT with a nil public key. + ErrRSANilPubKey = internal.NewError("jwt: RSA public key is nil") + // ErrRSAVerification is the error for an invalid RSA signature. + ErrRSAVerification = internal.NewError("jwt: RSA verification failed") + + _ Algorithm = new(RSASHA) +) + +// RSAPrivateKey is an option to set a private key to the RSA-SHA algorithm. +func RSAPrivateKey(priv *rsa.PrivateKey) func(*RSASHA) { + return func(rs *RSASHA) { + rs.priv = priv + } +} + +// RSAPublicKey is an option to set a public key to the RSA-SHA algorithm. +func RSAPublicKey(pub *rsa.PublicKey) func(*RSASHA) { + return func(rs *RSASHA) { + rs.pub = pub + } +} + +// RSASHA is an algorithm that uses RSA to sign SHA hashes. 
+type RSASHA struct { + name string + priv *rsa.PrivateKey + pub *rsa.PublicKey + sha crypto.Hash + size int + pool *hashPool + opts *rsa.PSSOptions +} + +func newRSASHA(name string, opts []func(*RSASHA), sha crypto.Hash, pss bool) *RSASHA { + rs := RSASHA{ + name: name, // cache name + sha: sha, + pool: newHashPool(sha.New), + } + for _, opt := range opts { + if opt != nil { + opt(&rs) + } + } + if rs.pub == nil { + if rs.priv == nil { + panic(ErrRSANilPrivKey) + } + rs.pub = &rs.priv.PublicKey + } + rs.size = rs.pub.Size() // cache size + if pss { + rs.opts = &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: sha, + } + } + return &rs +} + +// NewRS256 creates a new algorithm using RSA and SHA-256. +func NewRS256(opts ...func(*RSASHA)) *RSASHA { + return newRSASHA("RS256", opts, crypto.SHA256, false) +} + +// NewRS384 creates a new algorithm using RSA and SHA-384. +func NewRS384(opts ...func(*RSASHA)) *RSASHA { + return newRSASHA("RS384", opts, crypto.SHA384, false) +} + +// NewRS512 creates a new algorithm using RSA and SHA-512. +func NewRS512(opts ...func(*RSASHA)) *RSASHA { + return newRSASHA("RS512", opts, crypto.SHA512, false) +} + +// NewPS256 creates a new algorithm using RSA-PSS and SHA-256. +func NewPS256(opts ...func(*RSASHA)) *RSASHA { + return newRSASHA("PS256", opts, crypto.SHA256, true) +} + +// NewPS384 creates a new algorithm using RSA-PSS and SHA-384. +func NewPS384(opts ...func(*RSASHA)) *RSASHA { + return newRSASHA("PS384", opts, crypto.SHA384, true) +} + +// NewPS512 creates a new algorithm using RSA-PSS and SHA-512. +func NewPS512(opts ...func(*RSASHA)) *RSASHA { + return newRSASHA("PS512", opts, crypto.SHA512, true) +} + +// Name returns the algorithm's name. +func (rs *RSASHA) Name() string { + return rs.name +} + +// Sign signs headerPayload using either RSA-SHA or RSA-PSS-SHA algorithms. 
+func (rs *RSASHA) Sign(headerPayload []byte) ([]byte, error) { + if rs.priv == nil { + return nil, ErrRSANilPrivKey + } + sum, err := rs.pool.sign(headerPayload) + if err != nil { + return nil, err + } + if rs.opts != nil { + return rsa.SignPSS(rand.Reader, rs.priv, rs.sha, sum, rs.opts) + } + return rsa.SignPKCS1v15(rand.Reader, rs.priv, rs.sha, sum) +} + +// Size returns the signature's byte size. +func (rs *RSASHA) Size() int { + return rs.size +} + +// Verify verifies a signature based on headerPayload using either RSA-SHA or RSA-PSS-SHA. +func (rs *RSASHA) Verify(headerPayload, sig []byte) (err error) { + if rs.pub == nil { + return ErrRSANilPubKey + } + if sig, err = internal.DecodeToBytes(sig); err != nil { + return err + } + sum, err := rs.pool.sign(headerPayload) + if err != nil { + return err + } + if rs.opts != nil { + err = rsa.VerifyPSS(rs.pub, rs.sha, sum, sig, rs.opts) + } else { + err = rsa.VerifyPKCS1v15(rs.pub, rs.sha, sum, sig) + } + if err != nil { + return ErrRSAVerification + } + return nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/rsa_sha_test.go b/vendor/github.com/gbrlsnchs/jwt/rsa_sha_test.go new file mode 100644 index 0000000000..61865de37d --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/rsa_sha_test.go @@ -0,0 +1,467 @@ +package jwt_test + +import ( + "crypto/rand" + "crypto/rsa" + + "testing" + + "github.com/gbrlsnchs/jwt/v3" + "github.com/gbrlsnchs/jwt/v3/internal" + "github.com/google/go-cmp/cmp" +) + +var ( + rsaPrivateKey1, rsaPublicKey1 = genRSAKeys() + rsaPrivateKey2, rsaPublicKey2 = genRSAKeys() + + rsaTestCases = []testCase{ + { + alg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), 
+ wantHeader: jwt.Header{ + Algorithm: "RS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "RS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS256(jwt.RSAPublicKey(rsaPublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS256(jwt.RSAPublicKey(rsaPublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "RS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "RS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS384(jwt.RSAPublicKey(rsaPublicKey1)), + wantHeader: 
jwt.Header{ + Algorithm: "RS384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS384(jwt.RSAPublicKey(rsaPublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "RS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "RS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS512(jwt.RSAPublicKey(rsaPublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS512(jwt.RSAPublicKey(rsaPublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "RS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + } + rsaPSSTestCases = []testCase{ + { + alg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: 
jwt.Header{ + Algorithm: "PS256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "PS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS256(jwt.RSAPublicKey(rsaPublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS256(jwt.RSAPublicKey(rsaPublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "PS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS384", + Type: 
"JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "PS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS384(jwt.RSAPublicKey(rsaPublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS384(jwt.RSAPublicKey(rsaPublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "PS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS512", + Type: "JWT", + }, 
+ wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "PS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS512(jwt.RSAPublicKey(rsaPublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS512(jwt.RSAPublicKey(rsaPublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "PS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + } +) + +func TestNewRSASHA(t *testing.T) { + testCases := []struct { + builder func(...func(*jwt.RSASHA)) *jwt.RSASHA + opts func(*jwt.RSASHA) + err error + }{ + {jwt.NewRS256, nil, jwt.ErrRSANilPrivKey}, + {jwt.NewRS256, jwt.RSAPrivateKey(nil), jwt.ErrRSANilPrivKey}, + {jwt.NewRS256, jwt.RSAPrivateKey(rsaPrivateKey1), nil}, + {jwt.NewRS256, jwt.RSAPublicKey(rsaPublicKey1), nil}, + {jwt.NewRS384, nil, jwt.ErrRSANilPrivKey}, + {jwt.NewRS384, jwt.RSAPrivateKey(nil), jwt.ErrRSANilPrivKey}, + {jwt.NewRS384, jwt.RSAPrivateKey(rsaPrivateKey1), nil}, + {jwt.NewRS384, jwt.RSAPublicKey(rsaPublicKey1), nil}, + {jwt.NewRS512, nil, jwt.ErrRSANilPrivKey}, + {jwt.NewRS512, jwt.RSAPrivateKey(nil), jwt.ErrRSANilPrivKey}, + {jwt.NewRS512, jwt.RSAPrivateKey(rsaPrivateKey1), nil}, + {jwt.NewRS512, jwt.RSAPublicKey(rsaPublicKey1), nil}, + } + for _, tc := range testCases { + funcName := funcName(tc.builder) + t.Run(funcName, func(t *testing.T) { + defer func() { + if r := recover(); r != nil { + err, ok := r.(error) + if !ok { + t.Fatal("r is not an error") + } + if 
want, got := tc.err, err; !internal.ErrorIs(got, want) { + t.Fatalf("jwt.%s err mismatch (-want +got):\n%s", funcName, cmp.Diff(want, got)) + } + } + }() + _ = tc.builder(tc.opts) + if tc.err != nil { + t.Fatalf("jwt.%s didn't panicked", funcName) + } + }) + } +} + +func genRSAKeys() (*rsa.PrivateKey, *rsa.PublicKey) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + panic(err) + } + return priv, &priv.PublicKey +} diff --git a/vendor/github.com/gbrlsnchs/jwt/sign.go b/vendor/github.com/gbrlsnchs/jwt/sign.go new file mode 100644 index 0000000000..579c23a4d6 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/sign.go @@ -0,0 +1,75 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + + "github.com/gbrlsnchs/jwt/v3/internal" +) + +// SignOption is a functional option for signing. +type SignOption func(*Header) + +// ContentType sets the "cty" claim for a Header before signing. +func ContentType(cty string) SignOption { + return func(hd *Header) { + hd.ContentType = cty + } +} + +// KeyID sets the "kid" claim for a Header before signing. +func KeyID(kid string) SignOption { + return func(hd *Header) { + hd.KeyID = kid + } +} + +// Sign signs a payload with alg. +func Sign(payload interface{}, alg Algorithm, opts ...SignOption) ([]byte, error) { + var hd Header + for _, opt := range opts { + opt(&hd) + } + if rv, ok := alg.(Resolver); ok { + if err := rv.Resolve(hd); err != nil { + return nil, internal.Errorf("jwt: failed to resolve: %w", err) + } + } + // Override some values or set them if empty. + hd.Algorithm = alg.Name() + hd.Type = "JWT" + // Marshal the header part of the JWT. + hb, err := json.Marshal(hd) + if err != nil { + return nil, err + } + + if payload == nil { + payload = Payload{} + } + // Marshal the claims part of the JWT. 
+ pb, err := json.Marshal(payload) + if err != nil { + return nil, err + } + if !isJSONObject(pb) { + return nil, ErrNotJSONObject + } + + enc := base64.RawURLEncoding + h64len := enc.EncodedLen(len(hb)) + p64len := enc.EncodedLen(len(pb)) + sig64len := enc.EncodedLen(alg.Size()) + token := make([]byte, h64len+1+p64len+1+sig64len) + + enc.Encode(token, hb) + token[h64len] = '.' + enc.Encode(token[h64len+1:], pb) + sig, err := alg.Sign(token[:h64len+1+p64len]) + if err != nil { + return nil, err + } + token[h64len+1+p64len] = '.' + enc.Encode(token[h64len+1+p64len+1:], sig) + return token, nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/sign_test.go b/vendor/github.com/gbrlsnchs/jwt/sign_test.go new file mode 100644 index 0000000000..da1313ac04 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/sign_test.go @@ -0,0 +1,160 @@ +package jwt_test + +import ( + "errors" + "testing" + + "github.com/gbrlsnchs/jwt/v3" + "github.com/gbrlsnchs/jwt/v3/internal" + "github.com/gbrlsnchs/jwt/v3/jwtutil" + "github.com/google/go-cmp/cmp" +) + +var testErr = errors.New("test") + +func TestSign(t *testing.T) { + testCases := []struct { + payload interface{} + alg jwt.Algorithm + opts []jwt.SignOption + err error + }{ + { + payload: jwt.Payload{}, + alg: jwt.None(), + opts: nil, + err: nil, + }, + { + payload: nil, + alg: &jwt.HMACSHA{}, + opts: nil, + err: jwt.ErrHMACMissingKey, + }, + { + payload: nil, + alg: jwt.NewHS256([]byte("secret")), + opts: nil, + err: nil, + }, + { + payload: nil, + alg: jwt.NewHS384([]byte("secret")), + opts: nil, + err: nil, + }, + { + payload: nil, + alg: jwt.NewHS512([]byte("secret")), + opts: nil, + err: nil, + }, + { + payload: nil, + alg: &jwt.RSASHA{}, + opts: nil, + err: jwt.ErrRSANilPrivKey, + }, + { + payload: nil, + alg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + opts: nil, + err: nil, + }, + { + payload: nil, + alg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + opts: nil, + err: nil, + }, + { + payload: nil, + alg: 
jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + opts: nil, + err: nil, + }, + { + payload: nil, + alg: &jwt.ECDSASHA{}, + opts: nil, + err: jwt.ErrECDSANilPrivKey, + }, + { + payload: nil, + alg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey1)), + opts: nil, + err: nil, + }, + { + payload: nil, + alg: jwt.NewES384(jwt.ECDSAPrivateKey(es256PrivateKey1)), + opts: nil, + err: nil, + }, + { + payload: nil, + alg: jwt.NewES512(jwt.ECDSAPrivateKey(es256PrivateKey1)), + opts: nil, + err: nil, + }, + { + payload: nil, + alg: &jwt.Ed25519{}, + opts: nil, + err: jwt.ErrEd25519NilPrivKey, + }, + { + payload: nil, + alg: jwt.NewEd25519(jwt.Ed25519PrivateKey(ed25519PrivateKey1)), + opts: nil, + err: nil, + }, + { + payload: 0xDEAD, + alg: jwt.NewHS256([]byte("secret")), + opts: nil, + err: jwt.ErrNotJSONObject, + }, + { + payload: jwt.Payload{}, + alg: &jwtutil.Resolver{New: func(hd jwt.Header) (jwt.Algorithm, error) { + return jwt.NewHS256([]byte("secret")), nil + }}, + opts: nil, + err: nil, + }, + { + payload: jwt.Payload{}, + alg: &jwtutil.Resolver{New: func(hd jwt.Header) (jwt.Algorithm, error) { + return nil, testErr + }}, + opts: nil, + err: testErr, + }, + { + payload: jwt.Payload{}, + alg: &jwtutil.Resolver{New: func(hd jwt.Header) (jwt.Algorithm, error) { + return nil, nil + }}, + opts: nil, + err: jwtutil.ErrNilAlg, + }, + { + payload: jwt.Payload{}, + alg: &jwtutil.Resolver{}, + opts: nil, + err: jwtutil.ErrNilAlg, + }, + } + for _, tc := range testCases { + t.Run(tc.alg.Name(), func(t *testing.T) { + token, err := jwt.Sign(tc.payload, tc.alg, tc.opts...) 
+ if want, got := tc.err, err; !internal.ErrorIs(got, want) { + t.Fatalf("jwt.Sign error mismatch (-want +got):\n%s", cmp.Diff(want, got)) + } + if err == nil && len(token) == 0 { + t.Fatalf("jwt.Sign return value is empty") + } + }) + } +} diff --git a/vendor/github.com/gbrlsnchs/jwt/time.go b/vendor/github.com/gbrlsnchs/jwt/time.go new file mode 100644 index 0000000000..ed3bbbb8eb --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/time.go @@ -0,0 +1,46 @@ +package jwt + +import ( + "encoding/json" + "time" + + "github.com/gbrlsnchs/jwt/v3/internal" +) + +// Time is the allowed format for time, as per the RFC 7519. +type Time struct { + time.Time +} + +// NumericDate is a resolved Unix time. +func NumericDate(tt time.Time) *Time { + if tt.Before(internal.Epoch) { + tt = internal.Epoch + } + return &Time{time.Unix(tt.Unix(), 0)} // set time using Unix time +} + +// MarshalJSON implements a marshaling function for time-related claims. +func (t Time) MarshalJSON() ([]byte, error) { + if t.Before(internal.Epoch) { + return json.Marshal(0) + } + return json.Marshal(t.Unix()) +} + +// UnmarshalJSON implements an unmarshaling function for time-related claims. 
+func (t *Time) UnmarshalJSON(b []byte) error { + var unix *int64 + if err := json.Unmarshal(b, &unix); err != nil { + return err + } + if unix == nil { + return nil + } + tt := time.Unix(*unix, 0) + if tt.Before(internal.Epoch) { + tt = internal.Epoch + } + t.Time = tt + return nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/time_test.go b/vendor/github.com/gbrlsnchs/jwt/time_test.go new file mode 100644 index 0000000000..7d9ee225cd --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/time_test.go @@ -0,0 +1,73 @@ +package jwt_test + +import ( + "encoding/json" + "testing" + "time" + + "github.com/gbrlsnchs/jwt/v3" + "github.com/gbrlsnchs/jwt/v3/internal" + "github.com/google/go-cmp/cmp" +) + +func TestTimeMarshalJSON(t *testing.T) { + now := time.Now() + testCases := []struct { + tt jwt.Time + want int64 + }{ + {jwt.Time{}, 0}, + {jwt.Time{now}, now.Unix()}, + {jwt.Time{now.Add(24 * time.Hour)}, now.Add(24 * time.Hour).Unix()}, + {jwt.Time{now.Add(24 * 30 * 12 * time.Hour)}, now.Add(24 * 30 * 12 * time.Hour).Unix()}, + } + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + b, err := tc.tt.MarshalJSON() + if err != nil { + t.Fatal(err) + } + var n int64 + if err = json.Unmarshal(b, &n); err != nil { + t.Fatal(err) + } + if want, got := tc.want, n; got != want { + t.Errorf("jwt.Time.Marshal mismatch (-want +got):\n%s", cmp.Diff(want, got)) + } + }) + } +} + +func TestTimeUnmarshalJSON(t *testing.T) { + now := time.Now() + testCases := []struct { + n int64 + want jwt.Time + isNil bool + }{ + {now.Unix(), jwt.Time{now}, false}, + {internal.Epoch.Unix() - 0xDEAD, jwt.Time{internal.Epoch}, false}, + {internal.Epoch.Unix(), jwt.Time{internal.Epoch}, false}, + {internal.Epoch.Unix() + 0xDEAD, jwt.Time{internal.Epoch.Add(0xDEAD * time.Second)}, false}, + {0, jwt.Time{}, true}, + } + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + var n *int64 + if !tc.isNil { + n = &tc.n + } + b, err := json.Marshal(n) + if err != nil { + t.Fatal(err) + 
} + var tt jwt.Time + if err = tt.UnmarshalJSON(b); err != nil { + t.Fatal(err) + } + if want, got := tc.want.Unix(), tt.Unix(); got != want { + t.Errorf("jwt.Time.Unmarshal mismatch (-want +got):\n%s", cmp.Diff(want, got)) + } + }) + } +} diff --git a/vendor/github.com/gbrlsnchs/jwt/tools/tools.go b/vendor/github.com/gbrlsnchs/jwt/tools/tools.go new file mode 100644 index 0000000000..e14181daf5 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/tools/tools.go @@ -0,0 +1,11 @@ +// +build tools + +package tools + +import ( + // These imports refer to tools intendend to be + // used while developing or running CI. + _ "github.com/magefile/mage" + _ "golang.org/x/lint/golint" + _ "golang.org/x/tools/cmd/goimports" +) diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/CHANGELOG.md b/vendor/github.com/gbrlsnchs/jwt/v3/CHANGELOG.md new file mode 100644 index 0000000000..c4ad9b2e1e --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/CHANGELOG.md @@ -0,0 +1,154 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +## [Unreleased] +### Added +- Signing and verifying using [RSA-PSS](https://en.wikipedia.org/wiki/Probabilistic_signature_scheme). +- Signing and verifying using [Ed25519](https://ed25519.cr.yp.to/). +- `Audience` type for handling the `aud` claim [according to the RFC](https://tools.ietf.org/html/rfc7519#section-4.1.3). +- `Size` method to `Signer` interface. +- `Verifier` interface. +- `RawToken` type. +- SHA constants. + +### Changed +- Improve performance by storing SHA hash functions in `sync.Pool`. +- Restructure `JWT` type by putting claims fields in a new struct. +- Change signing/verifying methods constructors' names. +- Unify signign/verifying methods constructors. +- Change `Signer` interface. +- Sign tokens with global function `Sign`. 
+- Verify tokens with global function `Verify`. + +### Removed +- Support for `go1.10`. +- `Marshal` and `Unmarshal` functions. +- `Marshaler` and `Unmarshaler` interfaces. + +## [2.0.0] - 2018-09-14 +### Added +- `Parse` and `ParseBytes` functions. +- `Marshal` and `Unmarshal` functions. +- `Marshaler` interface. +- `Unmarshaler` interface. +- Content type header parameter. + +### Changed +- Modify `Signer` signature. +- Add claims directly to `JWT` struct. +- Embed `header` to JWT. +- Add README texts, examples and usage. +- Rename `const.go` to `methods.go`. +- Add prefix `New` to signing methods constructors. +- Run `vgo` for testing (this enables testing the package against Go 1.10); + +### Removed +- `Sign` and `Verify` functions. +- Base64 encoding and deconding functions. +- `Options` struct. +- `Claims` struct. +- Functions that extract JWT from contexts and requests. + +## [1.1.0] - 2018-08-22 +### Changed +- Prevent expensive slice reallocation when signing a JWT. +- Refactor tests. + +### Fixed +- Signature of "none" algorithm. + +### Removed +- `internal` package. + +## [1.0.2] - 2018-07-19 +### Removed +- Makefile. +- Benchmark test (unused). + +## [1.0.1] - 2018-07-19 +### Fixed +- Wrap Travis CI Golang versions in quotes (for parsing issues, see [this](https://github.com/travis-ci/travis-ci/issues/9247)). + +## [1.0.0] - 2018-07-19 +### Added +- AppVeyor configuration file for running tests in Windows. +- `vgo` module file. + +### Changed +- `FromContext` now receives a context key as additional parameter. +- `FromContext` now tries to build a JWT if value in context is a string. +- Simplified Travis CI configuration file. +- Update README to explain the motivation to have created this library and its differences from other JWT libraries for Golang. + +## [0.5.0] - 2018-03-12 +### Added +- `FromContext` function to extract a JWT object from a context. +- `FromCookie` function to extract a JWT object from a cookie. 
+ +### Changed +- Split tests into several files in order to organize them. + +### Fixed +- Example in README file. + +## [0.4.0] - 2018-02-16 +### Added +- Support for "none" method. +- Tests for "none" method. +- Missing JWTID claim. +- Plugable validation via validator functions. + +### Changed +- `(*JWT).JWTID` method name to `(*JWT).ID`. + +### Fixed +- Message in `ErrECDSASigLen`. + +### Removed +- Comments from custom errors, since they are self-explanatory. + +## [0.3.0] - 2018-02-13 +### Changed +- Package structure. + +### Removed +- Additional packages (`jwtcrypto` and `jwtutil`). + +## [0.2.0] - 2018-02-06 +### Added +- New test cases. +- Claims' timestamps validation. + +### Changed +- Tests organization. +- Use `time.After` and `time.Before` for validating timestamps. +- `jwtcrypto/none.None` now implements `jwtcrypto.Signer`. + +### Fixed +- Panicking when private or public keys are `nil`. + +## 0.1.0 - 2018-02-06 +### Added +- This changelog file. +- README file. +- MIT License. +- Travis CI configuration file. +- Makefile. +- Git ignore file. +- EditorConfig file. +- This package's source code, including examples and tests. +- Go dep files. 
+ +[Unreleased]: https://github.com/gbrlsnchs/jwt/compare/v2.0.0...HEAD +[2.0.0]: https://github.com/gbrlsnchs/jwt/compare/v1.1.0...v2.0.0 +[1.1.0]: https://github.com/gbrlsnchs/jwt/compare/v1.0.2...v1.1.0 +[1.0.2]: https://github.com/gbrlsnchs/jwt/compare/v1.0.1...v1.0.2 +[1.0.1]: https://github.com/gbrlsnchs/jwt/compare/v1.0.0...v1.0.1 +[1.0.0]: https://github.com/gbrlsnchs/jwt/compare/v0.5.0...v1.0.0 +[0.5.0]: https://github.com/gbrlsnchs/jwt/compare/v0.4.0...v0.5.0 +[0.4.0]: https://github.com/gbrlsnchs/jwt/compare/v0.3.0...v0.4.0 +[0.3.0]: https://github.com/gbrlsnchs/jwt/compare/v0.2.0...v0.3.0 +[0.2.0]: https://github.com/gbrlsnchs/jwt/compare/v0.1.0...v0.2.0 diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/LICENSE b/vendor/github.com/gbrlsnchs/jwt/v3/LICENSE new file mode 100644 index 0000000000..f276fda569 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Gabriel Sanches + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/Makefile b/vendor/github.com/gbrlsnchs/jwt/v3/Makefile new file mode 100644 index 0000000000..f429d0c1d5 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/Makefile @@ -0,0 +1,19 @@ +export GO111MODULE ?= on + +all: export GO111MODULE := off +all: + go get -u golang.org/x/tools/cmd/goimports + go get -u golang.org/x/lint/golint + +fix: + @goimports -w *.go + +lint: + @! goimports -d . | grep -vF "no errors" + @golint -set_exit_status ./... + +bench: + @go test -v -run=^$$ -bench=. + +test: lint + @go test -v ./... diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/README.md b/vendor/github.com/gbrlsnchs/jwt/v3/README.md new file mode 100644 index 0000000000..bcf90f3b35 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/README.md @@ -0,0 +1,248 @@ +# jwt (JSON Web Token for Go) +[![JWT compatible](https://jwt.io/img/badge.svg)](https://jwt.io) + +[![CircleCI](https://circleci.com/gh/gbrlsnchs/jwt.svg?style=shield)](https://circleci.com/gh/gbrlsnchs/jwt) +[![Go Report Card](https://goreportcard.com/badge/github.com/gbrlsnchs/jwt)](https://goreportcard.com/report/github.com/gbrlsnchs/jwt) +[![GoDoc](https://godoc.org/github.com/gbrlsnchs/jwt?status.svg)](https://godoc.org/github.com/gbrlsnchs/jwt) +[![Join the chat at https://gitter.im/gbrlsnchs/jwt](https://badges.gitter.im/gbrlsnchs/jwt.svg)](https://gitter.im/gbrlsnchs/jwt?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) + +## Compatibility +[![Version Compatibility](https://img.shields.io/badge/go%20modules-go1.11+-5272b4.svg)](https://github.com/gbrlsnchs/jwt#installing) +[![vgo](https://img.shields.io/badge/vgo-go1.10-5272b4.svg)](https://github.com/gbrlsnchs/jwt#installing) +[![go get](https://img.shields.io/badge/go%20get-go1.9.7+,%20go1.10.3+%20and%20go1.11-5272b4.svg)](https://github.com/gbrlsnchs/jwt#installing) + +## About +This package is a JWT signer, verifier and validator for [Go](https://golang.org) (or Golang). 
+ +Although there are many JWT packages out there for Go, many lack support for some signing, verifying or validation methods and, when they don't, they're overcomplicated. This package tries to mimic the ease of use from [Node JWT library](https://github.com/auth0/node-jsonwebtoken)'s API while following the [Effective Go](https://golang.org/doc/effective_go.html) guidelines. + +Support for [JWE](https://tools.ietf.org/html/rfc7516) isn't provided (not yet but is in the roadmap, see #17). Instead, [JWS](https://tools.ietf.org/html/rfc7515) is used, narrowed down to the [JWT specification](https://tools.ietf.org/html/rfc7519). + +### Supported signing methods +| | SHA-256 | SHA-384 | SHA-512 | +|:-------:|:------------------:|:------------------:|:------------------:| +| HMAC | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | +| RSA | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | +| RSA-PSS | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | +| ECDSA | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | +| EdDSA | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_check_mark: | + +## Important +Branch `master` is unstable, **always** use tagged versions. That way it is possible to differentiate pre-release tags from production ones. +In other words, API changes all the time in `master`. It's a place for public experiment. Thus, make use of the latest stable version via Go modules. + +## Usage +Full documentation [here](https://godoc.org/github.com/gbrlsnchs/jwt). + +### Installing +
Go 1.12 onward +

+ +```sh +$ go get -u github.com/gbrlsnchs/jwt/v3 +``` + +

+
+ +
Go 1.11 +

+ +```sh +$ GO111MODULE=on go get -u github.com/gbrlsnchs/jwt/v3 +``` + +

+
+ +
Go 1.10 with vgo +

+ +```sh +$ vgo get -u github.com/gbrlsnchs/jwt/v3 +``` + +

+
+ +
Go 1.9.7+, Go 1.10.3+ (without vgo) and Go 1.11 (when GO111MODULE=off) +

+ +```sh +$ go get -u github.com/gbrlsnchs/jwt/v3 +``` + +#### Important +Your project must be inside the `GOPATH`. + +

+
+ +### Signing +```go +import ( + "time" + + "github.com/gbrlsnchs/jwt/v3" +) + +type CustomPayload struct { + jwt.Payload + Foo string `json:"foo,omitempty"` + Bar int `json:"bar,omitempty"` +} + +var hs = jwt.NewHS256([]byte("secret")) + +func main() { + now := time.Now() + pl := CustomPayload{ + Payload: jwt.Payload{ + Issuer: "gbrlsnchs", + Subject: "someone", + Audience: jwt.Audience{"https://golang.org", "https://jwt.io"}, + ExpirationTime: jwt.NumericDate(now.Add(24 * 30 * 12 * time.Hour)), + NotBefore: jwt.NumericDate(now.Add(30 * time.Minute)), + IssuedAt: jwt.NumericDate(now), + JWTID: "foobar", + }, + Foo: "foo", + Bar: 1337, + } + + token, err := jwt.Sign(pl, hs) + if err != nil { + // ... + } + + // ... +} +``` + +### Verifying +```go +import "github.com/gbrlsnchs/jwt/v3" + +type CustomPayload struct { + jwt.Payload + Foo string `json:"foo,omitempty"` + Bar int `json:"bar,omitempty"` +} + +var hs = jwt.NewHS256([]byte("secret")) + +func main() { + // ... + + var pl CustomPayload + hd, err := jwt.Verify(token, hs, &pl) + if err != nil { + // ... + } + + // ... +} +``` + +### Other use case examples +
Setting "cty" and "kid" claims +

+ +The "cty" and "kid" claims can be set by passing options to the `jwt.Sign` function: +```go +import ( + "time" + + "github.com/gbrlsnchs/jwt/v3" +) + +var hs = jwt.NewHS256([]byte("secret")) + +func main() { + pl := jwt.Payload{ + Subject: "gbrlsnchs", + Issuer: "gsr.dev", + IssuedAt: jwt.NumericDate(time.Now()), + } + + token, err := jwt.Sign(pl, hs, jwt.ContentType("JWT"), jwt.KeyID("my_key")) + if err != nil { + // ... + } + + // ... +} +``` + +

+
+ +
Validating "alg" before verifying +

+ +For validating the "alg" field in a JOSE header **before** verification, the `jwt.ValidateHeader` option must be passed to `jwt.Verify`. +```go +import "github.com/gbrlsnchs/jwt/v3" + +var hs = jwt.NewHS256([]byte("secret")) + +func main() { + // ... + + var pl jwt.Payload + if _, err := jwt.Verify(token, hs, &pl, jwt.ValidateHeader); err != nil { + // ... + } + + // ... +} +``` + +

+
+ +
Using an Algorithm resolver +

+ +```go +import ( + "errors" + + "github.com/gbrlsnchs/jwt/v3" + "github.com/gbrlsnchs/jwt/v3/jwtutil" +) + +var ( + // ... + + rs256 = jwt.NewRS256(jwt.RSAPublicKey(myRSAPublicKey)) + es256 = jwt.NewES256(jwt.ECDSAPublicKey(myECDSAPublicKey)) +) + +func main() { + rv := &jwtutil.Resolver{New: func(hd jwt.Header) { + switch hd.KeyID { + case "foo": + return rs256, nil + case "bar": + return es256, nil + default: + return nil, errors.New(`invalid "kid"`) + } + }} + var pl jwt.Payload + if _, err := jwt.Verify(token, rv, &pl); err != nil { + // ... + } + + // ... +} +``` + +

+
+ +## Contributing +### How to help +- For bugs and opinions, please [open an issue](https://github.com/gbrlsnchs/jwt/issues/new) +- For pushing changes, please [open a pull request](https://github.com/gbrlsnchs/jwt/compare) diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/algorithm.go b/vendor/github.com/gbrlsnchs/jwt/v3/algorithm.go new file mode 100644 index 0000000000..59c4ae82f1 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/algorithm.go @@ -0,0 +1,15 @@ +package jwt + +import ( + // Load all hashing functions needed. + _ "crypto/sha256" + _ "crypto/sha512" +) + +// Algorithm is an algorithm for both signing and verifying a JWT. +type Algorithm interface { + Name() string + Sign(headerPayload []byte) ([]byte, error) + Size() int + Verify(headerPayload, sig []byte) error +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/audience.go b/vendor/github.com/gbrlsnchs/jwt/v3/audience.go new file mode 100644 index 0000000000..4fc3175a23 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/audience.go @@ -0,0 +1,43 @@ +package jwt + +import "encoding/json" + +// Audience is a special claim that may either be +// a single string or an array of strings, as per the RFC 7519. +type Audience []string + +// MarshalJSON implements a marshaling function for "aud" claim. +func (a Audience) MarshalJSON() ([]byte, error) { + switch len(a) { + case 0: + return json.Marshal("") // nil or empty slice returns an empty string + case 1: + return json.Marshal(a[0]) + default: + return json.Marshal([]string(a)) + } +} + +// UnmarshalJSON implements an unmarshaling function for "aud" claim. 
+func (a *Audience) UnmarshalJSON(b []byte) error { + var ( + v interface{} + err error + ) + if err = json.Unmarshal(b, &v); err != nil { + return err + } + switch vv := v.(type) { + case string: + aud := make(Audience, 1) + aud[0] = vv + *a = aud + case []interface{}: + aud := make(Audience, len(vv)) + for i := range vv { + aud[i] = vv[i].(string) + } + *a = aud + } + return nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/audience_test.go b/vendor/github.com/gbrlsnchs/jwt/v3/audience_test.go new file mode 100644 index 0000000000..28aec269b4 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/audience_test.go @@ -0,0 +1,91 @@ +package jwt_test + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/gbrlsnchs/jwt/v3" +) + +func TestAudienceMarshal(t *testing.T) { + t.Run("omitempty", func(t *testing.T) { + var ( + b []byte + err error + v = struct { + Audience jwt.Audience `json:"aud,omitempty"` + }{} + ) + if b, err = json.Marshal(v); err != nil { + t.Fatal(err) + } + checkAudMarshal(t, "{}", b) + + }) + + testCases := []struct { + aud jwt.Audience + expected string + }{ + {jwt.Audience{"foo"}, `"foo"`}, + {jwt.Audience{"foo", "bar"}, `["foo","bar"]`}, + {nil, `""`}, + {jwt.Audience{}, `""`}, + {jwt.Audience{""}, `""`}, + } + for _, tc := range testCases { + t.Run(tc.expected, func(t *testing.T) { + var ( + b []byte + err error + ) + if tc.aud != nil { + if b, err = tc.aud.MarshalJSON(); err != nil { + t.Fatal(err) + } + checkAudMarshal(t, tc.expected, b) + } + if b, err = json.Marshal(tc.aud); err != nil { + t.Fatal(err) + } + checkAudMarshal(t, tc.expected, b) + }) + } +} + +func TestAudienceUnmarshal(t *testing.T) { + testCases := []struct { + jstr []byte + expected jwt.Audience + }{ + {[]byte(`"foo"`), jwt.Audience{"foo"}}, + {[]byte(`["foo","bar"]`), jwt.Audience{"foo", "bar"}}, + {[]byte("[]"), jwt.Audience{}}, + } + for _, tc := range testCases { + t.Run(string(tc.jstr), func(t *testing.T) { + var aud jwt.Audience + if err := 
aud.UnmarshalJSON(tc.jstr); err != nil { + t.Fatal(err) + } + checkAudUnmarshal(t, tc.expected, aud) + if err := json.Unmarshal(tc.jstr, &aud); err != nil { + t.Fatal(err) + } + checkAudUnmarshal(t, tc.expected, aud) + }) + } +} + +func checkAudMarshal(t *testing.T, want string, got []byte) { + if want != string(got) { + t.Errorf("want %q, got %q", want, got) + } +} + +func checkAudUnmarshal(t *testing.T, want, got jwt.Audience) { + if !reflect.DeepEqual(want, got) { + t.Errorf("want %v, got %v", want, got) + } +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/bench_test.go b/vendor/github.com/gbrlsnchs/jwt/v3/bench_test.go new file mode 100644 index 0000000000..572d6434a3 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/bench_test.go @@ -0,0 +1,80 @@ +package jwt_test + +import ( + "testing" + "time" + + "github.com/gbrlsnchs/jwt/v3" +) + +var ( + benchHS256 = jwt.NewHS256([]byte("secret")) + benchRecv []byte +) + +func BenchmarkSign(b *testing.B) { + now := time.Now() + var ( + token []byte + err error + pl = jwt.Payload{ + Issuer: "gbrlsnchs", + Subject: "someone", + Audience: jwt.Audience{"https://golang.org", "https://jwt.io"}, + ExpirationTime: jwt.NumericDate(now.Add(24 * 30 * 12 * time.Hour)), + NotBefore: jwt.NumericDate(now.Add(30 * time.Minute)), + IssuedAt: jwt.NumericDate(now), + JWTID: "foobar", + } + ) + b.Run("Default", func(b *testing.B) { + b.ReportAllocs() + for n := 0; n < b.N; n++ { + token, err = jwt.Sign(pl, benchHS256) + if err != nil { + b.Fatal(err) + } + } + }) + b.Run(`With "kid"`, func(b *testing.B) { + b.ReportAllocs() + for n := 0; n < b.N; n++ { + token, err = jwt.Sign(pl, benchHS256, jwt.KeyID("kid")) + if err != nil { + b.Fatal(err) + } + } + }) + b.Run(`With "cty" and "kid"`, func(b *testing.B) { + b.ReportAllocs() + for n := 0; n < b.N; n++ { + token, err = jwt.Sign(pl, benchHS256, jwt.ContentType("cty"), jwt.KeyID("kid")) + if err != nil { + b.Fatal(err) + } + } + }) + + benchRecv = token + +} + +func BenchmarkVerify(b 
*testing.B) { + var ( + token = []byte( + "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9." + + "eyJpc3MiOiJnYnJsc25jaHMiLCJzdWIiOiJzb21lb25lIiwiYXVkIjpbImh0dHBzOi8vZ29sYW5nLm9yZyIsImh0dHBzOi8vand0LmlvIl0sImV4cCI6MTU5MzM5MTE4MiwibmJmIjoxNTYyMjg4OTgyLCJpYXQiOjE1NjIyODcxODIsImp0aSI6ImZvb2JhciJ9." + + "bKevp7jmMbH9-Hy5g5OxLgq8tg13z9voH7lZ4m9y484", + ) + err error + ) + b.Run("Default", func(b *testing.B) { + b.ReportAllocs() + for n := 0; n < b.N; n++ { + var pl jwt.Payload + if _, err = jwt.Verify(token, benchHS256, &pl); err != nil { + b.Fatal(err) + } + } + }) +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/doc.go b/vendor/github.com/gbrlsnchs/jwt/v3/doc.go new file mode 100644 index 0000000000..51632cd856 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/doc.go @@ -0,0 +1,2 @@ +// Package jwt is a JSON Web Token signer, verifier and validator. +package jwt diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/ecdsa_sha.go b/vendor/github.com/gbrlsnchs/jwt/v3/ecdsa_sha.go new file mode 100644 index 0000000000..3e87f16c58 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/ecdsa_sha.go @@ -0,0 +1,149 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" + + "github.com/gbrlsnchs/jwt/v3/internal" +) + +var ( + // ErrECDSANilPrivKey is the error for trying to sign a JWT with a nil private key. + ErrECDSANilPrivKey = errors.New("jwt: ECDSA private key is nil") + // ErrECDSANilPubKey is the error for trying to verify a JWT with a nil public key. + ErrECDSANilPubKey = errors.New("jwt: ECDSA public key is nil") + // ErrECDSAVerification is the error for an invalid ECDSA signature. + ErrECDSAVerification = errors.New("jwt: ECDSA verification failed") + + _ Algorithm = new(ECDSASHA) +) + +// ECDSAPrivateKey is an option to set a private key to the ECDSA-SHA algorithm. 
+func ECDSAPrivateKey(priv *ecdsa.PrivateKey) func(*ECDSASHA) { + return func(es *ECDSASHA) { + es.priv = priv + } +} + +// ECDSAPublicKey is an option to set a public key to the ECDSA-SHA algorithm. +func ECDSAPublicKey(pub *ecdsa.PublicKey) func(*ECDSASHA) { + return func(es *ECDSASHA) { + es.pub = pub + } +} + +func byteSize(bitSize int) int { + byteSize := bitSize / 8 + if bitSize%8 > 0 { + return byteSize + 1 + } + return byteSize +} + +// ECDSASHA is an algorithm that uses ECDSA to sign SHA hashes. +type ECDSASHA struct { + name string + priv *ecdsa.PrivateKey + pub *ecdsa.PublicKey + sha crypto.Hash + size int + + pool *hashPool +} + +func newECDSASHA(name string, opts []func(*ECDSASHA), sha crypto.Hash) *ECDSASHA { + es := ECDSASHA{ + name: name, + sha: sha, + pool: newHashPool(sha.New), + } + for _, opt := range opts { + opt(&es) + } + if es.pub == nil { + es.pub = &es.priv.PublicKey + } + es.size = byteSize(es.pub.Params().BitSize) * 2 + return &es +} + +// NewES256 creates a new algorithm using ECDSA and SHA-256. +func NewES256(opts ...func(*ECDSASHA)) *ECDSASHA { + return newECDSASHA("ES256", opts, crypto.SHA256) +} + +// NewES384 creates a new algorithm using ECDSA and SHA-384. +func NewES384(opts ...func(*ECDSASHA)) *ECDSASHA { + return newECDSASHA("ES384", opts, crypto.SHA384) +} + +// NewES512 creates a new algorithm using ECDSA and SHA-512. +func NewES512(opts ...func(*ECDSASHA)) *ECDSASHA { + return newECDSASHA("ES512", opts, crypto.SHA512) +} + +// Name returns the algorithm's name. +func (es *ECDSASHA) Name() string { + return es.name +} + +// Sign signs headerPayload using the ECDSA-SHA algorithm. +func (es *ECDSASHA) Sign(headerPayload []byte) ([]byte, error) { + if es.priv == nil { + return nil, ErrECDSANilPrivKey + } + return es.sign(headerPayload) +} + +// Size returns the signature's byte size. +func (es *ECDSASHA) Size() int { + return es.size +} + +// Verify verifies a signature based on headerPayload using ECDSA-SHA. 
+func (es *ECDSASHA) Verify(headerPayload, sig []byte) (err error) { + if es.pub == nil { + return ErrECDSANilPubKey + } + if sig, err = internal.DecodeToBytes(sig); err != nil { + return err + } + byteSize := byteSize(es.pub.Params().BitSize) + if len(sig) != byteSize*2 { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:byteSize]) + s := big.NewInt(0).SetBytes(sig[byteSize:]) + sum, err := es.pool.sign(headerPayload) + if err != nil { + return err + } + if !ecdsa.Verify(es.pub, sum, r, s) { + return ErrECDSAVerification + } + return nil +} + +func (es *ECDSASHA) sign(headerPayload []byte) ([]byte, error) { + sum, err := es.pool.sign(headerPayload) + if err != nil { + return nil, err + } + r, s, err := ecdsa.Sign(rand.Reader, es.priv, sum) + if err != nil { + return nil, err + } + byteSize := byteSize(es.priv.Params().BitSize) + rbytes := r.Bytes() + rsig := make([]byte, byteSize) + copy(rsig[byteSize-len(rbytes):], rbytes) + + sbytes := s.Bytes() + ssig := make([]byte, byteSize) + copy(ssig[byteSize-len(sbytes):], sbytes) + return append(rsig, ssig...), nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/ecdsa_sha_test.go b/vendor/github.com/gbrlsnchs/jwt/v3/ecdsa_sha_test.go new file mode 100644 index 0000000000..58a602904b --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/ecdsa_sha_test.go @@ -0,0 +1,26 @@ +package jwt_test + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" +) + +var ( + es256PrivateKey1, es256PublicKey1 = genECDSAKeys(elliptic.P256()) + es256PrivateKey2, es256PublicKey2 = genECDSAKeys(elliptic.P256()) + + es384PrivateKey1, es384PublicKey1 = genECDSAKeys(elliptic.P384()) + es384PrivateKey2, es384PublicKey2 = genECDSAKeys(elliptic.P384()) + + es512PrivateKey1, es512PublicKey1 = genECDSAKeys(elliptic.P521()) + es512PrivateKey2, es512PublicKey2 = genECDSAKeys(elliptic.P521()) +) + +func genECDSAKeys(c elliptic.Curve) (*ecdsa.PrivateKey, *ecdsa.PublicKey) { + priv, err := ecdsa.GenerateKey(c, rand.Reader) 
+ if err != nil { + panic(err) + } + return priv, &priv.PublicKey +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/ed25519_go1_12.go b/vendor/github.com/gbrlsnchs/jwt/v3/ed25519_go1_12.go new file mode 100644 index 0000000000..070aeb1493 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/ed25519_go1_12.go @@ -0,0 +1,85 @@ +// +build !go1.13 + +package jwt + +import ( + "errors" + + "github.com/gbrlsnchs/jwt/v3/internal" + "golang.org/x/crypto/ed25519" +) + +var ( + // ErrEd25519PrivKey is the error for trying to sign a JWT with a nil private key. + ErrEd25519PrivKey = errors.New("jwt: Ed25519 private key is nil") + // ErrEd25519PubKey is the error for trying to verify a JWT with a nil public key. + ErrEd25519PubKey = errors.New("jwt: Ed25519 public key is nil") + // ErrEd25519Verification is the error for when verification with Ed25519 fails. + ErrEd25519Verification = errors.New("jwt: Ed25519 verification failed") + + _ Algorithm = new(Ed25519) +) + +// Ed25519PrivateKey is an option to set a private key to the Ed25519 algorithm. +func Ed25519PrivateKey(priv ed25519.PrivateKey) func(*Ed25519) { + return func(ed *Ed25519) { + ed.priv = priv + } +} + +// Ed25519PublicKey is an option to set a public key to the Ed25519 algorithm. +func Ed25519PublicKey(pub ed25519.PublicKey) func(*Ed25519) { + return func(ed *Ed25519) { + ed.pub = pub + } +} + +// Ed25519 is an algorithm that uses EdDSA to sign SHA-512 hashes. +type Ed25519 struct { + priv ed25519.PrivateKey + pub ed25519.PublicKey +} + +// NewEd25519 creates a new algorithm using EdDSA and SHA-512. +func NewEd25519(opts ...func(*Ed25519)) *Ed25519 { + var ed Ed25519 + for _, opt := range opts { + opt(&ed) + } + if ed.pub == nil { + ed.pub = ed.priv.Public().(ed25519.PublicKey) + } + return &ed +} + +// Name returns the algorithm's name. +func (*Ed25519) Name() string { + return "Ed25519" +} + +// Sign signs headerPayload using the Ed25519 algorithm. 
+func (ed *Ed25519) Sign(headerPayload []byte) ([]byte, error) { + if ed.priv == nil { + return nil, ErrEd25519PrivKey + } + return ed25519.Sign(ed.priv, headerPayload), nil +} + +// Size returns the signature byte size. +func (*Ed25519) Size() int { + return ed25519.SignatureSize +} + +// Verify verifies a payload and a signature. +func (ed *Ed25519) Verify(payload, sig []byte) (err error) { + if ed.pub == nil { + return ErrEd25519PubKey + } + if sig, err = internal.DecodeToBytes(sig); err != nil { + return err + } + if !ed25519.Verify(ed.pub, payload, sig) { + return ErrEd25519Verification + } + return nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/ed25519_test.go b/vendor/github.com/gbrlsnchs/jwt/v3/ed25519_test.go new file mode 100644 index 0000000000..e57692ab07 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/ed25519_test.go @@ -0,0 +1,8 @@ +package jwt_test + +import "github.com/gbrlsnchs/jwt/v3/internal" + +var ( + ed25519PrivateKey1, ed25519PublicKey1 = internal.GenerateEd25519Keys() + ed25519PrivateKey2, ed25519PublicKey2 = internal.GenerateEd25519Keys() +) diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/go.mod b/vendor/github.com/gbrlsnchs/jwt/v3/go.mod new file mode 100644 index 0000000000..f93a6276e6 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/go.mod @@ -0,0 +1,5 @@ +module github.com/gbrlsnchs/jwt/v3 + +require golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 + +go 1.10 diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/go.sum b/vendor/github.com/gbrlsnchs/jwt/v3/go.sum new file mode 100644 index 0000000000..ab4e5085d7 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/go.sum @@ -0,0 +1,2 @@ +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/hash_pool.go b/vendor/github.com/gbrlsnchs/jwt/v3/hash_pool.go new file 
mode 100644 index 0000000000..89aa4cb9a0 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/hash_pool.go @@ -0,0 +1,27 @@ +package jwt + +import ( + "hash" + "sync" +) + +type hashPool struct { + *sync.Pool +} + +func newHashPool(hfunc func() hash.Hash) *hashPool { + return &hashPool{&sync.Pool{New: func() interface{} { return hfunc() }}} +} + +func (hp *hashPool) sign(headerPayload []byte) ([]byte, error) { + hh := hp.Pool.Get().(hash.Hash) + defer func() { + hh.Reset() + hp.Pool.Put(hh) + }() + + if _, err := hh.Write(headerPayload); err != nil { + return nil, err + } + return hh.Sum(nil), nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/header.go b/vendor/github.com/gbrlsnchs/jwt/v3/header.go new file mode 100644 index 0000000000..ae4fb6dfb4 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/header.go @@ -0,0 +1,11 @@ +package jwt + +// Header is a JOSE header narrowed down to the JWT specification from RFC 7519. +// +// Parameters are ordered according to the RFC 7515. +type Header struct { + Algorithm string `json:"alg,omitempty"` + ContentType string `json:"cty,omitempty"` + KeyID string `json:"kid,omitempty"` + Type string `json:"typ,omitempty"` +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/hmac_sha.go b/vendor/github.com/gbrlsnchs/jwt/v3/hmac_sha.go new file mode 100644 index 0000000000..91c54d2b56 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/hmac_sha.go @@ -0,0 +1,86 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" + "hash" + + "github.com/gbrlsnchs/jwt/v3/internal" +) + +var ( + // ErrHMACMissingKey is the error for trying to sign or verify a JWT with an empty key. + ErrHMACMissingKey = errors.New("jwt: HMAC key is empty") + // ErrHMACVerification is the error for an invalid signature. + ErrHMACVerification = errors.New("jwt: HMAC verification failed") + + _ Algorithm = new(HMACSHA) +) + +// HMACSHA is an algorithm that uses HMAC to sign SHA hashes. 
+type HMACSHA struct { + name string + key []byte + sha crypto.Hash + size int + pool *hashPool +} + +func newHMACSHA(name string, key []byte, sha crypto.Hash) *HMACSHA { + return &HMACSHA{ + name: name, // cache name + key: key, + sha: sha, + size: sha.Size(), // cache size + pool: newHashPool(func() hash.Hash { return hmac.New(sha.New, key) }), + } +} + +// NewHS256 creates a new algorithm using HMAC and SHA-256. +func NewHS256(key []byte) *HMACSHA { + return newHMACSHA("HS256", key, crypto.SHA256) +} + +// NewHS384 creates a new algorithm using HMAC and SHA-384. +func NewHS384(key []byte) *HMACSHA { + return newHMACSHA("HS384", key, crypto.SHA384) +} + +// NewHS512 creates a new algorithm using HMAC and SHA-512. +func NewHS512(key []byte) *HMACSHA { + return newHMACSHA("HS512", key, crypto.SHA512) +} + +// Name returns the algorithm's name. +func (hs *HMACSHA) Name() string { + return hs.name +} + +// Sign signs headerPayload using the HMAC-SHA algorithm. +func (hs *HMACSHA) Sign(headerPayload []byte) ([]byte, error) { + if string(hs.key) == "" { + return nil, ErrHMACMissingKey + } + return hs.pool.sign(headerPayload) +} + +// Size returns the signature's byte size. +func (hs *HMACSHA) Size() int { + return hs.size +} + +// Verify verifies a signature based on headerPayload using HMAC-SHA. 
+func (hs *HMACSHA) Verify(headerPayload, sig []byte) (err error) { + if sig, err = internal.DecodeToBytes(sig); err != nil { + return err + } + sig2, err := hs.Sign(headerPayload) + if err != nil { + return err + } + if !hmac.Equal(sig, sig2) { + return ErrHMACVerification + } + return nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/hmac_sha_test.go b/vendor/github.com/gbrlsnchs/jwt/v3/hmac_sha_test.go new file mode 100644 index 0000000000..70f52b9318 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/hmac_sha_test.go @@ -0,0 +1,6 @@ +package jwt_test + +var ( + hmacKey1 = []byte("secret") + hmacKey2 = []byte("terces") +) diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/internal/decode.go b/vendor/github.com/gbrlsnchs/jwt/v3/internal/decode.go new file mode 100644 index 0000000000..f03525e501 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/internal/decode.go @@ -0,0 +1,25 @@ +package internal + +import ( + "encoding/base64" + "encoding/json" +) + +// Decode decodes a Base64 encoded JSON object using the proper encoding for JWTs. +func Decode(enc []byte, v interface{}) error { + dec, err := DecodeToBytes(enc) + if err != nil { + return err + } + return json.Unmarshal(dec, v) +} + +// DecodeToBytes decodes a Base64 string using the proper encoding for JWTs. 
+func DecodeToBytes(enc []byte) ([]byte, error) { + encoding := base64.RawURLEncoding + dec := make([]byte, encoding.DecodedLen(len(enc))) + if _, err := encoding.Decode(dec, enc); err != nil { + return nil, err + } + return dec, nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/internal/decode_test.go b/vendor/github.com/gbrlsnchs/jwt/v3/internal/decode_test.go new file mode 100644 index 0000000000..6f172d7617 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/internal/decode_test.go @@ -0,0 +1,52 @@ +package internal_test + +import ( + "encoding/base64" + "testing" + + "github.com/gbrlsnchs/jwt/v3/internal" +) + +var ( + stdEnc = base64.StdEncoding + rawURLEnc = base64.RawURLEncoding +) + +type decodeTest struct { + X string `json:"x,omitempty"` +} + +func TestDecode(t *testing.T) { + testCases := []struct { + encoding *base64.Encoding + json string + expected string + errors bool + }{ + {rawURLEnc, "{}", "", false}, + {rawURLEnc, `{"x":"test"}`, "test", false}, + {stdEnc, "{}", "", true}, + {stdEnc, `{"x":"test"}`, "test", false}, // the output is the same as with RawURLEncoding + {nil, "{}", "", true}, + {nil, `{"x":"test"}`, "", true}, + } + for _, tc := range testCases { + t.Run(tc.json, func(t *testing.T) { + b64 := tc.json + if tc.encoding != nil { + b64 = tc.encoding.EncodeToString([]byte(tc.json)) + } + t.Logf("b64: %s", b64) + var ( + dt decodeTest + err = internal.Decode([]byte(b64), &dt) + ) + if want, got := tc.errors, err != nil; want != got { + t.Fatalf("want %t, got %t: %v", want, got, err) + } + if want, got := tc.expected, dt.X; want != got { + t.Errorf("want %q, got %q", want, got) + } + }) + } +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/internal/ed25519_go1_12.go b/vendor/github.com/gbrlsnchs/jwt/v3/internal/ed25519_go1_12.go new file mode 100644 index 0000000000..3436a4c64b --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/internal/ed25519_go1_12.go @@ -0,0 +1,18 @@ +// +build !go1.13 + +package internal + +import ( + 
"crypto/rand" + + "golang.org/x/crypto/ed25519" +) + +// GenerateEd25519Keys generates a pair of keys for testing purposes. +func GenerateEd25519Keys() (ed25519.PrivateKey, ed25519.PublicKey) { + pub, priv, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + panic(err) + } + return priv, pub +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/internal/epoch.go b/vendor/github.com/gbrlsnchs/jwt/v3/internal/epoch.go new file mode 100644 index 0000000000..494c09e6bb --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/internal/epoch.go @@ -0,0 +1,6 @@ +package internal + +import "time" + +// Epoch is 01/01/1970. +var Epoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/internal/rsa_signature_size.go b/vendor/github.com/gbrlsnchs/jwt/v3/internal/rsa_signature_size.go new file mode 100644 index 0000000000..ed99e7bfe6 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/internal/rsa_signature_size.go @@ -0,0 +1,10 @@ +// +build go1.11 + +package internal + +import "crypto/rsa" + +// RSASignatureSize returns the signature size of an RSA signature. +func RSASignatureSize(pub *rsa.PublicKey) int { + return pub.Size() +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/internal/rsa_signature_size_go1_10.go b/vendor/github.com/gbrlsnchs/jwt/v3/internal/rsa_signature_size_go1_10.go new file mode 100644 index 0000000000..55a12aa3a5 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/internal/rsa_signature_size_go1_10.go @@ -0,0 +1,11 @@ +// +build !go1.11 + +package internal + +import "crypto/rsa" + +// RSASignatureSize returns the signature size of an RSA signature. +func RSASignatureSize(pub *rsa.PublicKey) int { + // As defined at https://golang.org/src/crypto/rsa/rsa.go?s=1609:1641#L39. 
+	return (pub.N.BitLen() + 7) / 8
+}
diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/jwtutil/resolver.go b/vendor/github.com/gbrlsnchs/jwt/v3/jwtutil/resolver.go
new file mode 100644
index 0000000000..748291af05
--- /dev/null
+++ b/vendor/github.com/gbrlsnchs/jwt/v3/jwtutil/resolver.go
@@ -0,0 +1,46 @@
+package jwtutil
+
+import (
+	"errors"
+
+	"github.com/gbrlsnchs/jwt/v3"
+)
+
+// Resolver is an Algorithm resolver.
+type Resolver struct {
+	New func(jwt.Header) (jwt.Algorithm, error)
+	alg jwt.Algorithm
+}
+
+// Name returns an Algorithm's name.
+func (rv *Resolver) Name() string {
+	return rv.alg.Name()
+}
+
+// Resolve sets an Algorithm based on a JOSE Header.
+func (rv *Resolver) Resolve(hd jwt.Header) error {
+	if rv.alg != nil {
+		return nil
+	}
+	alg, err := rv.New(hd)
+	if err != nil {
+		return err
+	}
+	rv.alg = alg
+	return nil
+}
+
+// Sign returns an error since Resolver doesn't support signing.
+func (rv *Resolver) Sign(_ []byte) ([]byte, error) {
+	return nil, errors.New("jwtutil: Resolver can only verify")
+}
+
+// Size returns an Algorithm's size.
+func (rv *Resolver) Size() int {
+	return rv.alg.Size()
+}
+
+// Verify resolves an Algorithm and verifies using it.
+func (rv *Resolver) Verify(headerPayload, sig []byte) error { + return rv.alg.Verify(headerPayload, sig) +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/jwtutil/resolver_test.go b/vendor/github.com/gbrlsnchs/jwt/v3/jwtutil/resolver_test.go new file mode 100644 index 0000000000..f5e7b0a68a --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/jwtutil/resolver_test.go @@ -0,0 +1,52 @@ +package jwtutil_test + +import ( + "errors" + "testing" + + "github.com/gbrlsnchs/jwt/v3" + "github.com/gbrlsnchs/jwt/v3/jwtutil" +) + +var hs256 = jwt.NewHS256([]byte("resolver")) + +func TestResolver(t *testing.T) { + testCases := []struct { + signer jwt.Algorithm + signOpts []jwt.SignOption + verifier jwt.Algorithm + }{ + { + signer: hs256, + verifier: &jwtutil.Resolver{ + New: func(hd jwt.Header) (jwt.Algorithm, error) { + return hs256, nil + }, + }, + }, + { + signer: hs256, + signOpts: []jwt.SignOption{jwt.KeyID("test")}, + verifier: &jwtutil.Resolver{ + New: func(hd jwt.Header) (jwt.Algorithm, error) { + if hd.KeyID != "test" { + return nil, errors.New(`wrong "kid"`) + } + return hs256, nil + }, + }, + }, + } + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + token, err := jwt.Sign(jwt.Payload{}, tc.signer, tc.signOpts...) + if err != nil { + t.Fatal(err) + } + var pl jwt.Payload + if _, err = jwt.Verify(token, tc.verifier, &pl); err != nil { + t.Fatal(err) + } + }) + } +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/none.go b/vendor/github.com/gbrlsnchs/jwt/v3/none.go new file mode 100644 index 0000000000..d1a4f1647d --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/none.go @@ -0,0 +1,20 @@ +package jwt + +var _ Algorithm = none{} + +type none struct{} + +// None returns a dull, unsecured algorithm. +func None() Algorithm { return none{} } + +// Name always returns "none". +func (none) Name() string { return "none" } + +// Sign always returns a nil byte slice and a nil error. 
+func (none) Sign(_ []byte) ([]byte, error) { return nil, nil } + +// Size always returns 0 and a nil error. +func (none) Size() int { return 0 } + +// Verify always returns a nil error. +func (none) Verify(_, _ []byte) error { return nil } diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/none_test.go b/vendor/github.com/gbrlsnchs/jwt/v3/none_test.go new file mode 100644 index 0000000000..30619ec668 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/none_test.go @@ -0,0 +1 @@ +package jwt_test diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/payload.go b/vendor/github.com/gbrlsnchs/jwt/v3/payload.go new file mode 100644 index 0000000000..c8329a6ba4 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/payload.go @@ -0,0 +1,12 @@ +package jwt + +// Payload is a JWT payload according to the RFC 7519. +type Payload struct { + Issuer string `json:"iss,omitempty"` + Subject string `json:"sub,omitempty"` + Audience Audience `json:"aud,omitempty"` + ExpirationTime *Time `json:"exp,omitempty"` + NotBefore *Time `json:"nbf,omitempty"` + IssuedAt *Time `json:"iat,omitempty"` + JWTID string `json:"jti,omitempty"` +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/raw_token.go b/vendor/github.com/gbrlsnchs/jwt/v3/raw_token.go new file mode 100644 index 0000000000..e393350af4 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/raw_token.go @@ -0,0 +1,52 @@ +package jwt + +import ( + "errors" + + "github.com/gbrlsnchs/jwt/v3/internal" +) + +// ErrMalformed indicates a token doesn't have a valid format, as per the RFC 7519. +var ErrMalformed = errors.New("jwt: malformed token") + +// RawToken is a representation of a parsed JWT string. 
+type RawToken struct { + token []byte + sep1, sep2 int + + hd Header + alg Algorithm + + pl *Payload + vds []Validator +} + +func (rt *RawToken) header() []byte { return rt.token[:rt.sep1] } +func (rt *RawToken) headerPayload() []byte { return rt.token[:rt.sep2] } +func (rt *RawToken) payload() []byte { return rt.token[rt.sep1+1 : rt.sep2] } +func (rt *RawToken) sig() []byte { return rt.token[rt.sep2+1:] } + +func (rt *RawToken) setToken(token []byte, sep1, sep2 int) { + rt.sep1 = sep1 + rt.sep2 = sep1 + 1 + sep2 + rt.token = token +} + +func (rt *RawToken) decode(payload interface{}) (err error) { + if err = internal.Decode(rt.payload(), payload); err != nil { + return err + } + for _, vd := range rt.vds { + if err = vd(rt.pl); err != nil { + return err + } + } + return nil +} + +func (rt *RawToken) decodeHeader() error { + if err := internal.Decode(rt.header(), &rt.hd); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/resolver.go b/vendor/github.com/gbrlsnchs/jwt/v3/resolver.go new file mode 100644 index 0000000000..08f7767310 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/resolver.go @@ -0,0 +1,7 @@ +package jwt + +// Resolver is an Algorithm that needs to set some variables +// based on a Header before performing signing and verification. +type Resolver interface { + Resolve(Header) error +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/rsa_sha.go b/vendor/github.com/gbrlsnchs/jwt/v3/rsa_sha.go new file mode 100644 index 0000000000..792fd34feb --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/rsa_sha.go @@ -0,0 +1,146 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "errors" + + "github.com/gbrlsnchs/jwt/v3/internal" +) + +var ( + // ErrRSANilPrivKey is the error for trying to sign a JWT with a nil private key. + ErrRSANilPrivKey = errors.New("jwt: RSA private key is nil") + // ErrRSANilPubKey is the error for trying to verify a JWT with a nil public key. 
+ ErrRSANilPubKey = errors.New("jwt: RSA public key is nil") + // ErrRSAVerification is the error for an invalid RSA signature. + ErrRSAVerification = errors.New("jwt: RSA verification failed") + + _ Algorithm = new(RSASHA) +) + +// RSAPrivateKey is an option to set a private key to the RSA-SHA algorithm. +func RSAPrivateKey(priv *rsa.PrivateKey) func(*RSASHA) { + return func(rs *RSASHA) { + rs.priv = priv + } +} + +// RSAPublicKey is an option to set a public key to the RSA-SHA algorithm. +func RSAPublicKey(pub *rsa.PublicKey) func(*RSASHA) { + return func(rs *RSASHA) { + rs.pub = pub + } +} + +// RSASHA is an algorithm that uses RSA to sign SHA hashes. +type RSASHA struct { + name string + priv *rsa.PrivateKey + pub *rsa.PublicKey + sha crypto.Hash + size int + pool *hashPool + opts *rsa.PSSOptions +} + +func newRSASHA(name string, opts []func(*RSASHA), sha crypto.Hash, pss bool) *RSASHA { + rs := RSASHA{ + name: name, // cache name + sha: sha, + pool: newHashPool(sha.New), + } + for _, opt := range opts { + opt(&rs) + } + if rs.pub == nil { + rs.pub = &rs.priv.PublicKey + } + rs.size = internal.RSASignatureSize(rs.pub) // cache size + if pss { + rs.opts = &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: sha, + } + } + return &rs +} + +// NewRS256 creates a new algorithm using RSA and SHA-256. +func NewRS256(opts ...func(*RSASHA)) *RSASHA { + return newRSASHA("RS256", opts, crypto.SHA256, false) +} + +// NewRS384 creates a new algorithm using RSA and SHA-384. +func NewRS384(opts ...func(*RSASHA)) *RSASHA { + return newRSASHA("RS384", opts, crypto.SHA384, false) +} + +// NewRS512 creates a new algorithm using RSA and SHA-512. +func NewRS512(opts ...func(*RSASHA)) *RSASHA { + return newRSASHA("RS512", opts, crypto.SHA512, false) +} + +// NewPS256 creates a new algorithm using RSA-PSS and SHA-256. 
+func NewPS256(opts ...func(*RSASHA)) *RSASHA { + return newRSASHA("PS256", opts, crypto.SHA256, true) +} + +// NewPS384 creates a new algorithm using RSA-PSS and SHA-384. +func NewPS384(opts ...func(*RSASHA)) *RSASHA { + return newRSASHA("PS384", opts, crypto.SHA384, true) +} + +// NewPS512 creates a new algorithm using RSA-PSS and SHA-512. +func NewPS512(opts ...func(*RSASHA)) *RSASHA { + return newRSASHA("PS512", opts, crypto.SHA512, true) +} + +// Name returns the algorithm's name. +func (rs *RSASHA) Name() string { + return rs.name +} + +// Sign signs headerPayload using either RSA-SHA or RSA-PSS-SHA algorithms. +func (rs *RSASHA) Sign(headerPayload []byte) ([]byte, error) { + if rs.priv == nil { + return nil, ErrRSANilPrivKey + } + sum, err := rs.pool.sign(headerPayload) + if err != nil { + return nil, err + } + if rs.opts != nil { + return rsa.SignPSS(rand.Reader, rs.priv, rs.sha, sum, rs.opts) + } + return rsa.SignPKCS1v15(rand.Reader, rs.priv, rs.sha, sum) +} + +// Size returns the signature's byte size. +func (rs *RSASHA) Size() int { + return rs.size +} + +// Verify verifies a signature based on headerPayload using either RSA-SHA or RSA-PSS-SHA. 
+func (rs *RSASHA) Verify(headerPayload, sig []byte) (err error) { + if rs.pub == nil { + return ErrRSANilPubKey + } + if sig, err = internal.DecodeToBytes(sig); err != nil { + return err + } + sum, err := rs.pool.sign(headerPayload) + if err != nil { + return err + } + if rs.opts != nil { + err = rsa.VerifyPSS(rs.pub, rs.sha, sum, sig, rs.opts) + } else { + err = rsa.VerifyPKCS1v15(rs.pub, rs.sha, sum, sig) + } + if err != nil { + return ErrRSAVerification + } + return nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/rsa_sha_test.go b/vendor/github.com/gbrlsnchs/jwt/v3/rsa_sha_test.go new file mode 100644 index 0000000000..7c231f957c --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/rsa_sha_test.go @@ -0,0 +1,19 @@ +package jwt_test + +import ( + "crypto/rand" + "crypto/rsa" +) + +var ( + rsaPrivateKey1, rsaPublicKey1 = genRSAKeys() + rsaPrivateKey2, rsaPublicKey2 = genRSAKeys() +) + +func genRSAKeys() (*rsa.PrivateKey, *rsa.PublicKey) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + panic(err) + } + return priv, &priv.PublicKey +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/sign.go b/vendor/github.com/gbrlsnchs/jwt/v3/sign.go new file mode 100644 index 0000000000..6db5b369e4 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/sign.go @@ -0,0 +1,70 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" +) + +// SignOption is a functional option for signing. +type SignOption func(*Header) + +// ContentType sets the "cty" claim for a Header before signing. +func ContentType(cty string) SignOption { + return func(hd *Header) { + hd.ContentType = cty + } +} + +// KeyID sets the "kid" claim for a Header before signing. +func KeyID(kid string) SignOption { + return func(hd *Header) { + hd.KeyID = kid + } +} + +// Sign signs a payload with alg. 
+func Sign(payload interface{}, alg Algorithm, opts ...SignOption) ([]byte, error) { + var hd Header + for _, opt := range opts { + opt(&hd) + } + if rv, ok := alg.(Resolver); ok { + if err := rv.Resolve(hd); err != nil { + return nil, err + } + } + // Override some values or set them if empty. + hd.Algorithm = alg.Name() + hd.Type = "JWT" + // Marshal the header part of the JWT. + hb, err := json.Marshal(hd) + if err != nil { + return nil, err + } + + if payload == nil { + payload = Payload{} + } + // Marshal the claims part of the JWT. + pb, err := json.Marshal(payload) + if err != nil { + return nil, err + } + + enc := base64.RawURLEncoding + h64len := enc.EncodedLen(len(hb)) + p64len := enc.EncodedLen(len(pb)) + sig64len := enc.EncodedLen(alg.Size()) + token := make([]byte, h64len+1+p64len+1+sig64len) + + enc.Encode(token, hb) + token[h64len] = '.' + enc.Encode(token[h64len+1:], pb) + sig, err := alg.Sign(token[:h64len+1+p64len]) + if err != nil { + return nil, err + } + token[h64len+1+p64len] = '.' 
+ enc.Encode(token[h64len+1+p64len+1:], sig) + return token, nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/sign_test.go b/vendor/github.com/gbrlsnchs/jwt/v3/sign_test.go new file mode 100644 index 0000000000..1a4976b676 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/sign_test.go @@ -0,0 +1,821 @@ +package jwt_test + +import ( + "reflect" + "testing" + "time" + + "github.com/gbrlsnchs/jwt/v3" +) + +type testPayload struct { + jwt.Payload + String string `json:"string,omitempty"` + Int int `json:"int,omitempty"` +} + +var ( + now = time.Now() + tp = testPayload{ + Payload: jwt.Payload{ + Issuer: "gbrlsnchs", + Subject: "someone", + Audience: jwt.Audience{"https://golang.org", "https://jwt.io"}, + ExpirationTime: jwt.NumericDate(now.Add(24 * 30 * 12 * time.Hour)), + NotBefore: jwt.NumericDate(now.Add(30 * time.Minute)), + IssuedAt: jwt.NumericDate(now), + JWTID: "foobar", + }, + String: "foobar", + Int: 1337, + } +) + +func TestSign(t *testing.T) { + type testCase struct { + alg jwt.Algorithm + payload interface{} + + verifyAlg jwt.Algorithm + opts []func(*jwt.RawToken) + wantHeader jwt.Header + wantPayload testPayload + + signErr error + verifyErr error + } + testCases := map[string][]testCase{ + "HMAC": []testCase{ + { + alg: jwt.NewHS256(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS256(hmacKey1), + wantHeader: jwt.Header{ + Algorithm: "HS256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewHS256(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS256(hmacKey2), + wantHeader: jwt.Header{ + Algorithm: "HS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrHMACVerification, + }, + { + alg: jwt.NewHS256(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS384(hmacKey1), + wantHeader: jwt.Header{ + Algorithm: "HS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrHMACVerification, + }, + { + alg: jwt.NewHS384(hmacKey1), + payload: 
tp, + verifyAlg: jwt.NewHS384(hmacKey1), + wantHeader: jwt.Header{ + Algorithm: "HS384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewHS384(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS384(hmacKey2), + wantHeader: jwt.Header{ + Algorithm: "HS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrHMACVerification, + }, + { + alg: jwt.NewHS384(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS256(hmacKey1), + wantHeader: jwt.Header{ + Algorithm: "HS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrHMACVerification, + }, + { + alg: jwt.NewHS512(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS512(hmacKey1), + wantHeader: jwt.Header{ + Algorithm: "HS512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewHS512(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS512(hmacKey2), + wantHeader: jwt.Header{ + Algorithm: "HS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrHMACVerification, + }, + { + alg: jwt.NewHS512(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS256(hmacKey1), + wantHeader: jwt.Header{ + Algorithm: "HS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrHMACVerification, + }, + }, + "RSA": []testCase{ + { + alg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + 
payload: tp, + verifyAlg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "RS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS256(jwt.RSAPublicKey(rsaPublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS256(jwt.RSAPublicKey(rsaPublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "RS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "RS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS384(jwt.RSAPublicKey(rsaPublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: 
jwt.NewRS384(jwt.RSAPublicKey(rsaPublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "RS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "RS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS512(jwt.RSAPublicKey(rsaPublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS512(jwt.RSAPublicKey(rsaPublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "RS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + }, + "RSA-PSS": []testCase{ + { + alg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: 
jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "PS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS256(jwt.RSAPublicKey(rsaPublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS256(jwt.RSAPublicKey(rsaPublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "PS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: 
jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "PS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS384(jwt.RSAPublicKey(rsaPublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS384(jwt.RSAPublicKey(rsaPublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "PS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: 
jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "PS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS512(jwt.RSAPublicKey(rsaPublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS512(jwt.RSAPublicKey(rsaPublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "PS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + }, + "ECDSA": []testCase{ + { + alg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES256(jwt.ECDSAPublicKey(es256PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES384(jwt.ECDSAPublicKey(es256PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES256(jwt.ECDSAPublicKey(es256PublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "ES256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey1)), + 
payload: tp, + verifyAlg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "ES256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES384(jwt.ECDSAPublicKey(es384PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES256(jwt.ECDSAPublicKey(es384PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES384(jwt.ECDSAPublicKey(es384PublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "ES384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "ES384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES512(jwt.ECDSAPublicKey(es512PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: 
jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES384(jwt.ECDSAPublicKey(es512PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES512(jwt.ECDSAPublicKey(es512PublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "ES512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "ES512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + }, + "Ed25519": []testCase{ + { + alg: jwt.NewEd25519(jwt.Ed25519PrivateKey(ed25519PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewEd25519(jwt.Ed25519PrivateKey(ed25519PrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "Ed25519", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewEd25519(jwt.Ed25519PrivateKey(ed25519PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewEd25519(jwt.Ed25519PublicKey(ed25519PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "Ed25519", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewEd25519(jwt.Ed25519PrivateKey(ed25519PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewEd25519(jwt.Ed25519PrivateKey(ed25519PrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "Ed25519", + Type: "JWT", + }, + 
wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrEd25519Verification, + }, + { + alg: jwt.NewEd25519(jwt.Ed25519PrivateKey(ed25519PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewEd25519(jwt.Ed25519PublicKey(ed25519PublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "Ed25519", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrEd25519Verification, + }, + }, + } + for k, v := range testCases { + t.Run(k, func(t *testing.T) { + for _, tc := range v { + t.Run(tc.alg.Name(), func(t *testing.T) { + token, err := jwt.Sign(tc.payload, tc.alg) + if want, got := tc.signErr, err; got != want { + t.Fatalf("want %v, got %v", want, got) + } + if err != nil { + return + } + + var ( + hd jwt.Header + payload testPayload + ) + hd, err = jwt.Verify(token, tc.verifyAlg, &payload) + if want, got := tc.verifyErr, err; got != want { + t.Fatalf("want %v, got %v", want, got) + } + if want, got := tc.wantHeader, hd; !reflect.DeepEqual(got, want) { + t.Errorf("want %#+v, got %#+v", want, got) + } + if want, got := tc.wantPayload, payload; !reflect.DeepEqual(got, want) { + t.Errorf("want %#+v, got %#+v", want, got) + } + }) + } + }) + } +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/time.go b/vendor/github.com/gbrlsnchs/jwt/v3/time.go new file mode 100644 index 0000000000..ed3bbbb8eb --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/time.go @@ -0,0 +1,46 @@ +package jwt + +import ( + "encoding/json" + "time" + + "github.com/gbrlsnchs/jwt/v3/internal" +) + +// Time is the allowed format for time, as per the RFC 7519. +type Time struct { + time.Time +} + +// NumericDate is a resolved Unix time. +func NumericDate(tt time.Time) *Time { + if tt.Before(internal.Epoch) { + tt = internal.Epoch + } + return &Time{time.Unix(tt.Unix(), 0)} // set time using Unix time +} + +// MarshalJSON implements a marshaling function for time-related claims. 
+func (t Time) MarshalJSON() ([]byte, error) { + if t.Before(internal.Epoch) { + return json.Marshal(0) + } + return json.Marshal(t.Unix()) +} + +// UnmarshalJSON implements an unmarshaling function for time-related claims. +func (t *Time) UnmarshalJSON(b []byte) error { + var unix *int64 + if err := json.Unmarshal(b, &unix); err != nil { + return err + } + if unix == nil { + return nil + } + tt := time.Unix(*unix, 0) + if tt.Before(internal.Epoch) { + tt = internal.Epoch + } + t.Time = tt + return nil +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/time_test.go b/vendor/github.com/gbrlsnchs/jwt/v3/time_test.go new file mode 100644 index 0000000000..eccef0474a --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/time_test.go @@ -0,0 +1,72 @@ +package jwt_test + +import ( + "encoding/json" + "testing" + "time" + + "github.com/gbrlsnchs/jwt/v3" + "github.com/gbrlsnchs/jwt/v3/internal" +) + +func TestTimeMarshalJSON(t *testing.T) { + now := time.Now() + testCases := []struct { + tt jwt.Time + want int64 + }{ + {jwt.Time{}, 0}, + {jwt.Time{now}, now.Unix()}, + {jwt.Time{now.Add(24 * time.Hour)}, now.Add(24 * time.Hour).Unix()}, + {jwt.Time{now.Add(24 * 30 * 12 * time.Hour)}, now.Add(24 * 30 * 12 * time.Hour).Unix()}, + } + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + b, err := tc.tt.MarshalJSON() + if err != nil { + t.Fatal(err) + } + var n int64 + if err = json.Unmarshal(b, &n); err != nil { + t.Fatal(err) + } + if want, got := tc.want, n; got != want { + t.Errorf("want %d, got %d", want, got) + } + }) + } +} + +func TestTimeUnmarshalJSON(t *testing.T) { + now := time.Now() + testCases := []struct { + n int64 + want jwt.Time + isNil bool + }{ + {now.Unix(), jwt.Time{now}, false}, + {internal.Epoch.Unix() - 1337, jwt.Time{internal.Epoch}, false}, + {internal.Epoch.Unix(), jwt.Time{internal.Epoch}, false}, + {internal.Epoch.Unix() + 1337, jwt.Time{internal.Epoch.Add(1337 * time.Second)}, false}, + {0, jwt.Time{}, true}, + } + for _, tc := range 
testCases { + t.Run("", func(t *testing.T) { + var n *int64 + if !tc.isNil { + n = &tc.n + } + b, err := json.Marshal(n) + if err != nil { + t.Fatal(err) + } + var tt jwt.Time + if err = tt.UnmarshalJSON(b); err != nil { + t.Fatal(err) + } + if want, got := tc.want, tt; got.Unix() != want.Unix() { + t.Errorf("want %d, got %d", want.Unix(), got.Unix()) + } + }) + } +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/validators.go b/vendor/github.com/gbrlsnchs/jwt/v3/validators.go new file mode 100644 index 0000000000..f16b90f010 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/validators.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "errors" + "time" +) + +var ( + // ErrAudValidation is the error for an invalid "aud" claim. + ErrAudValidation = errors.New("jwt: aud claim is invalid") + // ErrExpValidation is the error for an invalid "exp" claim. + ErrExpValidation = errors.New("jwt: exp claim is invalid") + // ErrIatValidation is the error for an invalid "iat" claim. + ErrIatValidation = errors.New("jwt: iat claim is invalid") + // ErrIssValidation is the error for an invalid "iss" claim. + ErrIssValidation = errors.New("jwt: iss claim is invalid") + // ErrJtiValidation is the error for an invalid "jti" claim. + ErrJtiValidation = errors.New("jwt: jti claim is invalid") + // ErrNbfValidation is the error for an invalid "nbf" claim. + ErrNbfValidation = errors.New("jwt: nbf claim is invalid") + // ErrSubValidation is the error for an invalid "sub" claim. + ErrSubValidation = errors.New("jwt: sub claim is invalid") +) + +// Validator is a function that validates a Payload pointer. +type Validator func(*Payload) error + +// AudienceValidator validates the "aud" claim. +// It checks if at least one of the audiences in the JWT's payload is listed in aud. 
+func AudienceValidator(aud Audience) Validator { + return func(pl *Payload) error { + for _, serverAud := range aud { + for _, clientAud := range pl.Audience { + if clientAud == serverAud { + return nil + } + } + } + return ErrAudValidation + } +} + +// ExpirationTimeValidator validates the "exp" claim. +func ExpirationTimeValidator(now time.Time) Validator { + return func(pl *Payload) error { + if pl.ExpirationTime == nil || NumericDate(now).After(pl.ExpirationTime.Time) { + return ErrExpValidation + } + return nil + } +} + +// IssuedAtValidator validates the "iat" claim. +func IssuedAtValidator(now time.Time) Validator { + return func(pl *Payload) error { + if pl.IssuedAt != nil && NumericDate(now).Before(pl.IssuedAt.Time) { + return ErrIatValidation + } + return nil + } +} + +// IssuerValidator validates the "iss" claim. +func IssuerValidator(iss string) Validator { + return func(pl *Payload) error { + if pl.Issuer != iss { + return ErrIssValidation + } + return nil + } +} + +// IDValidator validates the "jti" claim. +func IDValidator(jti string) Validator { + return func(pl *Payload) error { + if pl.JWTID != jti { + return ErrJtiValidation + } + return nil + } +} + +// NotBeforeValidator validates the "nbf" claim. +func NotBeforeValidator(now time.Time) Validator { + return func(pl *Payload) error { + if pl.NotBefore != nil && NumericDate(now).Before(pl.NotBefore.Time) { + return ErrNbfValidation + } + return nil + } +} + +// SubjectValidator validates the "sub" claim. 
+func SubjectValidator(sub string) Validator { + return func(pl *Payload) error { + if pl.Subject != sub { + return ErrSubValidation + } + return nil + } +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/validators_test.go b/vendor/github.com/gbrlsnchs/jwt/v3/validators_test.go new file mode 100644 index 0000000000..69ddee1633 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/validators_test.go @@ -0,0 +1,57 @@ +package jwt_test + +import ( + "testing" + "time" + + "github.com/gbrlsnchs/jwt/v3" +) + +func TestValidators(t *testing.T) { + now := time.Now() + iat := jwt.NumericDate(now) + exp := jwt.NumericDate(now.Add(24 * time.Hour)) + nbf := jwt.NumericDate(now.Add(15 * time.Second)) + jti := "jti" + aud := jwt.Audience{"aud", "aud1", "aud2", "aud3"} + sub := "sub" + iss := "iss" + testCases := []struct { + claim string + pl *jwt.Payload + vl jwt.Validator + err error + }{ + {"iss", &jwt.Payload{Issuer: iss}, jwt.IssuerValidator("iss"), nil}, + {"iss", &jwt.Payload{Issuer: iss}, jwt.IssuerValidator("not_iss"), jwt.ErrIssValidation}, + {"sub", &jwt.Payload{Subject: sub}, jwt.SubjectValidator("sub"), nil}, + {"sub", &jwt.Payload{Subject: sub}, jwt.SubjectValidator("not_sub"), jwt.ErrSubValidation}, + {"aud", &jwt.Payload{Audience: aud}, jwt.AudienceValidator(jwt.Audience{"aud"}), nil}, + {"aud", &jwt.Payload{Audience: aud}, jwt.AudienceValidator(jwt.Audience{"foo", "aud1"}), nil}, + {"aud", &jwt.Payload{Audience: aud}, jwt.AudienceValidator(jwt.Audience{"bar", "aud2"}), nil}, + {"aud", &jwt.Payload{Audience: aud}, jwt.AudienceValidator(jwt.Audience{"baz", "aud3"}), nil}, + {"aud", &jwt.Payload{Audience: aud}, jwt.AudienceValidator(jwt.Audience{"qux", "aud4"}), jwt.ErrAudValidation}, + {"aud", &jwt.Payload{Audience: aud}, jwt.AudienceValidator(jwt.Audience{"not_aud"}), jwt.ErrAudValidation}, + {"exp", &jwt.Payload{ExpirationTime: exp}, jwt.ExpirationTimeValidator(now), nil}, + {"exp", &jwt.Payload{ExpirationTime: exp}, 
jwt.ExpirationTimeValidator(time.Unix(now.Unix()-int64(24*time.Hour), 0)), nil}, + {"exp", &jwt.Payload{ExpirationTime: exp}, jwt.ExpirationTimeValidator(time.Unix(now.Unix()+int64(24*time.Hour), 0)), jwt.ErrExpValidation}, + {"exp", &jwt.Payload{}, jwt.ExpirationTimeValidator(time.Now()), jwt.ErrExpValidation}, + {"nbf", &jwt.Payload{NotBefore: nbf}, jwt.NotBeforeValidator(now), jwt.ErrNbfValidation}, + {"nbf", &jwt.Payload{NotBefore: nbf}, jwt.NotBeforeValidator(time.Unix(now.Unix()+int64(15*time.Second), 0)), nil}, + {"nbf", &jwt.Payload{NotBefore: nbf}, jwt.NotBeforeValidator(time.Unix(now.Unix()-int64(15*time.Second), 0)), jwt.ErrNbfValidation}, + {"nbf", &jwt.Payload{}, jwt.NotBeforeValidator(time.Now()), nil}, + {"iat", &jwt.Payload{IssuedAt: iat}, jwt.IssuedAtValidator(now), nil}, + {"iat", &jwt.Payload{IssuedAt: iat}, jwt.IssuedAtValidator(time.Unix(now.Unix()+1, 0)), nil}, + {"iat", &jwt.Payload{IssuedAt: iat}, jwt.IssuedAtValidator(time.Unix(now.Unix()-1, 0)), jwt.ErrIatValidation}, + {"iat", &jwt.Payload{}, jwt.IssuedAtValidator(time.Now()), nil}, + {"jti", &jwt.Payload{JWTID: jti}, jwt.IDValidator("jti"), nil}, + {"jti", &jwt.Payload{JWTID: jti}, jwt.IDValidator("not_jti"), jwt.ErrJtiValidation}, + } + for _, tc := range testCases { + t.Run(tc.claim, func(t *testing.T) { + if want, got := tc.err, tc.vl(tc.pl); want != got { + t.Errorf("want %v, got %v", want, got) + } + }) + } +} diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/verify.go b/vendor/github.com/gbrlsnchs/jwt/v3/verify.go new file mode 100644 index 0000000000..ecbd9a8f5c --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/verify.go @@ -0,0 +1,72 @@ +package jwt + +import ( + "bytes" + "errors" +) + +// ErrAlgValidation indicates an incoming JWT's "alg" field mismatches the Validator's. +var ErrAlgValidation = errors.New(`"alg" field mismatch`) + +// VerifyOption is a functional option for verifying. 
+type VerifyOption func(*RawToken) error + +// Verify verifies a token's signature using alg. Before verification, opts is iterated and +// each option in it is run. +func Verify(token []byte, alg Algorithm, payload interface{}, opts ...VerifyOption) (Header, error) { + rt := &RawToken{ + alg: alg, + } + + sep1 := bytes.IndexByte(token, '.') + if sep1 < 0 { + return rt.hd, ErrMalformed + } + + cbytes := token[sep1+1:] + sep2 := bytes.IndexByte(cbytes, '.') + if sep2 < 0 { + return rt.hd, ErrMalformed + } + rt.setToken(token, sep1, sep2) + + var err error + if err = rt.decodeHeader(); err != nil { + return rt.hd, err + } + if rv, ok := alg.(Resolver); ok { + if err = rv.Resolve(rt.hd); err != nil { + return rt.hd, err + } + } + for _, opt := range opts { + if err = opt(rt); err != nil { + return rt.hd, err + } + } + if err = alg.Verify(rt.headerPayload(), rt.sig()); err != nil { + return rt.hd, err + } + return rt.hd, rt.decode(payload) +} + +// ValidateHeader checks whether the algorithm contained +// in the JOSE header is the same used by the algorithm. +func ValidateHeader(rt *RawToken) error { + if rt.alg.Name() != rt.hd.Algorithm { + return ErrAlgValidation + } + return nil +} + +// ValidatePayload runs validators against a Payload after it's been decoded. +func ValidatePayload(pl *Payload, vds ...Validator) VerifyOption { + return func(rt *RawToken) error { + rt.pl = pl + rt.vds = vds + return nil + } +} + +// Compile-time checks. 
+var _ VerifyOption = ValidateHeader diff --git a/vendor/github.com/gbrlsnchs/jwt/v3/verify_test.go b/vendor/github.com/gbrlsnchs/jwt/v3/verify_test.go new file mode 100644 index 0000000000..0bd4cee64b --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/v3/verify_test.go @@ -0,0 +1,790 @@ +package jwt_test + +import ( + "reflect" + "testing" + + "github.com/gbrlsnchs/jwt/v3" +) + +func TestVerify(t *testing.T) { + type testCase struct { + alg jwt.Algorithm + payload interface{} + + verifyAlg jwt.Algorithm + opts []func(*jwt.RawToken) + wantHeader jwt.Header + wantPayload testPayload + + signErr error + verifyErr error + } + testCases := map[string][]testCase{ + "HMAC": []testCase{ + { + alg: jwt.NewHS256(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS256(hmacKey1), + wantHeader: jwt.Header{ + Algorithm: "HS256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewHS256(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS256(hmacKey2), + wantHeader: jwt.Header{ + Algorithm: "HS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrHMACVerification, + }, + { + alg: jwt.NewHS256(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS384(hmacKey1), + wantHeader: jwt.Header{ + Algorithm: "HS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrHMACVerification, + }, + { + alg: jwt.NewHS384(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS384(hmacKey1), + wantHeader: jwt.Header{ + Algorithm: "HS384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewHS384(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS384(hmacKey2), + wantHeader: jwt.Header{ + Algorithm: "HS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrHMACVerification, + }, + { + alg: jwt.NewHS384(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS256(hmacKey1), + wantHeader: jwt.Header{ + Algorithm: "HS384", + Type: "JWT", + 
}, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrHMACVerification, + }, + { + alg: jwt.NewHS512(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS512(hmacKey1), + wantHeader: jwt.Header{ + Algorithm: "HS512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewHS512(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS512(hmacKey2), + wantHeader: jwt.Header{ + Algorithm: "HS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrHMACVerification, + }, + { + alg: jwt.NewHS512(hmacKey1), + payload: tp, + verifyAlg: jwt.NewHS256(hmacKey1), + wantHeader: jwt.Header{ + Algorithm: "HS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrHMACVerification, + }, + }, + "RSA": []testCase{ + { + alg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "RS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS256(jwt.RSAPublicKey(rsaPublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: 
tp, + verifyAlg: jwt.NewRS256(jwt.RSAPublicKey(rsaPublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "RS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "RS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS384(jwt.RSAPublicKey(rsaPublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS384(jwt.RSAPublicKey(rsaPublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "RS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: 
jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "RS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS512(jwt.RSAPublicKey(rsaPublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "RS512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS512(jwt.RSAPublicKey(rsaPublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "RS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + }, + "RSA-PSS": []testCase{ + { + alg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: 
tp, + verifyAlg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "PS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS256(jwt.RSAPublicKey(rsaPublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS256(jwt.RSAPublicKey(rsaPublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "PS256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS256(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "PS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + 
verifyAlg: jwt.NewPS384(jwt.RSAPublicKey(rsaPublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS384(jwt.RSAPublicKey(rsaPublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "PS384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewRS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS384(jwt.RSAPrivateKey(rsaPrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "PS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + { + alg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: jwt.NewPS512(jwt.RSAPublicKey(rsaPublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "PS512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewPS512(jwt.RSAPrivateKey(rsaPrivateKey1)), + payload: tp, + verifyAlg: 
jwt.NewPS512(jwt.RSAPublicKey(rsaPublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "PS512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrRSAVerification, + }, + }, + "ECDSA": []testCase{ + { + alg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES256(jwt.ECDSAPublicKey(es256PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES384(jwt.ECDSAPublicKey(es256PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES256(jwt.ECDSAPublicKey(es256PublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "ES256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES256", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES256(jwt.ECDSAPrivateKey(es256PrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "ES256", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES384(jwt.ECDSAPublicKey(es384PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: 
jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES256(jwt.ECDSAPublicKey(es384PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES384(jwt.ECDSAPublicKey(es384PublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "ES384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES384", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES384(jwt.ECDSAPrivateKey(es384PrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "ES384", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES512(jwt.ECDSAPublicKey(es512PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES384(jwt.ECDSAPublicKey(es512PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES512(jwt.ECDSAPublicKey(es512PublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "ES512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + 
verifyErr: jwt.ErrECDSAVerification, + }, + { + alg: jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "ES512", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewES512(jwt.ECDSAPrivateKey(es512PrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "ES512", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrECDSAVerification, + }, + }, + "Ed25519": []testCase{ + { + alg: jwt.NewEd25519(jwt.Ed25519PrivateKey(ed25519PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewEd25519(jwt.Ed25519PrivateKey(ed25519PrivateKey1)), + wantHeader: jwt.Header{ + Algorithm: "Ed25519", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewEd25519(jwt.Ed25519PrivateKey(ed25519PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewEd25519(jwt.Ed25519PublicKey(ed25519PublicKey1)), + wantHeader: jwt.Header{ + Algorithm: "Ed25519", + Type: "JWT", + }, + wantPayload: tp, + signErr: nil, + verifyErr: nil, + }, + { + alg: jwt.NewEd25519(jwt.Ed25519PrivateKey(ed25519PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewEd25519(jwt.Ed25519PrivateKey(ed25519PrivateKey2)), + wantHeader: jwt.Header{ + Algorithm: "Ed25519", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrEd25519Verification, + }, + { + alg: jwt.NewEd25519(jwt.Ed25519PrivateKey(ed25519PrivateKey1)), + payload: tp, + verifyAlg: jwt.NewEd25519(jwt.Ed25519PublicKey(ed25519PublicKey2)), + wantHeader: jwt.Header{ + Algorithm: "Ed25519", + Type: "JWT", + }, + wantPayload: testPayload{}, + signErr: nil, + verifyErr: jwt.ErrEd25519Verification, + }, + }, + } + for k, v := range testCases { + t.Run(k, func(t *testing.T) { + for _, tc := range v { + t.Run(tc.verifyAlg.Name(), func(t *testing.T) { + 
token, err := jwt.Sign(tc.payload, tc.alg) + if err != nil { + t.Fatal(err) + } + var pl testPayload + hd, err := jwt.Verify(token, tc.verifyAlg, &pl) + if want, got := tc.verifyErr, err; got != want { + t.Errorf("want %v, got %v", want, got) + } + if want, got := tc.wantHeader, hd; !reflect.DeepEqual(got, want) { + t.Errorf("want %#+v, got %#+v", want, got) + } + if want, got := tc.wantPayload, pl; !reflect.DeepEqual(got, want) { + t.Errorf("want %#+v, got %#+v", want, got) + } + }) + } + }) + } +} diff --git a/vendor/github.com/gbrlsnchs/jwt/validators.go b/vendor/github.com/gbrlsnchs/jwt/validators.go new file mode 100644 index 0000000000..0cfa0b47e1 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/validators.go @@ -0,0 +1,102 @@ +package jwt + +import ( + "time" + + "github.com/gbrlsnchs/jwt/v3/internal" +) + +var ( + // ErrAudValidation is the error for an invalid "aud" claim. + ErrAudValidation = internal.NewError("jwt: aud claim is invalid") + // ErrExpValidation is the error for an invalid "exp" claim. + ErrExpValidation = internal.NewError("jwt: exp claim is invalid") + // ErrIatValidation is the error for an invalid "iat" claim. + ErrIatValidation = internal.NewError("jwt: iat claim is invalid") + // ErrIssValidation is the error for an invalid "iss" claim. + ErrIssValidation = internal.NewError("jwt: iss claim is invalid") + // ErrJtiValidation is the error for an invalid "jti" claim. + ErrJtiValidation = internal.NewError("jwt: jti claim is invalid") + // ErrNbfValidation is the error for an invalid "nbf" claim. + ErrNbfValidation = internal.NewError("jwt: nbf claim is invalid") + // ErrSubValidation is the error for an invalid "sub" claim. + ErrSubValidation = internal.NewError("jwt: sub claim is invalid") +) + +// Validator is a function that validates a Payload pointer. +type Validator func(*Payload) error + +// AudienceValidator validates the "aud" claim. +// It checks if at least one of the audiences in the JWT's payload is listed in aud. 
+func AudienceValidator(aud Audience) Validator { + return func(pl *Payload) error { + for _, serverAud := range aud { + for _, clientAud := range pl.Audience { + if clientAud == serverAud { + return nil + } + } + } + return ErrAudValidation + } +} + +// ExpirationTimeValidator validates the "exp" claim. +func ExpirationTimeValidator(now time.Time) Validator { + return func(pl *Payload) error { + if pl.ExpirationTime == nil || NumericDate(now).After(pl.ExpirationTime.Time) { + return ErrExpValidation + } + return nil + } +} + +// IssuedAtValidator validates the "iat" claim. +func IssuedAtValidator(now time.Time) Validator { + return func(pl *Payload) error { + if pl.IssuedAt != nil && NumericDate(now).Before(pl.IssuedAt.Time) { + return ErrIatValidation + } + return nil + } +} + +// IssuerValidator validates the "iss" claim. +func IssuerValidator(iss string) Validator { + return func(pl *Payload) error { + if pl.Issuer != iss { + return ErrIssValidation + } + return nil + } +} + +// IDValidator validates the "jti" claim. +func IDValidator(jti string) Validator { + return func(pl *Payload) error { + if pl.JWTID != jti { + return ErrJtiValidation + } + return nil + } +} + +// NotBeforeValidator validates the "nbf" claim. +func NotBeforeValidator(now time.Time) Validator { + return func(pl *Payload) error { + if pl.NotBefore != nil && NumericDate(now).Before(pl.NotBefore.Time) { + return ErrNbfValidation + } + return nil + } +} + +// SubjectValidator validates the "sub" claim. 
+func SubjectValidator(sub string) Validator { + return func(pl *Payload) error { + if pl.Subject != sub { + return ErrSubValidation + } + return nil + } +} diff --git a/vendor/github.com/gbrlsnchs/jwt/validators_test.go b/vendor/github.com/gbrlsnchs/jwt/validators_test.go new file mode 100644 index 0000000000..86c64e7427 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/validators_test.go @@ -0,0 +1,59 @@ +package jwt_test + +import ( + "testing" + "time" + + "github.com/gbrlsnchs/jwt/v3" + "github.com/gbrlsnchs/jwt/v3/internal" + "github.com/google/go-cmp/cmp" +) + +func TestValidators(t *testing.T) { + now := time.Now() + iat := jwt.NumericDate(now) + exp := jwt.NumericDate(now.Add(24 * time.Hour)) + nbf := jwt.NumericDate(now.Add(15 * time.Second)) + jti := "jti" + aud := jwt.Audience{"aud", "aud1", "aud2", "aud3"} + sub := "sub" + iss := "iss" + testCases := []struct { + claim string + pl *jwt.Payload + vl jwt.Validator + err error + }{ + {"iss", &jwt.Payload{Issuer: iss}, jwt.IssuerValidator("iss"), nil}, + {"iss", &jwt.Payload{Issuer: iss}, jwt.IssuerValidator("not_iss"), jwt.ErrIssValidation}, + {"sub", &jwt.Payload{Subject: sub}, jwt.SubjectValidator("sub"), nil}, + {"sub", &jwt.Payload{Subject: sub}, jwt.SubjectValidator("not_sub"), jwt.ErrSubValidation}, + {"aud", &jwt.Payload{Audience: aud}, jwt.AudienceValidator(jwt.Audience{"aud"}), nil}, + {"aud", &jwt.Payload{Audience: aud}, jwt.AudienceValidator(jwt.Audience{"foo", "aud1"}), nil}, + {"aud", &jwt.Payload{Audience: aud}, jwt.AudienceValidator(jwt.Audience{"bar", "aud2"}), nil}, + {"aud", &jwt.Payload{Audience: aud}, jwt.AudienceValidator(jwt.Audience{"baz", "aud3"}), nil}, + {"aud", &jwt.Payload{Audience: aud}, jwt.AudienceValidator(jwt.Audience{"qux", "aud4"}), jwt.ErrAudValidation}, + {"aud", &jwt.Payload{Audience: aud}, jwt.AudienceValidator(jwt.Audience{"not_aud"}), jwt.ErrAudValidation}, + {"exp", &jwt.Payload{ExpirationTime: exp}, jwt.ExpirationTimeValidator(now), nil}, + {"exp", 
&jwt.Payload{ExpirationTime: exp}, jwt.ExpirationTimeValidator(time.Unix(now.Unix()-int64(24*time.Hour), 0)), nil}, + {"exp", &jwt.Payload{ExpirationTime: exp}, jwt.ExpirationTimeValidator(time.Unix(now.Unix()+int64(24*time.Hour), 0)), jwt.ErrExpValidation}, + {"exp", &jwt.Payload{}, jwt.ExpirationTimeValidator(time.Now()), jwt.ErrExpValidation}, + {"nbf", &jwt.Payload{NotBefore: nbf}, jwt.NotBeforeValidator(now), jwt.ErrNbfValidation}, + {"nbf", &jwt.Payload{NotBefore: nbf}, jwt.NotBeforeValidator(time.Unix(now.Unix()+int64(15*time.Second), 0)), nil}, + {"nbf", &jwt.Payload{NotBefore: nbf}, jwt.NotBeforeValidator(time.Unix(now.Unix()-int64(15*time.Second), 0)), jwt.ErrNbfValidation}, + {"nbf", &jwt.Payload{}, jwt.NotBeforeValidator(time.Now()), nil}, + {"iat", &jwt.Payload{IssuedAt: iat}, jwt.IssuedAtValidator(now), nil}, + {"iat", &jwt.Payload{IssuedAt: iat}, jwt.IssuedAtValidator(time.Unix(now.Unix()+1, 0)), nil}, + {"iat", &jwt.Payload{IssuedAt: iat}, jwt.IssuedAtValidator(time.Unix(now.Unix()-1, 0)), jwt.ErrIatValidation}, + {"iat", &jwt.Payload{}, jwt.IssuedAtValidator(time.Now()), nil}, + {"jti", &jwt.Payload{JWTID: jti}, jwt.IDValidator("jti"), nil}, + {"jti", &jwt.Payload{JWTID: jti}, jwt.IDValidator("not_jti"), jwt.ErrJtiValidation}, + } + for _, tc := range testCases { + t.Run(tc.claim, func(t *testing.T) { + if want, got := tc.err, tc.vl(tc.pl); !internal.ErrorIs(got, want) { + t.Errorf(cmp.Diff(want, got)) + } + }) + } +} diff --git a/vendor/github.com/gbrlsnchs/jwt/verify.go b/vendor/github.com/gbrlsnchs/jwt/verify.go new file mode 100644 index 0000000000..ae6fc49585 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/verify.go @@ -0,0 +1,73 @@ +package jwt + +import ( + "bytes" + + "github.com/gbrlsnchs/jwt/v3/internal" +) + +// ErrAlgValidation indicates an incoming JWT's "alg" field mismatches the Validator's. +var ErrAlgValidation = internal.NewError(`invalid "alg" field`) + +// VerifyOption is a functional option for verifying. 
+type VerifyOption func(*RawToken) error + +// Verify verifies a token's signature using alg. Before verification, opts is iterated and +// each option in it is run. +func Verify(token []byte, alg Algorithm, payload interface{}, opts ...VerifyOption) (Header, error) { + rt := &RawToken{ + alg: alg, + } + + sep1 := bytes.IndexByte(token, '.') + if sep1 < 0 { + return rt.hd, ErrMalformed + } + + cbytes := token[sep1+1:] + sep2 := bytes.IndexByte(cbytes, '.') + if sep2 < 0 { + return rt.hd, ErrMalformed + } + rt.setToken(token, sep1, sep2) + + var err error + if err = rt.decodeHeader(); err != nil { + return rt.hd, err + } + if rv, ok := alg.(Resolver); ok { + if err = rv.Resolve(rt.hd); err != nil { + return rt.hd, err + } + } + for _, opt := range opts { + if err = opt(rt); err != nil { + return rt.hd, err + } + } + if err = alg.Verify(rt.headerPayload(), rt.sig()); err != nil { + return rt.hd, err + } + return rt.hd, rt.decode(payload) +} + +// ValidateHeader checks whether the algorithm contained +// in the JOSE header is the same used by the algorithm. +func ValidateHeader(rt *RawToken) error { + if rt.alg.Name() != rt.hd.Algorithm { + return internal.Errorf("jwt: %q: %w", rt.hd.Algorithm, ErrAlgValidation) + } + return nil +} + +// ValidatePayload runs validators against a Payload after it's been decoded. +func ValidatePayload(pl *Payload, vds ...Validator) VerifyOption { + return func(rt *RawToken) error { + rt.pl = pl + rt.vds = vds + return nil + } +} + +// Compile-time checks. 
+var _ VerifyOption = ValidateHeader diff --git a/vendor/github.com/gbrlsnchs/jwt/verify_test.go b/vendor/github.com/gbrlsnchs/jwt/verify_test.go new file mode 100644 index 0000000000..b1c3b9bd69 --- /dev/null +++ b/vendor/github.com/gbrlsnchs/jwt/verify_test.go @@ -0,0 +1,163 @@ +package jwt_test + +import ( + "fmt" + "testing" + "time" + + "github.com/gbrlsnchs/jwt/v3" + "github.com/gbrlsnchs/jwt/v3/internal" + "github.com/google/go-cmp/cmp" +) + +type testPayload struct { + jwt.Payload + String string `json:"string,omitempty"` + Int int `json:"int,omitempty"` +} + +type testCase struct { + alg jwt.Algorithm + payload interface{} + + verifyAlg jwt.Algorithm + opts []func(*jwt.RawToken) + wantHeader jwt.Header + wantPayload testPayload + + signErr error + verifyErr error +} + +var ( + now = time.Now() + tp = testPayload{ + Payload: jwt.Payload{ + Issuer: "gbrlsnchs", + Subject: "someone", + Audience: jwt.Audience{"https://golang.org", "https://jwt.io"}, + ExpirationTime: jwt.NumericDate(now.Add(24 * 30 * 12 * time.Hour)), + NotBefore: jwt.NumericDate(now.Add(30 * time.Minute)), + IssuedAt: jwt.NumericDate(now), + JWTID: "foobar", + }, + String: "foobar", + Int: 1337, + } +) + +func TestVerify(t *testing.T) { + testCases := map[string][]testCase{ + "HMAC": hmacTestCases, + "RSA": rsaTestCases, + "RSA-PSS": rsaPSSTestCases, + "ECDSA": ecdsaTestCases, + "Ed25519": ed25519TestCases, + } + for k, v := range testCases { + t.Run(k, func(t *testing.T) { + for _, tc := range v { + t.Run(tc.verifyAlg.Name(), func(t *testing.T) { + token, err := jwt.Sign(tc.payload, tc.alg) + if err != nil { + t.Fatal(err) + } + var pl testPayload + hd, err := jwt.Verify(token, tc.verifyAlg, &pl) + if want, got := tc.verifyErr, err; got != want { + t.Errorf("jwt.Verify err mismatch (-want +got):\n%s", cmp.Diff(want, got)) + } + if want, got := tc.wantHeader, hd; !cmp.Equal(got, want) { + t.Errorf("jwt.Verify header mismatch (-want +got):\n%s", cmp.Diff(want, got)) + } + if want, got := 
tc.wantPayload, pl; !cmp.Equal(got, want) { + t.Errorf("jwt.Verify payload mismatch (-want +got):\n%s", cmp.Diff(want, got)) + } + }) + } + }) + } + + t.Run("non-JSON payload", func(t *testing.T) { + var ( + header = "eyJ0eXAiOiJKV1QiLCJhbGciOiJub25lIn0" // {"typ":"JWT","alg":"none"} + payload = "NTcwMDU" // 57005 + token = fmt.Sprintf("%s.%s.", header, payload) + v interface{} + ) + _, err := jwt.Verify([]byte(token), jwt.None(), &v) + if want, got := jwt.ErrNotJSONObject, err; !internal.ErrorIs(got, want) { + t.Errorf("jwt.Verify JSON payload mismatch (-want +got):\n%s", cmp.Diff(want, got)) + } + }) +} + +func TestValidatePayload(t *testing.T) { + now := time.Now() + testCases := []struct { + pl *jwt.Payload + vds []jwt.Validator + err error + }{ + { + pl: &jwt.Payload{ + ExpirationTime: jwt.NumericDate(now.Add(1 * time.Second)), + }, + vds: []jwt.Validator{jwt.ExpirationTimeValidator(now)}, + err: nil, + }, + { + pl: &jwt.Payload{ + ExpirationTime: jwt.NumericDate(now.Add(1 * time.Second)), + }, + vds: []jwt.Validator{jwt.ExpirationTimeValidator(now.Add(15 * time.Second))}, + err: jwt.ErrExpValidation, + }, + { + pl: &jwt.Payload{ + Subject: "test", + ExpirationTime: jwt.NumericDate(now.Add(1 * time.Second)), + }, + vds: []jwt.Validator{ + jwt.SubjectValidator("test"), + jwt.ExpirationTimeValidator(now), + }, + err: nil, + }, + { + pl: &jwt.Payload{ + Subject: "foo", + ExpirationTime: jwt.NumericDate(now.Add(1 * time.Second)), + }, + vds: []jwt.Validator{ + jwt.SubjectValidator("bar"), + jwt.ExpirationTimeValidator(now), + }, + err: jwt.ErrSubValidation, + }, + { + pl: &jwt.Payload{ + Subject: "test", + ExpirationTime: jwt.NumericDate(now.Add(1 * time.Second)), + }, + vds: []jwt.Validator{ + jwt.SubjectValidator("test"), + jwt.ExpirationTimeValidator(now.Add(15 * time.Second)), + }, + err: jwt.ErrExpValidation, + }, + } + hs256 := jwt.NewHS256([]byte("secret")) + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + token, err := jwt.Sign(tc.pl, 
hs256) + if err != nil { + t.Fatal(err) + } + _, err = jwt.Verify(token, hs256, tc.pl, jwt.ValidatePayload(tc.pl, tc.vds...)) + if want, got := tc.err, err; !internal.ErrorIs(got, want) { + t.Errorf("jwt.Verify with validators mismatch (-want +got):\n%s", cmp.Diff(want, got)) + } + }) + } +} diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS new file mode 100644 index 0000000000..3d97fc7a29 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of GoGo authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS file, which +# lists people. For example, employees are listed in CONTRIBUTORS, +# but not in AUTHORS, because the employer holds the copyright. + +# Names should be added to this file as one of +# Organization's name +# Individual's name +# Individual's name + +# Please keep the list sorted. + +Sendgrid, Inc +Vastech SA (PTY) LTD +Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS new file mode 100644 index 0000000000..1b4f6c208a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS @@ -0,0 +1,23 @@ +Anton Povarov +Brian Goff +Clayton Coleman +Denis Smirnov +DongYun Kang +Dwayne Schultz +Georg Apitz +Gustav Paul +Johan Brandhorst +John Shahid +John Tuley +Laurent +Patrick Lee +Peter Edge +Roger Johansson +Sam Nguyen +Sergio Arbeo +Stephen J Day +Tamir Duberstein +Todd Eisenberger +Tormod Erevik Lea +Vyacheslav Kim +Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE new file mode 100644 index 0000000000..f57de90da8 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/LICENSE @@ -0,0 +1,35 @@ +Copyright (c) 2013, The GoGo Authors. All rights reserved. + +Protocol Buffers for Go with Gadgets + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. 
All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile new file mode 100644 index 0000000000..00d65f3277 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. 
+# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C test_proto + make -C proto3_proto + make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go new file mode 100644 index 0000000000..a26b046d94 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/clone.go @@ -0,0 +1,258 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "fmt" + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(src Message) Message { + in := reflect.ValueOf(src) + if in.IsNil() { + return src + } + out := reflect.New(in.Type().Elem()) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generate Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. 
+func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) + } + if in.IsNil() { + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { + emOut := out.Addr().Interface().(extensionsBytes) + bIn := emIn.GetExtensions() + bOut := emOut.GetExtensions() + *bOut = append(*bOut, *bIn...) + } else if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). 
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. 
+ if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go new file mode 100644 index 0000000000..24552483c6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go @@ -0,0 +1,39 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "reflect" + +type custom interface { + Marshal() ([]byte, error) + Unmarshal(data []byte) error + Size() int +} + +var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go new file mode 100644 index 0000000000..63b0f08bef --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -0,0 +1,427 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. 
+// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if 
b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. 
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. 
+// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. 
+func (p *Buffer) DecodeGroup(pb Message) error { + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF + } + err := Unmarshal(b[:x], pb) + p.index += y + return err +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). 
+ var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go new file mode 100644 index 0000000000..35b882c09a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go new file mode 100644 index 0000000000..fe1bd7d904 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. 
This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. +func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. 
+ } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + default: // E.g., *pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? 
+ dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. 
+ switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 0000000000..93464c91cf --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290). 
+func validateDuration(d *duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func durationFromProto(p *duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func durationProto(d time.Duration) *duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go new file mode 100644 index 0000000000..e748e1730e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -0,0 +1,49 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go new file mode 100644 index 0000000000..9581ccd304 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -0,0 +1,205 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "reflect" +) + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. 
+func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. 
+// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + siz := Size(pb) + sizVar := SizeVarint(uint64(siz)) + p.grow(siz + sizVar) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go new file mode 100644 index 0000000000..0f5fb173e9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -0,0 +1,33 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +func NewRequiredNotSetError(field string) *RequiredNotSetError { + return &RequiredNotSetError{field} +} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go new file mode 100644 index 0000000000..d4db5a1c14 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. 
+ - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. 
+func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + return bytes.Equal(u1, u2) +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. 
+ n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. + if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. 
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + return false + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. 
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go new file mode 100644 index 0000000000..341c6f57f5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -0,0 +1,605 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. 
+type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + case extensionsBytes: + return slowExtensionAdapter{p}, nil + } + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. + return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. 
+type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. 
When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + return + } + epb, err := extendable(base) + if err != nil { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if ea, ok := pbi.(slowExtensionAdapter); ok { + pbi = ea.extensionsBytes + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. 
+type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + // TODO: Check types, field numbers, etc.? + epb, err := extendable(pb) + if err != nil { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok := extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + clearExtension(pb, extension.Field) +} + +func clearExtension(pb Message, fieldNum int32) { + if epb, ok := pb.(extensionsBytes); ok { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + return + } + epb, err := extendable(pb) + if err != nil { + return + } + // TODO: Check types, field numbers, etc.? 
+ extmap := epb.extensionsWrite() + delete(extmap, fieldNum) +} + +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. +func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + return decodeExtensionFromBytes(extension, *ext) + } + + epb, err := extendable(pb) + if err != nil { + return nil, err + } + + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if cerr := checkExtensionTypes(epb, extension); cerr != nil { + return nil, cerr + } + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. 
+ // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + unmarshal := typeUnmarshaler(t, extension.Tag) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate space to store the pointer/slice. 
+ value := reflect.New(t).Elem() + + var err error + for { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { + return nil, err + } + + if len(b) == 0 { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. 
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + if epb, ok := pb.(extensionsBytes); ok { + ClearExtension(pb, extension) + newb, err := encodeExtension(extension, value) + if err != nil { + return err + } + bb := epb.GetExtensions() + *bb = append(*bb, newb...) + return nil + } + epb, err := extendable(pb) + if err != nil { + return err + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + *ext = []byte{} + return + } + epb, err := extendable(pb) + if err != nil { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. 
+func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go new file mode 100644 index 0000000000..6f1ae120ec --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -0,0 +1,389 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strings" + "sync" +) + +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +type slowExtensionAdapter struct { + extensionsBytes +} + +func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { + panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") +} + +func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + b := s.GetExtensions() + m, err := BytesToExtensionsMap(*b) + if err != nil { + panic(err) + } + return m, notLocker{} +} + +func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*bool) == nil { + return ifnotset + } + return *(value.(*bool)) +} + +func (this *Extension) Equal(that *Extension) bool { + if err := this.Encode(); err != nil { + return false + } + if err := that.Encode(); err != nil { + return false + } + return bytes.Equal(this.enc, that.enc) +} + +func (this *Extension) Compare(that *Extension) int { + if err := this.Encode(); err != nil { + return 1 + } + if err := that.Encode(); err != nil { + 
return -1 + } + return bytes.Compare(this.enc, that.enc) +} + +func SizeOfInternalExtension(m extendableProto) (n int) { + info := getMarshalInfo(reflect.TypeOf(m)) + return info.sizeV1Extensions(m.extensionsWrite()) +} + +type sortableMapElem struct { + field int32 + ext Extension +} + +func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { + s := make(sortableExtensions, 0, len(m)) + for k, v := range m { + s = append(s, &sortableMapElem{field: k, ext: v}) + } + return s +} + +type sortableExtensions []*sortableMapElem + +func (this sortableExtensions) Len() int { return len(this) } + +func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } + +func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } + +func (this sortableExtensions) String() string { + sort.Sort(this) + ss := make([]string, len(this)) + for i := range this { + ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) + } + return "map[" + strings.Join(ss, ",") + "]" +} + +func StringFromInternalExtension(m extendableProto) string { + return StringFromExtensionsMap(m.extensionsWrite()) +} + +func StringFromExtensionsMap(m map[int32]Extension) string { + return newSortableExtensionsFromMap(m).String() +} + +func StringFromExtensionsBytes(ext []byte) string { + m, err := BytesToExtensionsMap(ext) + if err != nil { + panic(err) + } + return StringFromExtensionsMap(m) +} + +func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMap(m.extensionsWrite(), data) +} + +func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMapBackwards(m.extensionsWrite(), data) +} + +func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { + o := 0 + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[o:], e.enc) + if n != len(e.enc) { + return 0, 
io.ErrShortBuffer + } + o += n + } + return o, nil +} + +func EncodeExtensionMapBackwards(m map[int32]Extension, data []byte) (n int, err error) { + o := 0 + end := len(data) + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[end-len(e.enc):], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + end -= n + o += n + } + return o, nil +} + +func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { + e := m[id] + if err := e.Encode(); err != nil { + return nil, err + } + return e.enc, nil +} + +func size(buf []byte, wire int) (int, error) { + switch wire { + case WireVarint: + _, n := DecodeVarint(buf) + return n, nil + case WireFixed64: + return 8, nil + case WireBytes: + v, n := DecodeVarint(buf) + return int(v) + n, nil + case WireFixed32: + return 4, nil + case WireStartGroup: + offset := 0 + for { + u, n := DecodeVarint(buf[offset:]) + fwire := int(u & 0x7) + offset += n + if fwire == WireEndGroup { + return offset, nil + } + s, err := size(buf[offset:], wire) + if err != nil { + return 0, err + } + offset += s + } + } + return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) +} + +func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { + m := make(map[int32]Extension) + i := 0 + for i < len(buf) { + tag, n := DecodeVarint(buf[i:]) + if n <= 0 { + return nil, fmt.Errorf("unable to decode varint") + } + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size(buf[i+n:], wireType) + if err != nil { + return nil, err + } + end := i + int(l) + n + m[int32(fieldNum)] = Extension{enc: buf[i:end]} + i = end + } + return m, nil +} + +func NewExtension(e []byte) Extension { + ee := Extension{enc: make([]byte, len(e))} + copy(ee.enc, e) + return ee +} + +func AppendExtension(e Message, tag int32, buf []byte) { + if ee, eok := e.(extensionsBytes); eok { + ext := ee.GetExtensions() + *ext = append(*ext, buf...) 
+ return + } + if ee, eok := e.(extendableProto); eok { + m := ee.extensionsWrite() + ext := m[int32(tag)] // may be missing + ext.enc = append(ext.enc, buf...) + m[int32(tag)] = ext + } +} + +func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { + u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) + ei := u.getExtElemInfo(extension) + v := value + p := toAddrPointer(&v, ei.isptr) + siz := ei.sizer(p, SizeVarint(ei.wiretag)) + buf := make([]byte, 0, siz) + return ei.marshaler(buf, p, ei.wiretag, false) +} + +func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { + o := 0 + for o < len(buf) { + tag, n := DecodeVarint((buf)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + if o+n > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + l, err := size((buf)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + if o+n+l > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + v, err := decodeExtension((buf)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + return defaultExtensionValue(extension) +} + +func (this *Extension) Encode() error { + if this.enc == nil { + var err error + this.enc, err = encodeExtension(this.desc, this.value) + if err != nil { + return err + } + } + return nil +} + +func (this Extension) GoString() string { + if err := this.Encode(); err != nil { + return fmt.Sprintf("error encoding extension: %v", err) + } + return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) +} + +func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return errors.New("proto: bad extension number; not in declared ranges") + } 
+ return SetExtension(pb, desc, value) +} + +func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return nil, fmt.Errorf("unregistered field number %d", fieldNum) + } + return GetExtension(pb, desc) +} + +func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { + x := &XXX_InternalExtensions{ + p: new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }), + } + x.p.extensionMap = m + return *x +} + +func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { + pb := extendable.(extendableProto) + return pb.extensionsWrite() +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go new file mode 100644 index 0000000000..80db1c155b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -0,0 +1,973 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. 
+ - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. 
+ +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/gogo/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m 
*Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. 
+ if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct{ field string } + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true +} + +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. +var errInvalidUTF8 = &invalidUTF8Error{} + +// isNonFatal reports whether the error is either a RequiredNotSet error +// or a InvalidUTF8 error. 
+func isNonFatal(err error) bool { + if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { + return true + } + if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { + return true + } + return false +} + +type nonFatal struct{ E error } + +// Merge merges err into nf and reports whether it was successful. +// Otherwise it returns false for any fatal non-nil errors. +func (nf *nonFatal) Merge(err error) (ok bool) { + if err == nil { + return true // not an error + } + if !isNonFatal(err) { + return false // fatal error + } + if nf.E == nil { + nf.E = err // store first instance of non-fatal error + } + return true +} + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + deterministic bool +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +// SetDeterministic sets whether to use deterministic serialization. 
+// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. 
+func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. 
+ var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. +func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + sindex := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) 
+ depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = sindex +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or T or []*T or []T + switch f.Kind() { + case reflect.Struct: + setDefaults(f, recur, zeros) + + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.Kind() == reflect.Ptr && e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice 
of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Struct: + nestedMessage = true // non-nullable + + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr, reflect.Struct: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", 
prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// mapKeys returns a sort.Interface to be used for sorting the map keys. +// Map fields may have key types of non-float scalars, strings and enums. +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{vs: vs} + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. + if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. 
+func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +const ( + // ProtoPackageIsVersion3 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + GoGoProtoPackageIsVersion3 = true + + // ProtoPackageIsVersion2 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + GoGoProtoPackageIsVersion2 = true + + // ProtoPackageIsVersion1 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + GoGoProtoPackageIsVersion1 = true +) + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. +type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go new file mode 100644 index 0000000000..b3aa39190a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go @@ -0,0 +1,50 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "encoding/json" + "strconv" +) + +type Sizer interface { + Size() int +} + +type ProtoSizer interface { + ProtoSize() int +} + +func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { + s, ok := m[value] + if !ok { + s = strconv.Itoa(int(value)) + } + return json.Marshal(s) +} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go new file mode 100644 index 0000000000..f48a756761 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -0,0 +1,181 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "errors" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. 
+type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + return ms.find(pb) != nil +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. 
+func unmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go new file mode 100644 index 0000000000..b6cad90834 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -0,0 +1,357 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" + "sync" +) + +const unsafeAllowed = false + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. 
+var invalidField = field(nil) + +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value +} + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { + return p.v.IsNil() +} + +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. 
+func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. 
+ slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s +} + +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. + slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) +} + +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { 
+ return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) +} + +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) + return + } + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. 
+func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} + } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct +} + +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? + return p.v +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 0000000000..7ffd3c29d9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,59 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" +) + +// TODO: untested, so probably incorrect. 
+ +func (p pointer) getRef() pointer { + return pointer{v: p.v.Addr()} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go new file mode 100644 index 0000000000..d55a335d94 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,308 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const unsafeAllowed = true + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != invalidField +} + +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. +type pointer struct { + p unsafe.Pointer +} + +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. 
+ // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +func (p pointer) isNil() bool { + return p.p == nil +} + +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) +} +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) +} +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) +} +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) +} + +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. 
+/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) +} +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v +} + +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) +} + +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v +} + +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? +func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) +} + +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) +} +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) +} +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) +} +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) +} +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) +} +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) +} +func (p pointer) toBool() *bool { + return (*bool)(p.p) +} +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) +} +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return 
(*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) +} + +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} + +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p +} + +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. 
+ return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} + +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 0000000000..aca8eed02a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,56 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. 
+ +package proto + +import ( + "reflect" + "unsafe" +) + +func (p pointer) getRef() pointer { + return pointer{p: (unsafe.Pointer)(&p.p)} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go new file mode 100644 index 0000000000..28da1475fb --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -0,0 +1,610 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. 
+const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. 
+type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + CastType string + StdTime bool + StdDuration bool + WktPointer bool + + stype reflect.Type // set for struct types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only + + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only +} + +// String formats the properties in the protobuf struct field tag style. +func (p *Properties) String() string { + s := p.Wire + s += "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. 
+ if len(fields) < 2 { + log.Printf("proto: tag has too few fields: %q", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + case "fixed32": + p.WireType = WireFixed32 + case "fixed64": + p.WireType = WireFixed64 + case "zigzag32": + p.WireType = WireVarint + case "zigzag64": + p.WireType = WireVarint + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + log.Printf("proto: tag has unknown wire type: %q", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + +outer: + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break outer + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "casttype="): + p.CastType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true + case f == "wktptr": + p.WktPointer = true + } + } +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// setFieldProps initializes the field properties for submessages and maps. 
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { + p.ctype = typ + p.setTag(lockGetProp) + return + } + if p.StdTime && !isMap { + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setTag(lockGetProp) + return + } + if p.WktPointer && !isMap { + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + case reflect.Struct: + p.stype = typ + case reflect.Ptr: + if t1.Elem().Kind() == reflect.Struct { + p.stype = t1.Elem() + } + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + case reflect.Struct: + p.stype = t3 + } + case reflect.Struct: + p.stype = t2 + } + + case reflect.Map: + + p.mtype = t1 + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + + p.MapValProp.CustomType = p.CustomType + p.MapValProp.StdDuration = p.StdDuration + p.MapValProp.StdTime = p.StdTime + p.MapValProp.WktPointer = p.WktPointer + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +) + +// Init populates the properties from a protocol buffer struct tag. 
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + p.setFieldProps(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +type ( + oneofFuncsIface interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + oneofWrappersIface interface { + XXX_OneofWrappers() []interface{} + } +) + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + return prop + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. 
+ propertiesMap[t] = prop + + // build properties + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + isOneofMessage := false + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + isOneofMessage = true + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + if isOneofMessage { + var oots []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oots = m.XXX_OneofFuncs() + case oneofWrappersIface: + oots = m.XXX_OneofWrappers() + } + if len(oots) > 0 { + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. 
+ continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). +var ( + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypedNils[name]; ok { + // TODO: Some day, make this a panic. 
+ log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. 
+func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go new file mode 100644 index 0000000000..40ea3dd935 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -0,0 +1,36 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() +var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go new file mode 100644 index 0000000000..5a5fd93f7c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go @@ -0,0 +1,119 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "io" +) + +func Skip(data []byte) (n int, err error) { + l := len(data) + index := 0 + for index < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + index++ + if data[index-1] < 0x80 { + break + } + } + return index, nil + case 1: + index += 8 + return index, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + index += length + return index, nil + case 3: + for { + var innerWire uint64 + var start int = index + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := Skip(data[start:]) + if err != nil { + return 0, err + } + index = start + next + } + return index, nil + case 4: + return index, nil + case 5: + index += 4 + return index, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } 
+ } + panic("unreachable") +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go new file mode 100644 index 0000000000..f8babdefab --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -0,0 +1,3009 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. +type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, returns the slice and error (if any). +type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) + +// marshalInfo is the information used for marshaling a message. +type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements + + hassizer bool // has custom sizer + hasprotosizer bool // has custom protosizer + + bytesExtensions field // offset of XXX_extensions where the field type is []byte +} + +// marshalFieldInfo is the information used for marshaling a field of a message. +type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. 
+type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex + + uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind() +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily initialized. +// t is the type of the message (NOT the pointer to it). +func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. +// It computes the size of encoded data of msg. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Size(msg Message) int { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return 0 + } + return u.size(ptr) +} + +// Marshal is the entry point from generated code, +// and should be ONLY called by generated code. +// It marshals msg to the end of b. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. 
+ return b, ErrNil + } + return u.marshal(b, ptr, deterministic) +} + +func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { + // u := a.marshal, but atomically. + // We use an atomic here to ensure memory consistency. + u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from type of message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. + atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if u.hasmarshaler { + // Uses the message's Size method if available + if u.hassizer { + s := ptr.asPointerTo(u.typ).Interface().(Sizer) + return s.Size() + } + // Uses the message's ProtoSize method if available + if u.hasprotosizer { + s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) + return s.ProtoSize() + } + + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + n += len(s) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. 
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errLater error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + b = append(b, s...) + } + for _, f := range u.fields { + if f.required { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." 
+ err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errLater +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.bytesExtensions = invalidField + u.sizecache = invalidField + isOneofMessage := false + + if reflect.PtrTo(t).Implements(sizerType) { + u.hassizer = true + } + if reflect.PtrTo(t).Implements(protosizerType) { + u.hasprotosizer = true + } + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Tag.Get("protobuf_oneof") != "" { + isOneofMessage = true + } + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + if f.Type.Kind() == reflect.Map { + u.v1extensions = toField(&f) + } else { + u.bytesExtensions = toField(&f) + } + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // get oneof implementers + var oneofImplementers []interface{} + // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler + if isOneofMessage { + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. 
+ sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" 
+ tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + } + } +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. 
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. +func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + ctype := false + isTime := false + isDuration := false + isWktPointer := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + if strings.HasPrefix(tags[i], "customtype=") { + ctype = true + } + if tags[i] == "stdtime" { + isTime = true + } + if tags[i] == "stdduration" { + isDuration = true + } + if tags[i] == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + if !proto3 && !pointer && !slice { + nozero = false + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + if pointer { + return 
makeCustomPtrMarshaler(getMarshalInfo(t)) + } + return makeCustomMarshaler(getMarshalInfo(t)) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeTimePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeTimePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeTimeSliceMarshaler(getMarshalInfo(t)) + } + return makeTimeMarshaler(getMarshalInfo(t)) + } + + if isDuration { + if pointer { + if slice { + return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationPtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeDurationSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationMarshaler(getMarshalInfo(t)) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValueMarshaler(getMarshalInfo(t)) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValueMarshaler(getMarshalInfo(t)) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return 
makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint32: + if pointer { + if slice { + return makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValueMarshaler(getMarshalInfo(t)) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValueMarshaler(getMarshalInfo(t)) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValueMarshaler(getMarshalInfo(t)) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return 
sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if 
pointer { + return sizeFixed64Ptr, appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } 
+ if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. 
+ return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if pointer { + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } else { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageRefMarshaler(getMarshalInfo(t)) + } + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} 
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func 
sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func 
sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func 
sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func 
sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} +func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + 
tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. + switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + 
byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v 
== 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 
b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = 
appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := 
*ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for 
_, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b 
[]byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + 
b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = 
appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ } + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. 
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + tags := strings.Split(f.Tag.Get("protobuf"), ",") + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + stdOptions := false + for _, t := range tags { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "stdduration" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. 
+ // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). + // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + + var nerr nonFatal + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = 
keyMarshaler(b, kaddr, keyWireTag, deterministic) + if !nerr.Merge(err) { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + return b, err + } + } + return b, nerr.E + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. +func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. 
+ if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if !nerr.Merge(err) { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. 
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + var nerr nonFatal + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. 
+// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if p.deterministic { + if _, ok := pb.(Marshaler); ok { + return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) + } + } + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz] + pp, err = m.XXX_Marshal(pp, p.deterministic) + p.buf = append(p.buf, pp...) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ var b []byte + b, err = m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go new file mode 100644 index 0000000000..997f57c1e1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go @@ -0,0 +1,388 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +// makeMessageRefMarshaler differs a bit from makeMessageMarshaler +// It marshal a message T instead of a *T +func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + siz := u.size(ptr) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + b = appendVarint(b, wiretag) + siz := u.cachedsize(ptr) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, ptr, deterministic) + } +} + +// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler +// It marshals a slice of messages []T instead of []*T +func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + var err, errreq error + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + b = appendVarint(b, wiretag) + siz := u.size(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != 
nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + + return b, errreq + } +} + +func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go new file mode 100644 index 0000000000..60dcf70d1e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go @@ -0,0 +1,676 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. 
+ } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? + out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. 
+ if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). 
+ /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) + } + } + } + } + case reflect.Struct: + switch { + case isSlice && !isPointer: // E.g. 
[]pb.T + mergeInfo := getMergeInfo(tf) + zero := reflect.Zero(tf) + mfi.merge = func(dst, src pointer) { + // TODO: Make this faster? + dstsp := dst.asPointerTo(f.Type) + dsts := dstsp.Elem() + srcs := src.asPointerTo(f.Type).Elem() + for i := 0; i < srcs.Len(); i++ { + dsts = reflect.Append(dsts, zero) + srcElement := srcs.Index(i).Addr() + dstElement := dsts.Index(dsts.Len() - 1).Addr() + mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement)) + } + if dsts.IsNil() { + dsts = reflect.MakeSlice(f.Type, 0, 0) + } + dstsp.Elem().Set(dsts) + } + case !isPointer: + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + mergeInfo.merge(dst, src) + } + case isSlice: // E.g., []*pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mergeInfo.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mergeInfo.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go new file mode 100644 index 0000000000..937229386a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2249 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. 
+ err := u.unmarshal(toPointer(&msg), b) + return err +} + +type unmarshalInfo struct { + typ reflect.Type // type of the protobuf struct + + // 0 = only typ field is initialized + // 1 = completely initialized + initialized int32 + lock sync.Mutex // prevents double initialization + dense []unmarshalFieldInfo // fields indexed by tag # + sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # + reqFields []string // names of required fields + reqMask uint64 // 1< 0 { + // Read tag and wire type. + // Special case 1 and 2 byte varints. + var x uint64 + if b[0] < 128 { + x = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + x = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + x, n = decodeVarint(b) + if n == 0 { + return io.ErrUnexpectedEOF + } + b = b[n:] + } + tag := x >> 3 + wire := int(x) & 7 + + // Dispatch on the tag to one of the unmarshal* functions below. + var f unmarshalFieldInfo + if tag < uint64(len(u.dense)) { + f = u.dense[tag] + } else { + f = u.sparse[tag] + } + if fn := f.unmarshal; fn != nil { + var err error + b, err = fn(b, m.offset(f.field), wire) + if err == nil { + reqMask |= f.reqMask + continue + } + if r, ok := err.(*RequiredNotSetError); ok { + // Remember this error, but keep parsing. We need to produce + // a full parse even if a required field is missing. + if errLater == nil { + errLater = r + } + reqMask |= f.reqMask + continue + } + if err != errInternalBadWireType { + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return err + } + // Fragments with bad wire type are treated as unknown fields. + } + + // Unknown tag. + if !u.unrecognized.IsValid() { + // Don't keep unrecognized data; just skip it. + var err error + b, err = skipField(b, wire) + if err != nil { + return err + } + continue + } + // Keep unrecognized data around. 
+ // maybe in extensions, maybe in the unrecognized field. + z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + if u.bytesExtensions.IsValid() { + z = m.offset(u.bytesExtensions).toBytes() + break + } + panic("no extensions field available") + } + } + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if reqMask != u.reqMask && errLater == nil { + // A required field of this message is missing. + for _, n := range u.reqFields { + if reqMask&1 == 0 { + errLater = &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return errLater +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. +func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. + u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + u.bytesExtensions = invalidField + + // List of the generated type and offset for each oneof field. 
+ type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { + u.oldExtensions = toField(&f) + continue + } else if f.Type == reflect.TypeOf(([]byte)(nil)) { + u.bytesExtensions = toField(&f) + continue + } + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? 
+ var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. + // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler + if len(oneofFields) > 0 { + var oneofImplementers []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + for _, v := range oneofImplementers { + tptr := reflect.TypeOf(v) // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) + } + } + + } + } + + // Get extension ranges, if any. 
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + if fn.IsValid() { + if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { + panic("a message with extensions, but no extensions field in " + t.Name()) + } + u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) + } + + // Explicitly disallow tag 0. This will ensure we flag an error + // when decoding a buffer of all zeros. Without this code, we + // would decode and skip an all-zero buffer of even length. + // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. + u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { + return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) + }, 0, "") + + // Set mask for required field check. + u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? + for len(u.dense) <= tag { + u.dense = append(u.dense, unmarshalFieldInfo{}) + } + u.dense[tag] = i + return + } + if u.sparse == nil { + u.sparse = map[uint64]unmarshalFieldInfo{} + } + u.sparse[uint64(tag)] = i +} + +// fieldUnmarshaler returns an unmarshaler for the given field. +func fieldUnmarshaler(f *reflect.StructField) unmarshaler { + if f.Type.Kind() == reflect.Map { + return makeUnmarshalMap(f) + } + return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) +} + +// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. 
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + ctype := false + isTime := false + isDuration := false + isWktPointer := false + proto3 := false + validateUTF8 := true + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + if tag == "proto3" { + proto3 = true + } + if strings.HasPrefix(tag, "customtype=") { + ctype = true + } + if tag == "stdtime" { + isTime = true + } + if tag == "stdduration" { + isDuration = true + } + if tag == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) + } + if pointer { + return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalCustom(getUnmarshalInfo(t), name) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTime(getUnmarshalInfo(t), name) + } + + if isDuration { + if pointer { + if slice { + return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDuration(getUnmarshalInfo(t), name) + } + + if isWktPointer { + switch t.Kind() { + case 
reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint32: + if pointer { + if slice { + return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return 
makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + // We'll never have both pointer and slice for basic types. 
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr 
+ } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessage(getUnmarshalInfo(t), name) + } + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n 
:= decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, 
error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + 
return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | 
uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, 
errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, 
io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func 
unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. + // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + 
return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + 
if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := 
decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) 
+ *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + tagArray := strings.Split(f.Tag.Get("protobuf"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + for _, t := range tagArray { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + } + if t == "stdduration" { + valTags = append(valTags, t) + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. 
+ if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + var nerr nonFatal + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if nerr.Merge(err) { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nerr.E + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. 
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + var nerr nonFatal + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if !nerr.Merge(err) { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nerr.E + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. +func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. 
+func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) == 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go new file mode 100644 index 0000000000..00d6c7ad93 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go @@ -0,0 +1,385 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f // gogo: changed from v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.New(sub.typ)) + m := s.Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := reflect.New(sub.typ) + c := m.Interface().(custom) + if err := c.Unmarshal(b[:x]); err != nil { + return nil, err + } + v := valToPointer(m) + f.appendRef(v, sub.typ) + return b[x:], nil + } +} + +func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w 
!= WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + m := f.asPointerTo(sub.typ).Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := ×tamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := ×tamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := ×tamp{} + if err := Unmarshal(b[:x], m); err != nil { + 
return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := ×tamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&d)) + return b[x:], nil + } +} + +func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if 
err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(d)) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&d)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(d)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go new file mode 100644 index 0000000000..87416afe95 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -0,0 +1,930 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. 
+ +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. 
+ return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' 
|| ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. +func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return 
err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if name == "XXX_NoUnkeyedLiteral" { + continue + } + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, v, props); err != nil { + return err + } + } else if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.MapValProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. 
+ if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, fv, props); err != nil { + return err + } + } else if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv + if pv.CanAddr() { + pv = sv.Addr() + } else { + pv = reflect.New(sv.Type()) + pv.Elem().Set(sv) + } + if _, err := extendable(pv.Interface()); err == nil { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + +// writeAny writes an arbitrary field. 
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + if props != nil { + if len(props.CustomType) > 0 { + custom, ok := v.Interface().(Marshaler) + if ok { + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + } else if len(props.CastType) > 0 { + if _, ok := v.Interface().(interface { + String() string + }); ok { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + _, err := fmt.Fprintf(w, "%d", v.Interface()) + return err + } + } + } else if props.StdTime { + t, ok := v.Interface().(time.Time) + if !ok { + return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) + } + tproto, err := timestampProto(t) + if err != nil { + return err + } + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdTime = false + err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) + return err + } else if props.StdDuration { + d, ok := v.Interface().(time.Duration) + if !ok { + return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) + } + dproto := durationProto(d) + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdDuration = false + err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) + return err + } + } + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. 
+ switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. + v = v.Addr() + } + if v.Type().Implements(textMarshalerType) { + text, err := v.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. 
+// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, ferr := fmt.Fprintf(w, "/* %v */\n", err) + return ferr + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, werr := w.Write(endBraceNewline); werr != nil { + return werr + } + continue + } + if _, ferr := fmt.Fprint(w, tag); ferr != nil { + return ferr + } + if wire != WireStartGroup { + if err = w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err = w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + 
case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + e := pv.Interface().(Message) + + var m map[int32]Extension + var mu sync.Locker + if em, ok := e.(extensionsBytes); ok { + eb := em.GetExtensions() + var err error + m, err = BytesToExtensionsMap(*eb) + if err != nil { + return err + } + mu = notLocker{} + } else if _, ok := e.(extendableProto); ok { + ep, _ := extendable(e) + m, mu = ep.extensionsRead() + if m == nil { + return nil + } + } + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. 
+ if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(e, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. + if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. 
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. 
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go new file mode 100644 index 0000000000..1d6c6aa0e4 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -0,0 +1,57 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "reflect" +) + +func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { + m, ok := enumStringMaps[props.Enum] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + key := int32(0) + if v.Kind() == reflect.Ptr { + key = int32(v.Elem().Int()) + } else { + key = int32(v.Int()) + } + s, ok := m[key] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + _, err := fmt.Fprint(w, s) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go new file mode 100644 index 0000000000..1ce0be2fa9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -0,0 +1,1018 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // 
remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + 
p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) 
+ s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. 
+func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. 
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. 
+ messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. 
+ var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. 
+ + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.MapKeyProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.MapValProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. 
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + if len(props.CustomType) > 0 { + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + tc := reflect.TypeOf(new(Marshaler)) + ok := t.Elem().Implements(tc.Elem()) + if ok { + fv := v + flen := fv.Len() + if flen == fv.Cap() { + nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) + reflect.Copy(nav, fv) + fv.Set(nav) + } + fv.SetLen(flen + 1) + + // Read one. 
+ p.back() + return p.readAny(fv.Index(flen), props) + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.ValueOf(custom)) + } else { + custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.Indirect(reflect.ValueOf(custom))) + } + return nil + } + if props.StdTime { + fv := v + p.back() + props.StdTime = false + tproto := ×tamp{} + err := p.readAny(reflect.ValueOf(tproto).Elem(), props) + props.StdTime = true + if err != nil { + return err + } + tim, err := timestampFromProto(tproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ts := fv.Interface().([]*time.Time) + ts = append(ts, &tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } else { + ts := fv.Interface().([]time.Time) + ts = append(ts, tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&tim)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&tim))) + } + return nil + } + if props.StdDuration { + fv := v + p.back() + props.StdDuration = false + dproto := &duration{} + err := p.readAny(reflect.ValueOf(dproto).Elem(), props) + props.StdDuration = true + if err != nil { + return err + } + dur, err := durationFromProto(dproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ds := fv.Interface().([]*time.Duration) + ds = append(ds, &dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } else { + 
ds := fv.Interface().([]time.Duration) + ds = append(ds, dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&dur)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&dur))) + } + return nil + } + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + ntok := p.next() + if ntok.err != nil { + return ntok.err + } + if ntok.value == "]" { + break + } + if ntok.value != "," { + return p.errorf("Expected ']' or ',' found %q", ntok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". 
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int8: + if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int16: + if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. 
+ return p.readStruct(fv, terminator) + case reflect.Uint8: + if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint16: + if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + return um.UnmarshalText([]byte(s)) + } + pb.Reset() + v := reflect.ValueOf(pb) + return newTextParser(s).readStruct(v.Elem(), "") +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 0000000000..9324f6542b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. 
+// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func timestampFromProto(ts *timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. 
+func timestampProto(t time.Time) (*timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := ×tamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go new file mode 100644 index 0000000000..38439fa990 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go @@ -0,0 +1,49 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() + +type timestamp struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *timestamp) Reset() { *m = timestamp{} } +func (*timestamp) ProtoMessage() {} +func (*timestamp) String() string { return "timestamp" } + +func init() { + RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go new file mode 100644 index 0000000000..b175d1b642 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers.go @@ -0,0 +1,1888 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func 
makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func 
makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func 
makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func 
makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func 
makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func 
makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrSliceUnmarshaler(sub 
*unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func 
makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func 
makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go new file mode 100644 index 0000000000..c1cf7bf85e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go @@ -0,0 +1,113 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +type float64Value struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float64Value) Reset() { *m = float64Value{} } +func (*float64Value) ProtoMessage() {} +func (*float64Value) String() string { return "float64" } + +type float32Value struct { + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float32Value) Reset() { *m = float32Value{} } +func (*float32Value) ProtoMessage() {} +func (*float32Value) String() string { return "float32" } + +type int64Value struct { + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int64Value) Reset() { *m = int64Value{} } +func (*int64Value) ProtoMessage() {} +func (*int64Value) String() string { return "int64" } + +type uint64Value struct { + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint64Value) Reset() { *m = uint64Value{} } +func (*uint64Value) ProtoMessage() {} +func (*uint64Value) String() string { return "uint64" } + +type int32Value struct { + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int32Value) Reset() { *m = int32Value{} } +func (*int32Value) ProtoMessage() {} +func (*int32Value) String() string { return "int32" } + +type uint32Value struct { + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint32Value) Reset() { *m = uint32Value{} } +func (*uint32Value) ProtoMessage() {} +func (*uint32Value) String() string { return "uint32" } + +type boolValue struct { + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *boolValue) Reset() { *m = boolValue{} } +func (*boolValue) ProtoMessage() {} +func (*boolValue) String() string { return "bool" } + +type stringValue struct { + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m 
*stringValue) Reset() { *m = stringValue{} } +func (*stringValue) ProtoMessage() {} +func (*stringValue) String() string { return "string" } + +type bytesValue struct { + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *bytesValue) Reset() { *m = bytesValue{} } +func (*bytesValue) ProtoMessage() {} +func (*bytesValue) String() string { return "[]byte" } + +func init() { + RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") + RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") + RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") + RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") + RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value") + RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") + RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") + RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") + RegisterType((*bytesValue)(nil), "gogo.protobuf.proto.BytesValue") +} diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go new file mode 100644 index 0000000000..eac1c7664f --- /dev/null +++ b/vendor/github.com/golang/groupcache/lru/lru.go @@ -0,0 +1,133 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package lru implements an LRU cache. +package lru + +import "container/list" + +// Cache is an LRU cache. 
It is not safe for concurrent access. +type Cache struct { + // MaxEntries is the maximum number of cache entries before + // an item is evicted. Zero means no limit. + MaxEntries int + + // OnEvicted optionally specifies a callback function to be + // executed when an entry is purged from the cache. + OnEvicted func(key Key, value interface{}) + + ll *list.List + cache map[interface{}]*list.Element +} + +// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators +type Key interface{} + +type entry struct { + key Key + value interface{} +} + +// New creates a new Cache. +// If maxEntries is zero, the cache has no limit and it's assumed +// that eviction is done by the caller. +func New(maxEntries int) *Cache { + return &Cache{ + MaxEntries: maxEntries, + ll: list.New(), + cache: make(map[interface{}]*list.Element), + } +} + +// Add adds a value to the cache. +func (c *Cache) Add(key Key, value interface{}) { + if c.cache == nil { + c.cache = make(map[interface{}]*list.Element) + c.ll = list.New() + } + if ee, ok := c.cache[key]; ok { + c.ll.MoveToFront(ee) + ee.Value.(*entry).value = value + return + } + ele := c.ll.PushFront(&entry{key, value}) + c.cache[key] = ele + if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { + c.RemoveOldest() + } +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key Key) (value interface{}, ok bool) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.ll.MoveToFront(ele) + return ele.Value.(*entry).value, true + } + return +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key Key) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.removeElement(ele) + } +} + +// RemoveOldest removes the oldest item from the cache. 
+func (c *Cache) RemoveOldest() { + if c.cache == nil { + return + } + ele := c.ll.Back() + if ele != nil { + c.removeElement(ele) + } +} + +func (c *Cache) removeElement(e *list.Element) { + c.ll.Remove(e) + kv := e.Value.(*entry) + delete(c.cache, kv.key) + if c.OnEvicted != nil { + c.OnEvicted(kv.key, kv.value) + } +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + if c.cache == nil { + return 0 + } + return c.ll.Len() +} + +// Clear purges all stored items from the cache. +func (c *Cache) Clear() { + if c.OnEvicted != nil { + for _, e := range c.cache { + kv := e.Value.(*entry) + c.OnEvicted(kv.key, kv.value) + } + } + c.ll = nil + c.cache = nil +} diff --git a/vendor/github.com/golang/groupcache/lru/lru_test.go b/vendor/github.com/golang/groupcache/lru/lru_test.go new file mode 100644 index 0000000000..a14f439e87 --- /dev/null +++ b/vendor/github.com/golang/groupcache/lru/lru_test.go @@ -0,0 +1,97 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lru + +import ( + "fmt" + "testing" +) + +type simpleStruct struct { + int + string +} + +type complexStruct struct { + int + simpleStruct +} + +var getTests = []struct { + name string + keyToAdd interface{} + keyToGet interface{} + expectedOk bool +}{ + {"string_hit", "myKey", "myKey", true}, + {"string_miss", "myKey", "nonsense", false}, + {"simple_struct_hit", simpleStruct{1, "two"}, simpleStruct{1, "two"}, true}, + {"simple_struct_miss", simpleStruct{1, "two"}, simpleStruct{0, "noway"}, false}, + {"complex_struct_hit", complexStruct{1, simpleStruct{2, "three"}}, + complexStruct{1, simpleStruct{2, "three"}}, true}, +} + +func TestGet(t *testing.T) { + for _, tt := range getTests { + lru := New(0) + lru.Add(tt.keyToAdd, 1234) + val, ok := lru.Get(tt.keyToGet) + if ok != tt.expectedOk { + t.Fatalf("%s: cache hit = %v; want %v", tt.name, ok, !ok) + } else if ok && val != 1234 { + t.Fatalf("%s expected get to return 1234 but got %v", tt.name, val) + } + } +} + +func TestRemove(t *testing.T) { + lru := New(0) + lru.Add("myKey", 1234) + if val, ok := lru.Get("myKey"); !ok { + t.Fatal("TestRemove returned no match") + } else if val != 1234 { + t.Fatalf("TestRemove failed. 
Expected %d, got %v", 1234, val) + } + + lru.Remove("myKey") + if _, ok := lru.Get("myKey"); ok { + t.Fatal("TestRemove returned a removed entry") + } +} + +func TestEvict(t *testing.T) { + evictedKeys := make([]Key, 0) + onEvictedFun := func(key Key, value interface{}) { + evictedKeys = append(evictedKeys, key) + } + + lru := New(20) + lru.OnEvicted = onEvictedFun + for i := 0; i < 22; i++ { + lru.Add(fmt.Sprintf("myKey%d", i), 1234) + } + + if len(evictedKeys) != 2 { + t.Fatalf("got %d evicted keys; want 2", len(evictedKeys)) + } + if evictedKeys[0] != Key("myKey0") { + t.Fatalf("got %v in first evicted key; want %s", evictedKeys[0], "myKey0") + } + if evictedKeys[1] != Key("myKey1") { + t.Fatalf("got %v in second evicted key; want %s", evictedKeys[1], "myKey1") + } +} diff --git a/vendor/github.com/ipfs/bbloom/.travis.yml b/vendor/github.com/ipfs/bbloom/.travis.yml new file mode 100644 index 0000000000..4cfe98c242 --- /dev/null +++ b/vendor/github.com/ipfs/bbloom/.travis.yml @@ -0,0 +1,32 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gx + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/src/gx + - $GOPATH/pkg/mod + - $HOME/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/ipfs/bbloom/README.md b/vendor/github.com/ipfs/bbloom/README.md new file mode 100644 index 0000000000..46e5ec75eb --- /dev/null +++ b/vendor/github.com/ipfs/bbloom/README.md @@ -0,0 +1,129 @@ +## bbloom: a bitset Bloom filter for go/golang +=== + +package implements a fast bloom filter with real 'bitset' and JSONMarshal/JSONUnmarshal to store/reload the Bloom filter. + +NOTE: the package uses unsafe.Pointer to set and read the bits from the bitset. 
If you're uncomfortable with using the unsafe package, please consider using my bloom filter package at github.com/AndreasBriese/bloom + +=== + +changelog 11/2015: new thread safe methods AddTS(), HasTS(), AddIfNotHasTS() following a suggestion from Srdjan Marinovic (github @a-little-srdjan), who used this to code a bloomfilter cache. + +This bloom filter was developed to strengthen a website-log database and was tested and optimized for this log-entry mask: "2014/%02i/%02i %02i:%02i:%02i /info.html". +Nonetheless bbloom should work with any other form of entries. + +~~Hash function is a modified Berkeley DB sdbm hash (to optimize for smaller strings). sdbm http://www.cse.yorku.ca/~oz/hash.html~~ + +Found sipHash (SipHash-2-4, a fast short-input PRF created by Jean-Philippe Aumasson and Daniel J. Bernstein.) to be about as fast. sipHash had been ported by Dimtry Chestnyk to Go (github.com/dchest/siphash ) + +Minimum hashset size is: 512 ([4]uint64; will be set automatically). + +###install + +```sh +go get github.com/AndreasBriese/bbloom +``` + +###test ++ change to folder ../bbloom ++ create wordlist in file "words.txt" (you might use `python permut.py`) ++ run 'go test -bench=.' within the folder + +```go +go test -bench=. +``` + +~~If you've installed the GOCONVEY TDD-framework http://goconvey.co/ you can run the tests automatically.~~ + +using go's testing framework now (have in mind that the op timing is related to 65536 operations of Add, Has, AddIfNotHas respectively) + +### usage + +after installation add + +```go +import ( + ... + "github.com/AndreasBriese/bbloom" + ... + ) +``` + +at your header. 
In the program use + +```go +// create a bloom filter for 65536 items and 1 % wrong-positive ratio +bf := bbloom.New(float64(1<<16), float64(0.01)) + +// or +// create a bloom filter with 650000 for 65536 items and 7 locs per hash explicitly +// bf = bbloom.New(float64(650000), float64(7)) +// or +bf = bbloom.New(650000.0, 7.0) + +// add one item +bf.Add([]byte("butter")) + +// Number of elements added is exposed now +// Note: ElemNum will not be included in JSON export (for compatability to older version) +nOfElementsInFilter := bf.ElemNum + +// check if item is in the filter +isIn := bf.Has([]byte("butter")) // should be true +isNotIn := bf.Has([]byte("Butter")) // should be false + +// 'add only if item is new' to the bloomfilter +added := bf.AddIfNotHas([]byte("butter")) // should be false because 'butter' is already in the set +added = bf.AddIfNotHas([]byte("buTTer")) // should be true because 'buTTer' is new + +// thread safe versions for concurrent use: AddTS, HasTS, AddIfNotHasTS +// add one item +bf.AddTS([]byte("peanutbutter")) +// check if item is in the filter +isIn = bf.HasTS([]byte("peanutbutter")) // should be true +isNotIn = bf.HasTS([]byte("peanutButter")) // should be false +// 'add only if item is new' to the bloomfilter +added = bf.AddIfNotHasTS([]byte("butter")) // should be false because 'peanutbutter' is already in the set +added = bf.AddIfNotHasTS([]byte("peanutbuTTer")) // should be true because 'penutbuTTer' is new + +// convert to JSON ([]byte) +Json := bf.JSONMarshal() + +// bloomfilters Mutex is exposed for external un-/locking +// i.e. mutex lock while doing JSON conversion +bf.Mtx.Lock() +Json = bf.JSONMarshal() +bf.Mtx.Unlock() + +// restore a bloom filter from storage +bfNew, _ := bbloom.JSONUnmarshal(Json) + +isInNew := bfNew.Has([]byte("butter")) // should be true +isNotInNew := bfNew.Has([]byte("Butter")) // should be false + +``` + +to work with the bloom filter. + +### why 'fast'? 
+ +It's about 3 times faster than William Fitzgeralds bitset bloom filter https://github.com/willf/bloom . And it is about so fast as my []bool set variant for Boom filters (see https://github.com/AndreasBriese/bloom ) but having a 8times smaller memory footprint: + + + Bloom filter (filter size 524288, 7 hashlocs) + github.com/AndreasBriese/bbloom 'Add' 65536 items (10 repetitions): 6595800 ns (100 ns/op) + github.com/AndreasBriese/bbloom 'Has' 65536 items (10 repetitions): 5986600 ns (91 ns/op) + github.com/AndreasBriese/bloom 'Add' 65536 items (10 repetitions): 6304684 ns (96 ns/op) + github.com/AndreasBriese/bloom 'Has' 65536 items (10 repetitions): 6568663 ns (100 ns/op) + + github.com/willf/bloom 'Add' 65536 items (10 repetitions): 24367224 ns (371 ns/op) + github.com/willf/bloom 'Test' 65536 items (10 repetitions): 21881142 ns (333 ns/op) + github.com/dataence/bloom/standard 'Add' 65536 items (10 repetitions): 23041644 ns (351 ns/op) + github.com/dataence/bloom/standard 'Check' 65536 items (10 repetitions): 19153133 ns (292 ns/op) + github.com/cabello/bloom 'Add' 65536 items (10 repetitions): 131921507 ns (2012 ns/op) + github.com/cabello/bloom 'Contains' 65536 items (10 repetitions): 131108962 ns (2000 ns/op) + +(on MBPro15 OSX10.8.5 i7 4Core 2.4Ghz) + + +With 32bit bloom filters (bloom32) using modified sdbm, bloom32 does hashing with only 2 bit shifts, one xor and one substraction per byte. smdb is about as fast as fnv64a but gives less collisions with the dataset (see mask above). bloom.New(float64(10 * 1<<16),float64(7)) populated with 1<<16 random items from the dataset (see above) and tested against the rest results in less than 0.05% collisions. 
diff --git a/vendor/github.com/ipfs/bbloom/bbloom.go b/vendor/github.com/ipfs/bbloom/bbloom.go new file mode 100644 index 0000000000..36f12e0dea --- /dev/null +++ b/vendor/github.com/ipfs/bbloom/bbloom.go @@ -0,0 +1,326 @@ +// The MIT License (MIT) +// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt + +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +package bbloom + +import ( + "encoding/binary" + "encoding/json" + "errors" + "log" + "math" + "math/bits" + "sync" +) + +func getSize(ui64 uint64) (size uint64, exponent uint64) { + if ui64 < uint64(512) { + ui64 = uint64(512) + } + size = uint64(1) + for size < ui64 { + size <<= 1 + exponent++ + } + return size, exponent +} + +func calcSizeByWrongPositives(numEntries, wrongs float64) (uint64, uint64) { + size := -1 * numEntries * math.Log(wrongs) / math.Pow(float64(0.69314718056), 2) + locs := math.Ceil(float64(0.69314718056) * size / numEntries) + return uint64(size), uint64(locs) +} + +var ErrUsage = errors.New("usage: New(float64(number_of_entries), float64(number_of_hashlocations)) i.e. New(float64(1000), float64(3)) or New(float64(number_of_entries), float64(ratio_of_false_positives)) i.e. New(float64(1000), float64(0.03))") +var ErrInvalidParms = errors.New("One of parameters was outside of allowed range") + +// New +// returns a new bloomfilter +func New(params ...float64) (bloomfilter *Bloom, err error) { + var entries, locs uint64 + if len(params) == 2 { + if params[0] < 0 || params[1] < 0 { + return nil, ErrInvalidParms + } + if params[1] < 1 { + entries, locs = calcSizeByWrongPositives(math.Max(params[0], 1), params[1]) + } else { + entries, locs = uint64(params[0]), uint64(params[1]) + } + } else { + return nil, ErrUsage + } + size, exponent := getSize(uint64(entries)) + bloomfilter = &Bloom{ + sizeExp: exponent, + size: size - 1, + setLocs: locs, + shift: 64 - exponent, + bitset: make([]uint64, size>>6), + } + return bloomfilter, nil +} + +// NewWithBoolset +// takes a []byte slice and number of locs per entry +// returns the bloomfilter with a bitset populated according to the input []byte +func NewWithBoolset(bs []byte, locs uint64) (bloomfilter *Bloom) { + bloomfilter, err := New(float64(len(bs)<<3), float64(locs)) + if err != nil { + panic(err) // Should never happen + } + for i := range bloomfilter.bitset { + bloomfilter.bitset[i] = 
binary.BigEndian.Uint64((bs)[i<<3:]) + } + return bloomfilter +} + +// bloomJSONImExport +// Im/Export structure used by JSONMarshal / JSONUnmarshal +type bloomJSONImExport struct { + FilterSet []byte + SetLocs uint64 +} + +// +// Bloom filter +type Bloom struct { + Mtx sync.RWMutex + bitset []uint64 + sizeExp uint64 + size uint64 + setLocs uint64 + shift uint64 + + content uint64 +} + +// ElementsAdded returns the number of elements added to the bloom filter. +func (bl *Bloom) ElementsAdded() uint64 { + return bl.content +} + +// <--- http://www.cse.yorku.ca/~oz/hash.html +// modified Berkeley DB Hash (32bit) +// hash is casted to l, h = 16bit fragments +// func (bl Bloom) absdbm(b *[]byte) (l, h uint64) { +// hash := uint64(len(*b)) +// for _, c := range *b { +// hash = uint64(c) + (hash << 6) + (hash << bl.sizeExp) - hash +// } +// h = hash >> bl.shift +// l = hash << bl.shift >> bl.shift +// return l, h +// } + +// Update: found sipHash of Jean-Philippe Aumasson & Daniel J. Bernstein to be even faster than absdbm() +// https://131002.net/siphash/ +// siphash was implemented for Go by Dmitry Chestnykh https://github.com/dchest/siphash + +// Add +// set the bit(s) for entry; Adds an entry to the Bloom filter +func (bl *Bloom) Add(entry []byte) { + bl.content++ + l, h := bl.sipHash(entry) + for i := uint64(0); i < (*bl).setLocs; i++ { + bl.set((h + i*l) & (*bl).size) + } +} + +// AddTS +// Thread safe: Mutex.Lock the bloomfilter for the time of processing the entry +func (bl *Bloom) AddTS(entry []byte) { + bl.Mtx.Lock() + bl.Add(entry) + bl.Mtx.Unlock() +} + +// Has +// check if bit(s) for entry is/are set +// returns true if the entry was added to the Bloom Filter +func (bl *Bloom) Has(entry []byte) bool { + l, h := bl.sipHash(entry) + res := true + for i := uint64(0); i < bl.setLocs; i++ { + res = res && bl.isSet((h+i*l)&bl.size) + // Branching here (early escape) is not worth it + // This is my conclusion from benchmarks + // (prevents loop unrolling) + // if 
!res { + // return false + // } + } + return res +} + +// HasTS +// Thread safe: Mutex.Lock the bloomfilter for the time of processing the entry +func (bl *Bloom) HasTS(entry []byte) bool { + bl.Mtx.RLock() + has := bl.Has(entry[:]) + bl.Mtx.RUnlock() + return has +} + +// AddIfNotHas +// Only Add entry if it's not present in the bloomfilter +// returns true if entry was added +// returns false if entry was allready registered in the bloomfilter +func (bl *Bloom) AddIfNotHas(entry []byte) (added bool) { + l, h := bl.sipHash(entry) + contained := true + for i := uint64(0); i < bl.setLocs; i++ { + prev := bl.getSet((h + i*l) & bl.size) + contained = contained && prev + } + if !contained { + bl.content++ + } + return !contained +} + +// AddIfNotHasTS +// Tread safe: Only Add entry if it's not present in the bloomfilter +// returns true if entry was added +// returns false if entry was allready registered in the bloomfilter +func (bl *Bloom) AddIfNotHasTS(entry []byte) (added bool) { + bl.Mtx.Lock() + added = bl.AddIfNotHas(entry[:]) + bl.Mtx.Unlock() + return added +} + +// Clear +// resets the Bloom filter +func (bl *Bloom) Clear() { + bs := bl.bitset // important performance optimization. + for i := range bs { + bs[i] = 0 + } + bl.content = 0 +} + +// ClearTS clears the bloom filter (thread safe). 
+func (bl *Bloom) ClearTS() { + bl.Mtx.Lock() + bl.Clear() + bl.Mtx.Unlock() +} + +func (bl *Bloom) set(idx uint64) { + bl.bitset[idx>>6] |= 1 << (idx % 64) +} + +func (bl *Bloom) getSet(idx uint64) bool { + cur := bl.bitset[idx>>6] + bit := uint64(1 << (idx % 64)) + bl.bitset[idx>>6] = cur | bit + return (cur & bit) > 0 +} + +func (bl *Bloom) isSet(idx uint64) bool { + return bl.bitset[idx>>6]&(1<<(idx%64)) > 0 +} + +func (bl *Bloom) marshal() bloomJSONImExport { + bloomImEx := bloomJSONImExport{} + bloomImEx.SetLocs = uint64(bl.setLocs) + bloomImEx.FilterSet = make([]byte, len(bl.bitset)<<3) + for i, w := range bl.bitset { + binary.BigEndian.PutUint64(bloomImEx.FilterSet[i<<3:], w) + } + return bloomImEx +} + +// JSONMarshal +// returns JSON-object (type bloomJSONImExport) as []byte +func (bl *Bloom) JSONMarshal() []byte { + data, err := json.Marshal(bl.marshal()) + if err != nil { + log.Fatal("json.Marshal failed: ", err) + } + return data +} + +// JSONMarshalTS is a thread-safe version of JSONMarshal +func (bl *Bloom) JSONMarshalTS() []byte { + bl.Mtx.RLock() + export := bl.marshal() + bl.Mtx.RUnlock() + data, err := json.Marshal(export) + if err != nil { + log.Fatal("json.Marshal failed: ", err) + } + return data +} + +// JSONUnmarshal +// takes JSON-Object (type bloomJSONImExport) as []bytes +// returns bloom32 / bloom64 object +func JSONUnmarshal(dbData []byte) (*Bloom, error) { + bloomImEx := bloomJSONImExport{} + err := json.Unmarshal(dbData, &bloomImEx) + if err != nil { + return nil, err + } + bf := NewWithBoolset(bloomImEx.FilterSet, bloomImEx.SetLocs) + return bf, nil +} + +// FillRatio returns the fraction of bits set. 
+func (bl *Bloom) FillRatio() float64 { + count := uint64(0) + for _, b := range bl.bitset { + count += uint64(bits.OnesCount64(b)) + } + return float64(count) / float64(bl.size+1) +} + +// FillRatioTS is a thread-save version of FillRatio +func (bl *Bloom) FillRatioTS() float64 { + bl.Mtx.RLock() + fr := bl.FillRatio() + bl.Mtx.RUnlock() + return fr +} + +// // alternative hashFn +// func (bl Bloom) fnv64a(b *[]byte) (l, h uint64) { +// h64 := fnv.New64a() +// h64.Write(*b) +// hash := h64.Sum64() +// h = hash >> 32 +// l = hash << 32 >> 32 +// return l, h +// } +// +// // <-- http://partow.net/programming/hashfunctions/index.html +// // citation: An algorithm proposed by Donald E. Knuth in The Art Of Computer Programming Volume 3, +// // under the topic of sorting and search chapter 6.4. +// // modified to fit with boolset-length +// func (bl Bloom) DEKHash(b *[]byte) (l, h uint64) { +// hash := uint64(len(*b)) +// for _, c := range *b { +// hash = ((hash << 5) ^ (hash >> bl.shift)) ^ uint64(c) +// } +// h = hash >> bl.shift +// l = hash << bl.sizeExp >> bl.sizeExp +// return l, h +// } diff --git a/vendor/github.com/ipfs/bbloom/go.mod b/vendor/github.com/ipfs/bbloom/go.mod new file mode 100644 index 0000000000..d215f4f103 --- /dev/null +++ b/vendor/github.com/ipfs/bbloom/go.mod @@ -0,0 +1,3 @@ +module github.com/ipfs/bbloom + +go 1.12 diff --git a/vendor/github.com/ipfs/bbloom/package.json b/vendor/github.com/ipfs/bbloom/package.json new file mode 100644 index 0000000000..3d42cf9c85 --- /dev/null +++ b/vendor/github.com/ipfs/bbloom/package.json @@ -0,0 +1,15 @@ +{ + "author": "AndreasBriese", + "bugs": { + "url": "https://github.com/ipfs/bbloom" + }, + "gx": { + "dvcsimport": "github.com/ipfs/bbloom" + }, + "gxVersion": "0.7.0", + "language": "go", + "license": "MIT", + "name": "bbloom", + "version": "0.1.2" +} + diff --git a/vendor/github.com/ipfs/bbloom/sipHash.go b/vendor/github.com/ipfs/bbloom/sipHash.go new file mode 100644 index 0000000000..4f2755ca9e --- 
/dev/null +++ b/vendor/github.com/ipfs/bbloom/sipHash.go @@ -0,0 +1,225 @@ +// Written in 2012 by Dmitry Chestnykh. +// +// To the extent possible under law, the author have dedicated all copyright +// and related and neighboring rights to this software to the public domain +// worldwide. This software is distributed without any warranty. +// http://creativecommons.org/publicdomain/zero/1.0/ +// +// Package siphash implements SipHash-2-4, a fast short-input PRF +// created by Jean-Philippe Aumasson and Daniel J. Bernstein. + +package bbloom + +// Hash returns the 64-bit SipHash-2-4 of the given byte slice with two 64-bit +// parts of 128-bit key: k0 and k1. +func (bl *Bloom) sipHash(p []byte) (l, h uint64) { + // Initialization. + v0 := uint64(8317987320269560794) // k0 ^ 0x736f6d6570736575 + v1 := uint64(7237128889637516672) // k1 ^ 0x646f72616e646f6d + v2 := uint64(7816392314733513934) // k0 ^ 0x6c7967656e657261 + v3 := uint64(8387220255325274014) // k1 ^ 0x7465646279746573 + t := uint64(len(p)) << 56 + + // Compression. + for len(p) >= 8 { + + m := uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | + uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56 + + v3 ^= m + + // Round 1. + v0 += v1 + v1 = v1<<13 | v1>>51 + v1 ^= v0 + v0 = v0<<32 | v0>>32 + + v2 += v3 + v3 = v3<<16 | v3>>48 + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>43 + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>47 + v1 ^= v2 + v2 = v2<<32 | v2>>32 + + // Round 2. + v0 += v1 + v1 = v1<<13 | v1>>51 + v1 ^= v0 + v0 = v0<<32 | v0>>32 + + v2 += v3 + v3 = v3<<16 | v3>>48 + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>43 + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>47 + v1 ^= v2 + v2 = v2<<32 | v2>>32 + + v0 ^= m + p = p[8:] + } + + // Compress last block. 
+ switch len(p) { + case 7: + t |= uint64(p[6]) << 48 + fallthrough + case 6: + t |= uint64(p[5]) << 40 + fallthrough + case 5: + t |= uint64(p[4]) << 32 + fallthrough + case 4: + t |= uint64(p[3]) << 24 + fallthrough + case 3: + t |= uint64(p[2]) << 16 + fallthrough + case 2: + t |= uint64(p[1]) << 8 + fallthrough + case 1: + t |= uint64(p[0]) + } + + v3 ^= t + + // Round 1. + v0 += v1 + v1 = v1<<13 | v1>>51 + v1 ^= v0 + v0 = v0<<32 | v0>>32 + + v2 += v3 + v3 = v3<<16 | v3>>48 + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>43 + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>47 + v1 ^= v2 + v2 = v2<<32 | v2>>32 + + // Round 2. + v0 += v1 + v1 = v1<<13 | v1>>51 + v1 ^= v0 + v0 = v0<<32 | v0>>32 + + v2 += v3 + v3 = v3<<16 | v3>>48 + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>43 + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>47 + v1 ^= v2 + v2 = v2<<32 | v2>>32 + + v0 ^= t + + // Finalization. + v2 ^= 0xff + + // Round 1. + v0 += v1 + v1 = v1<<13 | v1>>51 + v1 ^= v0 + v0 = v0<<32 | v0>>32 + + v2 += v3 + v3 = v3<<16 | v3>>48 + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>43 + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>47 + v1 ^= v2 + v2 = v2<<32 | v2>>32 + + // Round 2. + v0 += v1 + v1 = v1<<13 | v1>>51 + v1 ^= v0 + v0 = v0<<32 | v0>>32 + + v2 += v3 + v3 = v3<<16 | v3>>48 + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>43 + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>47 + v1 ^= v2 + v2 = v2<<32 | v2>>32 + + // Round 3. + v0 += v1 + v1 = v1<<13 | v1>>51 + v1 ^= v0 + v0 = v0<<32 | v0>>32 + + v2 += v3 + v3 = v3<<16 | v3>>48 + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>43 + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>47 + v1 ^= v2 + v2 = v2<<32 | v2>>32 + + // Round 4. 
+ v0 += v1 + v1 = v1<<13 | v1>>51 + v1 ^= v0 + v0 = v0<<32 | v0>>32 + + v2 += v3 + v3 = v3<<16 | v3>>48 + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>43 + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>47 + v1 ^= v2 + v2 = v2<<32 | v2>>32 + + // return v0 ^ v1 ^ v2 ^ v3 + + hash := v0 ^ v1 ^ v2 ^ v3 + h = hash >> bl.shift + l = hash << bl.shift >> bl.shift + return l, h + +} diff --git a/vendor/github.com/ipfs/bbloom/words.txt b/vendor/github.com/ipfs/bbloom/words.txt new file mode 100644 index 0000000000..ad86a31ac5 --- /dev/null +++ b/vendor/github.com/ipfs/bbloom/words.txt @@ -0,0 +1,140 @@ +2014/01/01 00:00:00 /info.html +2014/01/01 00:00:00 /info.html +2014/01/01 00:00:01 /info.html +2014/01/01 00:00:02 /info.html +2014/01/01 00:00:03 /info.html +2014/01/01 00:00:04 /info.html +2014/01/01 00:00:05 /info.html +2014/01/01 00:00:06 /info.html +2014/01/01 00:00:07 /info.html +2014/01/01 00:00:08 /info.html +2014/01/01 00:00:09 /info.html +2014/01/01 00:00:10 /info.html +2014/01/01 00:00:11 /info.html +2014/01/01 00:00:12 /info.html +2014/01/01 00:00:13 /info.html +2014/01/01 00:00:14 /info.html +2014/01/01 00:00:15 /info.html +2014/01/01 00:00:16 /info.html +2014/01/01 00:00:17 /info.html +2014/01/01 00:00:18 /info.html +2014/01/01 00:00:19 /info.html +2014/01/01 00:00:20 /info.html +2014/01/01 00:00:21 /info.html +2014/01/01 00:00:22 /info.html +2014/01/01 00:00:23 /info.html +2014/01/01 00:00:24 /info.html +2014/01/01 00:00:25 /info.html +2014/01/01 00:00:26 /info.html +2014/01/01 00:00:27 /info.html +2014/01/01 00:00:28 /info.html +2014/01/01 00:00:29 /info.html +2014/01/01 00:00:30 /info.html +2014/01/01 00:00:31 /info.html +2014/01/01 00:00:32 /info.html +2014/01/01 00:00:33 /info.html +2014/01/01 00:00:34 /info.html +2014/01/01 00:00:35 /info.html +2014/01/01 00:00:36 /info.html +2014/01/01 00:00:37 /info.html +2014/01/01 00:00:38 /info.html +2014/01/01 00:00:39 /info.html +2014/01/01 00:00:40 /info.html +2014/01/01 00:00:41 /info.html +2014/01/01 00:00:42 
/info.html +2014/01/01 00:00:43 /info.html +2014/01/01 00:00:44 /info.html +2014/01/01 00:00:45 /info.html +2014/01/01 00:00:46 /info.html +2014/01/01 00:00:47 /info.html +2014/01/01 00:00:48 /info.html +2014/01/01 00:00:49 /info.html +2014/01/01 00:00:50 /info.html +2014/01/01 00:00:51 /info.html +2014/01/01 00:00:52 /info.html +2014/01/01 00:00:53 /info.html +2014/01/01 00:00:54 /info.html +2014/01/01 00:00:55 /info.html +2014/01/01 00:00:56 /info.html +2014/01/01 00:00:57 /info.html +2014/01/01 00:00:58 /info.html +2014/01/01 00:00:59 /info.html +2014/01/01 00:01:00 /info.html +2014/01/01 00:01:01 /info.html +2014/01/01 00:01:02 /info.html +2014/01/01 00:01:03 /info.html +2014/01/01 00:01:04 /info.html +2014/01/01 00:01:05 /info.html +2014/01/01 00:01:06 /info.html +2014/01/01 00:01:07 /info.html +2014/01/01 00:01:08 /info.html +2014/01/01 00:01:09 /info.html +2014/01/01 00:01:10 /info.html +2014/01/01 00:01:11 /info.html +2014/01/01 00:01:12 /info.html +2014/01/01 00:01:13 /info.html +2014/01/01 00:01:14 /info.html +2014/01/01 00:01:15 /info.html +2014/01/01 00:01:16 /info.html +2014/01/01 00:01:17 /info.html +2014/01/01 00:01:18 /info.html +2014/01/01 00:01:19 /info.html +2014/01/01 00:01:20 /info.html +2014/01/01 00:01:21 /info.html +2014/01/01 00:01:22 /info.html +2014/01/01 00:01:23 /info.html +2014/01/01 00:01:24 /info.html +2014/01/01 00:01:25 /info.html +2014/01/01 00:01:26 /info.html +2014/01/01 00:01:27 /info.html +2014/01/01 00:01:28 /info.html +2014/01/01 00:01:29 /info.html +2014/01/01 00:01:30 /info.html +2014/01/01 00:01:31 /info.html +2014/01/01 00:01:32 /info.html +2014/01/01 00:01:33 /info.html +2014/01/01 00:01:34 /info.html +2014/01/01 00:01:35 /info.html +2014/01/01 00:01:36 /info.html +2014/01/01 00:01:37 /info.html +2014/01/01 00:01:38 /info.html +2014/01/01 00:01:39 /info.html +2014/01/01 00:01:40 /info.html +2014/01/01 00:01:41 /info.html +2014/01/01 00:01:42 /info.html +2014/01/01 00:01:43 /info.html +2014/01/01 00:01:44 /info.html 
+2014/01/01 00:01:45 /info.html +2014/01/01 00:01:46 /info.html +2014/01/01 00:01:47 /info.html +2014/01/01 00:01:48 /info.html +2014/01/01 00:01:49 /info.html +2014/01/01 00:01:50 /info.html +2014/01/01 00:01:51 /info.html +2014/01/01 00:01:52 /info.html +2014/01/01 00:01:53 /info.html +2014/01/01 00:01:54 /info.html +2014/01/01 00:01:55 /info.html +2014/01/01 00:01:56 /info.html +2014/01/01 00:01:57 /info.html +2014/01/01 00:01:58 /info.html +2014/01/01 00:01:59 /info.html +2014/01/01 00:02:00 /info.html +2014/01/01 00:02:01 /info.html +2014/01/01 00:02:02 /info.html +2014/01/01 00:02:03 /info.html +2014/01/01 00:02:04 /info.html +2014/01/01 00:02:05 /info.html +2014/01/01 00:02:06 /info.html +2014/01/01 00:02:07 /info.html +2014/01/01 00:02:08 /info.html +2014/01/01 00:02:09 /info.html +2014/01/01 00:02:10 /info.html +2014/01/01 00:02:11 /info.html +2014/01/01 00:02:12 /info.html +2014/01/01 00:02:13 /info.html +2014/01/01 00:02:14 /info.html +2014/01/01 00:02:15 /info.html +2014/01/01 00:02:16 /info.html +2014/01/01 00:02:17 /info.html +2014/01/01 00:02:18 /info.html diff --git a/vendor/github.com/ipfs/go-block-format/.travis.yml b/vendor/github.com/ipfs/go-block-format/.travis.yml new file mode 100644 index 0000000000..4cfe98c242 --- /dev/null +++ b/vendor/github.com/ipfs/go-block-format/.travis.yml @@ -0,0 +1,32 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gx + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/src/gx + - $GOPATH/pkg/mod + - $HOME/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/ipfs/go-block-format/LICENSE b/vendor/github.com/ipfs/go-block-format/LICENSE new file mode 100644 index 0000000000..8001ebee6a --- /dev/null +++ 
b/vendor/github.com/ipfs/go-block-format/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014-2017 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/ipfs/go-block-format/Makefile b/vendor/github.com/ipfs/go-block-format/Makefile new file mode 100644 index 0000000000..7811c099ea --- /dev/null +++ b/vendor/github.com/ipfs/go-block-format/Makefile @@ -0,0 +1,15 @@ +gx: + go get github.com/whyrusleeping/gx + go get github.com/whyrusleeping/gx-go + +covertools: + go get github.com/mattn/goveralls + go get golang.org/x/tools/cmd/cover + +deps: gx covertools + gx --verbose install --global + gx-go rewrite + +publish: + gx-go rewrite --undo + diff --git a/vendor/github.com/ipfs/go-block-format/README.md b/vendor/github.com/ipfs/go-block-format/README.md new file mode 100644 index 0000000000..67cd1fcd1a --- /dev/null +++ b/vendor/github.com/ipfs/go-block-format/README.md @@ -0,0 +1,35 @@ +go-block-format +================== + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) +[![Coverage Status](https://codecov.io/gh/ipfs/go-block-format/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs/go-block-format/branch/master) +[![Travis CI](https://travis-ci.org/ipfs/go-block-format.svg?branch=master)](https://travis-ci.org/ipfs/go-block-format) + +> go-block-format is a set of interfaces that a type needs to implement in order to be a CID addressable block of data. + + +## Table of Contents + +- [Install](#install) +- [Usage](#usage) +- [API](#api) +- [Contribute](#contribute) +- [License](#license) + +## Install + +```sh +make install +``` + +## Contribute + +PRs are welcome! + +Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. 
+ +## License + +MIT © Juan Batiz-Benet diff --git a/vendor/github.com/ipfs/go-block-format/blocks.go b/vendor/github.com/ipfs/go-block-format/blocks.go new file mode 100644 index 0000000000..3d3894b3f3 --- /dev/null +++ b/vendor/github.com/ipfs/go-block-format/blocks.go @@ -0,0 +1,82 @@ +// Package blocks contains the lowest level of IPLD data structures. +// A block is raw data accompanied by a CID. The CID contains the multihash +// corresponding to the block. +package blocks + +import ( + "errors" + "fmt" + + cid "github.com/ipfs/go-cid" + u "github.com/ipfs/go-ipfs-util" + mh "github.com/multiformats/go-multihash" +) + +// ErrWrongHash is returned when the Cid of a block is not the expected +// according to the contents. It is currently used only when debugging. +var ErrWrongHash = errors.New("data did not match given hash") + +// Block provides abstraction for blocks implementations. +type Block interface { + RawData() []byte + Cid() cid.Cid + String() string + Loggable() map[string]interface{} +} + +// A BasicBlock is a singular block of data in ipfs. It implements the Block +// interface. +type BasicBlock struct { + cid cid.Cid + data []byte +} + +// NewBlock creates a Block object from opaque data. It will hash the data. +func NewBlock(data []byte) *BasicBlock { + // TODO: fix assumptions + return &BasicBlock{data: data, cid: cid.NewCidV0(u.Hash(data))} +} + +// NewBlockWithCid creates a new block when the hash of the data +// is already known, this is used to save time in situations where +// we are able to be confident that the data is correct. +func NewBlockWithCid(data []byte, c cid.Cid) (*BasicBlock, error) { + if u.Debug { + chkc, err := c.Prefix().Sum(data) + if err != nil { + return nil, err + } + + if !chkc.Equals(c) { + return nil, ErrWrongHash + } + } + return &BasicBlock{data: data, cid: c}, nil +} + +// Multihash returns the hash contained in the block CID. 
+func (b *BasicBlock) Multihash() mh.Multihash { + return b.cid.Hash() +} + +// RawData returns the block raw contents as a byte slice. +func (b *BasicBlock) RawData() []byte { + return b.data +} + +// Cid returns the content identifier of the block. +func (b *BasicBlock) Cid() cid.Cid { + return b.cid +} + +// String provides a human-readable representation of the block CID. +func (b *BasicBlock) String() string { + return fmt.Sprintf("[Block %s]", b.Cid()) +} + +// Loggable returns a go-log loggable item. +func (b *BasicBlock) Loggable() map[string]interface{} { + return map[string]interface{}{ + "block": b.Cid().String(), + } +} diff --git a/vendor/github.com/ipfs/go-block-format/codecov.yml b/vendor/github.com/ipfs/go-block-format/codecov.yml new file mode 100644 index 0000000000..5f88a9ea27 --- /dev/null +++ b/vendor/github.com/ipfs/go-block-format/codecov.yml @@ -0,0 +1,3 @@ +coverage: + range: "50...100" +comment: off diff --git a/vendor/github.com/ipfs/go-block-format/go.mod b/vendor/github.com/ipfs/go-block-format/go.mod new file mode 100644 index 0000000000..f6ab8eabd8 --- /dev/null +++ b/vendor/github.com/ipfs/go-block-format/go.mod @@ -0,0 +1,7 @@ +module github.com/ipfs/go-block-format + +require ( + github.com/ipfs/go-cid v0.0.1 + github.com/ipfs/go-ipfs-util v0.0.1 + github.com/multiformats/go-multihash v0.0.1 +) diff --git a/vendor/github.com/ipfs/go-block-format/go.sum b/vendor/github.com/ipfs/go-block-format/go.sum new file mode 100644 index 0000000000..3fb22960b3 --- /dev/null +++ b/vendor/github.com/ipfs/go-block-format/go.sum @@ -0,0 +1,24 @@ +github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/ipfs/go-cid v0.0.1 
h1:GBjWPktLnNyX0JiQCNFpUuUSoMw5KMyqrsejHYlILBE= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d h1:Z0Ahzd7HltpJtjAHHxX8QFP3j1yYgiuvjbjRzDj/KH0= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git 
a/vendor/github.com/ipfs/go-block-format/package.json b/vendor/github.com/ipfs/go-block-format/package.json new file mode 100644 index 0000000000..970ac559b1 --- /dev/null +++ b/vendor/github.com/ipfs/go-block-format/package.json @@ -0,0 +1,36 @@ +{ + "author": "stebalien", + "bugs": { + "url": "https://github.com/ipfs/go-block-format" + }, + "gx": { + "dvcsimport": "github.com/ipfs/go-block-format" + }, + "gxDependencies": [ + { + "author": "whyrusleeping", + "hash": "QmTbxNB1NwDesLmKTscr4udL2tVP7MaxvXnD1D9yX7g3PN", + "name": "go-cid", + "version": "0.9.3" + }, + { + "author": "multiformats", + "hash": "QmerPMzPk1mJVowm8KgmoknWa4yCYvvugMPsgWmDNUvDLW", + "name": "go-multihash", + "version": "1.0.9" + }, + { + "author": "whyrusleeping", + "hash": "QmNohiVssaPw3KVLZik59DBVGTSm2dGvYT9eoXt5DQ36Yz", + "name": "go-ipfs-util", + "version": "1.2.9" + } + ], + "gxVersion": "0.11.0", + "language": "go", + "license": "", + "name": "go-block-format", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "0.2.2" +} + diff --git a/vendor/github.com/ipfs/go-blockservice/LICENSE b/vendor/github.com/ipfs/go-blockservice/LICENSE new file mode 100644 index 0000000000..7d5dcac4d2 --- /dev/null +++ b/vendor/github.com/ipfs/go-blockservice/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014-2018 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/ipfs/go-blockservice/README.md b/vendor/github.com/ipfs/go-blockservice/README.md new file mode 100644 index 0000000000..d36c5cc779 --- /dev/null +++ b/vendor/github.com/ipfs/go-blockservice/README.md @@ -0,0 +1,36 @@ +go-blockservice +================== + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) +[![Coverage Status](https://codecov.io/gh/ipfs/go-block-format/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs/go-block-format/branch/master) +[![Build Status](https://circleci.com/gh/ipfs/go-blockservice.svg?style=svg)](https://circleci.com/gh/ipfs/go-blockservice) + +> go-blockservice provides a seamless interface to both local and remote storage backends. + +## Lead Maintainer + +[Steven Allen](https://github.com/Stebalien) + +## Table of Contents + +- [TODO](#todo) +- [Contribute](#contribute) +- [License](#license) + +## TODO + +The interfaces here really would like to be merged with the blockstore interfaces. +The 'dagservice' constructor currently takes a blockservice, but it would be really nice +if it could just take a blockstore, and have this package implement a blockstore. + +## Contribute + +PRs are welcome! 
+ +Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + +## License + +MIT © Juan Batiz-Benet diff --git a/vendor/github.com/ipfs/go-blockservice/blockservice.go b/vendor/github.com/ipfs/go-blockservice/blockservice.go new file mode 100644 index 0000000000..ba0ab41830 --- /dev/null +++ b/vendor/github.com/ipfs/go-blockservice/blockservice.go @@ -0,0 +1,368 @@ +// package blockservice implements a BlockService interface that provides +// a single GetBlock/AddBlock interface that seamlessly retrieves data either +// locally or from a remote peer through the exchange. +package blockservice + +import ( + "context" + "errors" + "io" + "sync" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + blockstore "github.com/ipfs/go-ipfs-blockstore" + exchange "github.com/ipfs/go-ipfs-exchange-interface" + logging "github.com/ipfs/go-log" + "github.com/ipfs/go-verifcid" +) + +var log = logging.Logger("blockservice") + +var ErrNotFound = errors.New("blockservice: key not found") + +// BlockGetter is the common interface shared between blockservice sessions and +// the blockservice. +type BlockGetter interface { + // GetBlock gets the requested block. + GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, error) + + // GetBlocks does a batch request for the given cids, returning blocks as + // they are found, in no particular order. + // + // It may not be able to find all requested blocks (or the context may + // be canceled). In that case, it will close the channel early. It is up + // to the consumer to detect this situation and keep track which blocks + // it has received and which it hasn't. + GetBlocks(ctx context.Context, ks []cid.Cid) <-chan blocks.Block +} + +// BlockService is a hybrid block datastore. It stores data in a local +// datastore and may retrieve data from a remote Exchange. +// It uses an internal `datastore.Datastore` instance to store values. 
+type BlockService interface { + io.Closer + BlockGetter + + // Blockstore returns a reference to the underlying blockstore + Blockstore() blockstore.Blockstore + + // Exchange returns a reference to the underlying exchange (usually bitswap) + Exchange() exchange.Interface + + // AddBlock puts a given block to the underlying datastore + AddBlock(o blocks.Block) error + + // AddBlocks adds a slice of blocks at the same time using batching + // capabilities of the underlying datastore whenever possible. + AddBlocks(bs []blocks.Block) error + + // DeleteBlock deletes the given block from the blockservice. + DeleteBlock(o cid.Cid) error +} + +type blockService struct { + blockstore blockstore.Blockstore + exchange exchange.Interface + // If checkFirst is true then first check that a block doesn't + // already exist to avoid republishing the block on the exchange. + checkFirst bool +} + +// NewBlockService creates a BlockService with given datastore instance. +func New(bs blockstore.Blockstore, rem exchange.Interface) BlockService { + if rem == nil { + log.Debug("blockservice running in local (offline) mode.") + } + + return &blockService{ + blockstore: bs, + exchange: rem, + checkFirst: true, + } +} + +// NewWriteThrough ceates a BlockService that guarantees writes will go +// through to the blockstore and are not skipped by cache checks. +func NewWriteThrough(bs blockstore.Blockstore, rem exchange.Interface) BlockService { + if rem == nil { + log.Debug("blockservice running in local (offline) mode.") + } + + return &blockService{ + blockstore: bs, + exchange: rem, + checkFirst: false, + } +} + +// Blockstore returns the blockstore behind this blockservice. +func (s *blockService) Blockstore() blockstore.Blockstore { + return s.blockstore +} + +// Exchange returns the exchange behind this blockservice. 
+func (s *blockService) Exchange() exchange.Interface { + return s.exchange +} + +// NewSession creates a new session that allows for +// controlled exchange of wantlists to decrease the bandwidth overhead. +// If the current exchange is a SessionExchange, a new exchange +// session will be created. Otherwise, the current exchange will be used +// directly. +func NewSession(ctx context.Context, bs BlockService) *Session { + exch := bs.Exchange() + if sessEx, ok := exch.(exchange.SessionExchange); ok { + return &Session{ + sessCtx: ctx, + ses: nil, + sessEx: sessEx, + bs: bs.Blockstore(), + } + } + return &Session{ + ses: exch, + sessCtx: ctx, + bs: bs.Blockstore(), + } +} + +// AddBlock adds a particular block to the service, Putting it into the datastore. +// TODO pass a context into this if the remote.HasBlock is going to remain here. +func (s *blockService) AddBlock(o blocks.Block) error { + c := o.Cid() + // hash security + err := verifcid.ValidateCid(c) + if err != nil { + return err + } + if s.checkFirst { + if has, err := s.blockstore.Has(c); has || err != nil { + return err + } + } + + if err := s.blockstore.Put(o); err != nil { + return err + } + + log.Event(context.TODO(), "BlockService.BlockAdded", c) + + if s.exchange != nil { + if err := s.exchange.HasBlock(o); err != nil { + log.Errorf("HasBlock: %s", err.Error()) + } + } + + return nil +} + +func (s *blockService) AddBlocks(bs []blocks.Block) error { + // hash security + for _, b := range bs { + err := verifcid.ValidateCid(b.Cid()) + if err != nil { + return err + } + } + var toput []blocks.Block + if s.checkFirst { + toput = make([]blocks.Block, 0, len(bs)) + for _, b := range bs { + has, err := s.blockstore.Has(b.Cid()) + if err != nil { + return err + } + if !has { + toput = append(toput, b) + } + } + } else { + toput = bs + } + + if len(toput) == 0 { + return nil + } + + err := s.blockstore.PutMany(toput) + if err != nil { + return err + } + + if s.exchange != nil { + for _, o := range toput { + 
log.Event(context.TODO(), "BlockService.BlockAdded", o.Cid()) + if err := s.exchange.HasBlock(o); err != nil { + log.Errorf("HasBlock: %s", err.Error()) + } + } + } + return nil +} + +// GetBlock retrieves a particular block from the service, +// Getting it from the datastore using the key (hash). +func (s *blockService) GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, error) { + log.Debugf("BlockService GetBlock: '%s'", c) + + var f func() exchange.Fetcher + if s.exchange != nil { + f = s.getExchange + } + + return getBlock(ctx, c, s.blockstore, f) // hash security +} + +func (s *blockService) getExchange() exchange.Fetcher { + return s.exchange +} + +func getBlock(ctx context.Context, c cid.Cid, bs blockstore.Blockstore, fget func() exchange.Fetcher) (blocks.Block, error) { + err := verifcid.ValidateCid(c) // hash security + if err != nil { + return nil, err + } + + block, err := bs.Get(c) + if err == nil { + return block, nil + } + + if err == blockstore.ErrNotFound && fget != nil { + f := fget() // Don't load the exchange until we have to + + // TODO be careful checking ErrNotFound. If the underlying + // implementation changes, this will break. + log.Debug("Blockservice: Searching bitswap") + blk, err := f.GetBlock(ctx, c) + if err != nil { + if err == blockstore.ErrNotFound { + return nil, ErrNotFound + } + return nil, err + } + log.Event(ctx, "BlockService.BlockFetched", c) + return blk, nil + } + + log.Debug("Blockservice GetBlock: Not found") + if err == blockstore.ErrNotFound { + return nil, ErrNotFound + } + + return nil, err +} + +// GetBlocks gets a list of blocks asynchronously and returns through +// the returned channel. +// NB: No guarantees are made about order. 
+func (s *blockService) GetBlocks(ctx context.Context, ks []cid.Cid) <-chan blocks.Block { + var f func() exchange.Fetcher + if s.exchange != nil { + f = s.getExchange + } + + return getBlocks(ctx, ks, s.blockstore, f) // hash security +} + +func getBlocks(ctx context.Context, ks []cid.Cid, bs blockstore.Blockstore, fget func() exchange.Fetcher) <-chan blocks.Block { + out := make(chan blocks.Block) + + go func() { + defer close(out) + + k := 0 + for _, c := range ks { + // hash security + if err := verifcid.ValidateCid(c); err == nil { + ks[k] = c + k++ + } else { + log.Errorf("unsafe CID (%s) passed to blockService.GetBlocks: %s", c, err) + } + } + ks = ks[:k] + + var misses []cid.Cid + for _, c := range ks { + hit, err := bs.Get(c) + if err != nil { + misses = append(misses, c) + continue + } + select { + case out <- hit: + case <-ctx.Done(): + return + } + } + + if len(misses) == 0 || fget == nil { + return + } + + f := fget() // don't load exchange unless we have to + rblocks, err := f.GetBlocks(ctx, misses) + if err != nil { + log.Debugf("Error with GetBlocks: %s", err) + return + } + + for b := range rblocks { + log.Event(ctx, "BlockService.BlockFetched", b.Cid()) + select { + case out <- b: + case <-ctx.Done(): + return + } + } + }() + return out +} + +// DeleteBlock deletes a block in the blockservice from the datastore +func (s *blockService) DeleteBlock(c cid.Cid) error { + err := s.blockstore.DeleteBlock(c) + if err == nil { + log.Event(context.TODO(), "BlockService.BlockDeleted", c) + } + return err +} + +func (s *blockService) Close() error { + log.Debug("blockservice is shutting down...") + return s.exchange.Close() +} + +// Session is a helper type to provide higher level access to bitswap sessions +type Session struct { + bs blockstore.Blockstore + ses exchange.Fetcher + sessEx exchange.SessionExchange + sessCtx context.Context + lk sync.Mutex +} + +func (s *Session) getSession() exchange.Fetcher { + s.lk.Lock() + defer s.lk.Unlock() + if s.ses == 
nil { + s.ses = s.sessEx.NewSession(s.sessCtx) + } + + return s.ses +} + +// GetBlock gets a block in the context of a request session +func (s *Session) GetBlock(ctx context.Context, c cid.Cid) (blocks.Block, error) { + return getBlock(ctx, c, s.bs, s.getSession) // hash security +} + +// GetBlocks gets blocks in the context of a request session +func (s *Session) GetBlocks(ctx context.Context, ks []cid.Cid) <-chan blocks.Block { + return getBlocks(ctx, ks, s.bs, s.getSession) // hash security +} + +var _ BlockGetter = (*Session)(nil) diff --git a/vendor/github.com/ipfs/go-blockservice/go.mod b/vendor/github.com/ipfs/go-blockservice/go.mod new file mode 100644 index 0000000000..b966f19f0f --- /dev/null +++ b/vendor/github.com/ipfs/go-blockservice/go.mod @@ -0,0 +1,19 @@ +module github.com/ipfs/go-blockservice + +go 1.13 + +require ( + github.com/ipfs/go-bitswap v0.1.8 + github.com/ipfs/go-block-format v0.0.2 + github.com/ipfs/go-cid v0.0.5 + github.com/ipfs/go-datastore v0.4.4 + github.com/ipfs/go-ipfs-blockstore v0.1.4 + github.com/ipfs/go-ipfs-blocksutil v0.0.1 + github.com/ipfs/go-ipfs-delay v0.0.1 + github.com/ipfs/go-ipfs-exchange-interface v0.0.1 + github.com/ipfs/go-ipfs-exchange-offline v0.0.1 + github.com/ipfs/go-ipfs-routing v0.1.0 + github.com/ipfs/go-ipfs-util v0.0.1 + github.com/ipfs/go-log v0.0.1 + github.com/ipfs/go-verifcid v0.0.1 +) diff --git a/vendor/github.com/ipfs/go-blockservice/go.sum b/vendor/github.com/ipfs/go-blockservice/go.sum new file mode 100644 index 0000000000..0fc7ec2783 --- /dev/null +++ b/vendor/github.com/ipfs/go-blockservice/go.sum @@ -0,0 +1,363 @@ +github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32 
h1:qkOC5Gd33k54tobS36cXdAzJbeHaduLtnLQQwNoIi78= +github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= +github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c h1:aEbSeNALREWXk0G7UdNhR3ayBV7tZ4M2PNmnrCAph6Q= +github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50 h1:4i3KsuVA0o0KoBxAC5x+MY7RbteiMK1V7gf/G08NGIQ= +github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= +github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= +github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo= +github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= +github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/ipfs/bbloom v0.0.1 h1:s7KkiBPfxCeDVo47KySjK0ACPc5GJRUxFpdyWEuDjhw= +github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= +github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= +github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= +github.com/ipfs/go-bitswap v0.1.8 h1:38X1mKXkiU6Nzw4TOSWD8eTVY5eX3slQunv3QEWfXKg= +github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= +github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= +github.com/ipfs/go-block-format v0.0.2 
h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-cid v0.0.1 h1:GBjWPktLnNyX0JiQCNFpUuUSoMw5KMyqrsejHYlILBE= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.2 h1:tuuKaZPU1M6HcejsO3AcYWW8sZ8MTvyxfc4uqB4eFE8= +github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= +github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= +github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-datastore v0.0.1 h1:AW/KZCScnBWlSb5JbnEnLKFWXL224LBEh/9KXXOrUms= +github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.0.5 h1:q3OfiOZV5rlsK1H5V8benjeUApRfMGs4Mrhmr6NriQo= +github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.1 h1:F4k0TkTAZGLFzBOrVKDAvch6JZtuN4NHkfdcEZL50aI= +github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.4.4 h1:rjvQ9+muFaJ+QZ7dN5B1MSDNQ0JVZKkkES/rMZmA8X8= +github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= +github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= +github.com/ipfs/go-ipfs-blockstore v0.0.1 h1:O9n3PbmTYZoNhkgkEyrXTznbmktIXif62xLX+8dPHzc= +github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= 
+github.com/ipfs/go-ipfs-blockstore v0.1.4 h1:2SGI6U1B44aODevza8Rde3+dY30Pb+lbcObe1LETxOQ= +github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= +github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= +github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= +github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-ds-help v0.0.1 h1:QBg+Ts2zgeemK/dB0saiF/ykzRGgfoFMT90Rzo0OnVU= +github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= +github.com/ipfs/go-ipfs-ds-help v0.1.1 h1:IW/bXGeaAZV2VH0Kuok+Ohva/zHkHmeLFBxC1k7mNPc= +github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1 h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= +github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew= +github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= +github.com/ipfs/go-ipfs-pq v0.0.1 h1:zgUotX8dcAB/w/HidJh1zzc1yFq6Vm8J7T2F4itj/RU= +github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= +github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ= +github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= 
+github.com/ipfs/go-log v0.0.1 h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc= +github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= +github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= +github.com/ipfs/go-peertaskqueue v0.1.1 h1:+gPjbI+V3NktXZOqJA1kzbms2pYmhjgQQal0MzZrOAY= +github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= +github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= +github.com/jackpal/gateway v1.0.5 h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc= +github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= +github.com/jackpal/go-nat-pmp v1.0.1 h1:i0LektDkO1QlrTm/cSuP+PyBCDnYvjPLGl4LdWEMiaA= +github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec h1:DQqZhhDvrTrEQ3Qod5yfavcA064e53xlQ+xajiorXgM= +github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= +github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2 h1:vhC1OXXiT9R2pczegwz6moDvuRpggaroAXhPIseh57A= +github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 h1:bspPhN+oKYFk5fcGNuQzp6IGzYQSenLEgH3s6jkXrWw= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/jbenet/goprocess v0.1.3 
h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr10= +github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b h1:wxtKgYHEncAU00muMD06dzLiahtGM1eouRNOzVV7tdQ= +github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/libp2p/go-addr-util v0.0.1 h1:TpTQm9cXVRVSKsYbgQ7GKc3KbbHVTnbostgGaDEP+88= +github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= +github.com/libp2p/go-buffer-pool v0.0.1 h1:9Rrn/H46cXjaA2HQ5Y8lyhOS1NhTkZ4yuEs2r3Eechg= +github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= +github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= +github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/libp2p/go-conn-security-multistream v0.1.0 h1:aqGmto+ttL/uJgX0JtQI0tD21CIEy5eYd1Hlp0juHY0= +github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod 
h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= +github.com/libp2p/go-flow-metrics v0.0.1 h1:0gxuFd2GuK7IIP5pKljLwps6TvcuYgvG7Atqi3INF5s= +github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= +github.com/libp2p/go-libp2p v0.1.1 h1:52sB0TJuDk2nYMcMfHOKaPoaayDZjaYVCq6Vk1ejUTk= +github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= +github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= +github.com/libp2p/go-libp2p-blankhost v0.1.1 h1:X919sCh+KLqJcNRApj43xCSiQRYqOSI88Fdf55ngf78= +github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= +github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= +github.com/libp2p/go-libp2p-core v0.0.1 h1:HSTZtFIq/W5Ue43Zw+uWZyy2Vl5WtF0zDjKN8/DT/1I= +github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= +github.com/libp2p/go-libp2p-core v0.0.2 h1:86uOwW+O6Uc7NbaK4diuLZo2/Ikvqw2rgyV03VcSbLE= +github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= +github.com/libp2p/go-libp2p-core v0.0.3 h1:+IonUYY0nJZLb5Fdv6a6DOjtGP1L8Bb3faamiI2q5FY= +github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= +github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ= +github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= +github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= +github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8= +github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= +github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= +github.com/libp2p/go-libp2p-mplex v0.2.1 
h1:E1xaJBQnbSiTHGI1gaBKmKhu1TUKkErKJnE8iGvirYI= +github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= +github.com/libp2p/go-libp2p-nat v0.0.4 h1:+KXK324yaY701On8a0aGjTnw8467kW3ExKcqW2wwmyw= +github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= +github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= +github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= +github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY= +github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= +github.com/libp2p/go-libp2p-peerstore v0.1.0 h1:MKh7pRNPHSh1fLPj8u/M/s/napdmeNpoi9BRy9lPN0E= +github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= +github.com/libp2p/go-libp2p-record v0.1.0 h1:wHwBGbFzymoIl69BpgwIu0O6ta3TXGcMPvHUAcodzRc= +github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= +github.com/libp2p/go-libp2p-secio v0.1.0 h1:NNP5KLxuP97sE5Bu3iuwOWyT/dKEGMN5zSLMWdB7GTQ= +github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= +github.com/libp2p/go-libp2p-swarm v0.1.0 h1:HrFk2p0awrGEgch9JXK/qp/hfjqQfgNxpLWnCiWPg5s= +github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= +github.com/libp2p/go-libp2p-testing v0.0.2 h1:p9ySW7MFvGGs83hAAe0MPGnjy/tPjl5KyxpMkojdZ+g= +github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.3 h1:bdij4bKaaND7tCsaXVjRfYkMpvoOeKj9AVQGJllA6jM= +github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.4 h1:Qev57UR47GcLPXWjrunv5aLIQGO4n9mhI/8/EIrEEFc= +github.com/libp2p/go-libp2p-testing v0.0.4/go.mod 
h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-transport-upgrader v0.1.1 h1:PZMS9lhjK9VytzMCW3tWHAXtKXmlURSc3ZdvwEcKCzw= +github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= +github.com/libp2p/go-libp2p-yamux v0.2.0 h1:TSPZ5cMMz/wdoYsye/wU1TE4G3LDGMoeEN0xgnCKU/I= +github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= +github.com/libp2p/go-libp2p-yamux v0.2.1 h1:Q3XYNiKCC2vIxrvUJL+Jg1kiyeEaIDNKLjgEjo3VQdI= +github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= +github.com/libp2p/go-maddr-filter v0.0.4 h1:hx8HIuuwk34KePddrp2mM5ivgPkZ09JH4AvsALRbFUs= +github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= +github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= +github.com/libp2p/go-mplex v0.1.0 h1:/nBTy5+1yRyY82YaO6HXQRnO5IAGsXTjEJaR3LdTPc0= +github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= +github.com/libp2p/go-msgio v0.0.2 h1:ivPvEKHxmVkTClHzg6RXTYHqaJQ0V9cDbq+6lKb3UV0= +github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.4 h1:agEFehY3zWJFUHK6SEMR7UYmk2z6kC3oeCM7ybLhguA= +github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-nat v0.0.3 h1:l6fKV+p0Xa354EqQOQP+d8CivdLM4kl5GxC1hSc/UeI= +github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= +github.com/libp2p/go-reuseport v0.0.1 h1:7PhkfH73VXfPJYKQ6JwS5I/eVcoyYi9IMNGc6FWpFLw= +github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= +github.com/libp2p/go-reuseport-transport v0.0.2 h1:WglMwyXyBu61CMkjCCtnmqNqnjib0GIEjMiHTwR/KN4= +github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= 
+github.com/libp2p/go-stream-muxer v0.0.1 h1:Ce6e2Pyu+b5MC1k3eeFtAax0pW4gc6MosYSLV05UeLw= +github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= +github.com/libp2p/go-stream-muxer-multistream v0.2.0 h1:714bRJ4Zy9mdhyTLJ+ZKiROmAFwUHpeRidG+q7LTQOg= +github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= +github.com/libp2p/go-tcp-transport v0.1.0 h1:IGhowvEqyMFknOar4FWCKSWE0zL36UFKQtiRQD60/8o= +github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= +github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= +github.com/libp2p/go-yamux v1.2.2 h1:s6J6o7+ajoQMjHe7BEnq+EynOj5D2EoG8CuQgL3F2vg= +github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.2.3 h1:xX8A36vpXb59frIzWFdEgptLMsOANMFq2K7fPRlunYI= +github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd 
v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.0 h1:U41/2erhAKcmSI14xh/ZTUdBPOzDOIfS93ibzUSl8KM= +github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-multiaddr v0.0.1 h1:/QUV3VBMDI6pi6xfiw7lr6xhDWWvQKn9udPn68kLSdY= +github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.4 
h1:WgMSI84/eRLdbptXMkMWDXPjPq7SPLIgGUVm2eroyU4= +github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.0.2 h1:/Bbsgsy3R6e3jf2qBahzNHzww6usYaZ0NhNH3sqdFS8= +github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-fmt v0.0.1 h1:5YjeOIzbX8OTKVaN72aOzGIYW7PnrZrnkDyOfAWRSMA= +github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= +github.com/multiformats/go-multiaddr-net v0.0.1 h1:76O59E3FavvHqNg7jvzWzsPSW5JSi/ek0E4eiDVbg9g= +github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.5 h1:1wxmCvTXAifAepIMyF39vZinRw5sbqjPs/UIi93+uik= +github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= +github.com/multiformats/go-multihash v0.0.10 h1:lMoNbh2Ssd9PUF74Nz008KGzGPlfeV6wH3rit5IIGCM= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multistream v0.1.0 h1:UpO6jrsjqs46mqAK3n6wKRYFhugss9ArzbyUzU+4wkQ= +github.com/multiformats/go-multistream v0.1.0/go.mod 
h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a h1:/eS3yfGjQKG+9kayBkj0ip1BGhq6zJ3eaVksphxAaek= +github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= +github.com/spacemonkeygo/spacelog 
v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc h1:9lDbC6Rz4bwmou+oE6Dt4Cb2BGMur5eR/GYptkKUVHo= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= +github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f h1:M/lL30eFZTKnomXY6huvM6G0+gVquFNf6mxghaWlFUg= +github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= +github.com/whyrusleeping/mafmt v1.2.8 h1:TCghSl5kkwEE0j+sU/gudyhVMRlpBin8fMBBHg59EbA= +github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= +github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f h1:R423Cnkcp5JABoeemiGEPlt9tHXFfw5kvc0yqlxRPWo= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7 h1:C2F/nMkR/9sfUTpvR3QrjBuTdvMUC/cFajkphs1YLQo= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190611141213-3f473d35a33a h1:+KkCgOMgnKSgenxTBoiwkMqTiouMIy/3o8RLdmSbGoY= +golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e h1:ZytStCyV048ZqDsWHiYDdoI2Vd4msMcrDECFxS+tL9c= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae h1:xiXzMMEQdQcric9hXtr1QU98MHunKK7OTtsoU6bYWs4= +golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/ipfs/go-cid/.gitignore b/vendor/github.com/ipfs/go-cid/.gitignore new file mode 100644 index 0000000000..aaea8ed0a7 --- /dev/null +++ b/vendor/github.com/ipfs/go-cid/.gitignore @@ -0,0 +1 @@ +cid-fuzz.zip diff --git a/vendor/github.com/ipfs/go-cid/.travis.yml b/vendor/github.com/ipfs/go-cid/.travis.yml new file mode 100644 index 0000000000..5163d693fc --- /dev/null +++ b/vendor/github.com/ipfs/go-cid/.travis.yml @@ -0,0 +1,30 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/pkg/mod + - $HOME/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/ipfs/go-cid/LICENSE b/vendor/github.com/ipfs/go-cid/LICENSE new file mode 100644 index 0000000000..0e323020a6 --- /dev/null +++ b/vendor/github.com/ipfs/go-cid/LICENSE @@ 
-0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Protocol Labs, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/ipfs/go-cid/Makefile b/vendor/github.com/ipfs/go-cid/Makefile new file mode 100644 index 0000000000..554bed3258 --- /dev/null +++ b/vendor/github.com/ipfs/go-cid/Makefile @@ -0,0 +1,5 @@ +all: deps + +deps: + go get github.com/mattn/goveralls + go get golang.org/x/tools/cmd/cover diff --git a/vendor/github.com/ipfs/go-cid/README.md b/vendor/github.com/ipfs/go-cid/README.md new file mode 100644 index 0000000000..4f54343a97 --- /dev/null +++ b/vendor/github.com/ipfs/go-cid/README.md @@ -0,0 +1,108 @@ +go-cid +================== + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) +[![](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) +[![GoDoc](https://godoc.org/github.com/ipfs/go-cid?status.svg)](https://godoc.org/github.com/ipfs/go-cid) +[![Coverage Status](https://coveralls.io/repos/github/ipfs/go-cid/badge.svg?branch=master)](https://coveralls.io/github/ipfs/go-cid?branch=master) +[![Travis CI](https://travis-ci.org/ipfs/go-cid.svg?branch=master)](https://travis-ci.org/ipfs/go-cid) + +> A package to handle content IDs in Go. + +This is an implementation in Go of the [CID spec](https://github.com/ipld/cid). +It is used in `go-ipfs` and related packages to refer to a typed hunk of data. 
+ +## Lead Maintainer + +[Eric Myhre](https://github.com/warpfork) + +## Table of Contents + +- [Install](#install) +- [Usage](#usage) +- [API](#api) +- [Contribute](#contribute) +- [License](#license) + +## Install + +`go-cid` is a standard Go module which can be installed with: + +```sh +go get github.com/ipfs/go-cid +``` + +## Usage + +### Running tests + +Run tests with `go test` from the directory root + +```sh +go test +``` + +### Examples + +#### Parsing string input from users + +```go +// Create a cid from a marshaled string +c, err := cid.Decode("bafzbeigai3eoy2ccc7ybwjfz5r3rdxqrinwi4rwytly24tdbh6yk7zslrm") +if err != nil {...} + +fmt.Println("Got CID: ", c) +``` + +#### Creating a CID from scratch + +```go +// Create a cid manually by specifying the 'prefix' parameters +pref := cid.Prefix{ + Version: 1, + Codec: cid.Raw, + MhType: mh.SHA2_256, + MhLength: -1, // default length +} + +// And then feed it some data +c, err := pref.Sum([]byte("Hello World!")) +if err != nil {...} + +fmt.Println("Created CID: ", c) +``` + +#### Check if two CIDs match + +```go +// To test if two cid's are equivalent, be sure to use the 'Equals' method: +if c1.Equals(c2) { + fmt.Println("These two refer to the same exact data!") +} +``` + +#### Check if some data matches a given CID + +```go +// To check if some data matches a given cid, +// Get your CIDs prefix, and use that to sum the data in question: +other, err := c.Prefix().Sum(mydata) +if err != nil {...} + +if !c.Equals(other) { + fmt.Println("This data is different.") +} + +``` + +## Contribute + +PRs are welcome! + +Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. 
+ +## License + +MIT © Jeromy Johnson diff --git a/vendor/github.com/ipfs/go-cid/builder.go b/vendor/github.com/ipfs/go-cid/builder.go new file mode 100644 index 0000000000..3d2fc77cbd --- /dev/null +++ b/vendor/github.com/ipfs/go-cid/builder.go @@ -0,0 +1,74 @@ +package cid + +import ( + mh "github.com/multiformats/go-multihash" +) + +type Builder interface { + Sum(data []byte) (Cid, error) + GetCodec() uint64 + WithCodec(uint64) Builder +} + +type V0Builder struct{} + +type V1Builder struct { + Codec uint64 + MhType uint64 + MhLength int // MhLength <= 0 means the default length +} + +func (p Prefix) GetCodec() uint64 { + return p.Codec +} + +func (p Prefix) WithCodec(c uint64) Builder { + if c == p.Codec { + return p + } + p.Codec = c + if c != DagProtobuf { + p.Version = 1 + } + return p +} + +func (p V0Builder) Sum(data []byte) (Cid, error) { + hash, err := mh.Sum(data, mh.SHA2_256, -1) + if err != nil { + return Undef, err + } + return Cid{string(hash)}, nil +} + +func (p V0Builder) GetCodec() uint64 { + return DagProtobuf +} + +func (p V0Builder) WithCodec(c uint64) Builder { + if c == DagProtobuf { + return p + } + return V1Builder{Codec: c, MhType: mh.SHA2_256} +} + +func (p V1Builder) Sum(data []byte) (Cid, error) { + mhLen := p.MhLength + if mhLen <= 0 { + mhLen = -1 + } + hash, err := mh.Sum(data, p.MhType, mhLen) + if err != nil { + return Undef, err + } + return NewCidV1(p.Codec, hash), nil +} + +func (p V1Builder) GetCodec() uint64 { + return p.Codec +} + +func (p V1Builder) WithCodec(c uint64) Builder { + p.Codec = c + return p +} diff --git a/vendor/github.com/ipfs/go-cid/cid.go b/vendor/github.com/ipfs/go-cid/cid.go new file mode 100644 index 0000000000..1268f39e02 --- /dev/null +++ b/vendor/github.com/ipfs/go-cid/cid.go @@ -0,0 +1,677 @@ +// Package cid implements the Content-IDentifiers specification +// (https://github.com/ipld/cid) in Go. CIDs are +// self-describing content-addressed identifiers useful for +// distributed information systems. 
CIDs are used in the IPFS +// (https://ipfs.io) project ecosystem. +// +// CIDs have two major versions. A CIDv0 corresponds to a multihash of type +// DagProtobuf, is deprecated and exists for compatibility reasons. Usually, +// CIDv1 should be used. +// +// A CIDv1 has four parts: +// +// ::= +// +// As shown above, the CID implementation relies heavily on Multiformats, +// particularly Multibase +// (https://github.com/multiformats/go-multibase), Multicodec +// (https://github.com/multiformats/multicodec) and Multihash +// implementations (https://github.com/multiformats/go-multihash). +package cid + +import ( + "bytes" + "encoding" + "encoding/json" + "errors" + "fmt" + "io" + "strings" + + mbase "github.com/multiformats/go-multibase" + mh "github.com/multiformats/go-multihash" + varint "github.com/multiformats/go-varint" +) + +// UnsupportedVersionString just holds an error message +const UnsupportedVersionString = "" + +var ( + // ErrCidTooShort means that the cid passed to decode was not long + // enough to be a valid Cid + ErrCidTooShort = errors.New("cid too short") + + // ErrInvalidEncoding means that selected encoding is not supported + // by this Cid version + ErrInvalidEncoding = errors.New("invalid base encoding") +) + +// These are multicodec-packed content types. 
The should match +// the codes described in the authoritative document: +// https://github.com/multiformats/multicodec/blob/master/table.csv +const ( + Raw = 0x55 + + DagProtobuf = 0x70 + DagCBOR = 0x71 + Libp2pKey = 0x72 + + GitRaw = 0x78 + + EthBlock = 0x90 + EthBlockList = 0x91 + EthTxTrie = 0x92 + EthTx = 0x93 + EthTxReceiptTrie = 0x94 + EthTxReceipt = 0x95 + EthStateTrie = 0x96 + EthAccountSnapshot = 0x97 + EthStorageTrie = 0x98 + BitcoinBlock = 0xb0 + BitcoinTx = 0xb1 + ZcashBlock = 0xc0 + ZcashTx = 0xc1 + DecredBlock = 0xe0 + DecredTx = 0xe1 + DashBlock = 0xf0 + DashTx = 0xf1 + FilCommitmentUnsealed = 0xf101 + FilCommitmentSealed = 0xf102 +) + +// Codecs maps the name of a codec to its type +var Codecs = map[string]uint64{ + "v0": DagProtobuf, + "raw": Raw, + "protobuf": DagProtobuf, + "cbor": DagCBOR, + "libp2p-key": Libp2pKey, + "git-raw": GitRaw, + "eth-block": EthBlock, + "eth-block-list": EthBlockList, + "eth-tx-trie": EthTxTrie, + "eth-tx": EthTx, + "eth-tx-receipt-trie": EthTxReceiptTrie, + "eth-tx-receipt": EthTxReceipt, + "eth-state-trie": EthStateTrie, + "eth-account-snapshot": EthAccountSnapshot, + "eth-storage-trie": EthStorageTrie, + "bitcoin-block": BitcoinBlock, + "bitcoin-tx": BitcoinTx, + "zcash-block": ZcashBlock, + "zcash-tx": ZcashTx, + "decred-block": DecredBlock, + "decred-tx": DecredTx, + "dash-block": DashBlock, + "dash-tx": DashTx, + "fil-commitment-unsealed": FilCommitmentUnsealed, + "fil-commitment-sealed": FilCommitmentSealed, +} + +// CodecToStr maps the numeric codec to its name +var CodecToStr = map[uint64]string{ + Raw: "raw", + DagProtobuf: "protobuf", + DagCBOR: "cbor", + GitRaw: "git-raw", + EthBlock: "eth-block", + EthBlockList: "eth-block-list", + EthTxTrie: "eth-tx-trie", + EthTx: "eth-tx", + EthTxReceiptTrie: "eth-tx-receipt-trie", + EthTxReceipt: "eth-tx-receipt", + EthStateTrie: "eth-state-trie", + EthAccountSnapshot: "eth-account-snapshot", + EthStorageTrie: "eth-storage-trie", + BitcoinBlock: "bitcoin-block", + 
BitcoinTx: "bitcoin-tx", + ZcashBlock: "zcash-block", + ZcashTx: "zcash-tx", + DecredBlock: "decred-block", + DecredTx: "decred-tx", + DashBlock: "dash-block", + DashTx: "dash-tx", + FilCommitmentUnsealed: "fil-commitment-unsealed", + FilCommitmentSealed: "fil-commitment-sealed", +} + +// tryNewCidV0 tries to convert a multihash into a CIDv0 CID and returns an +// error on failure. +func tryNewCidV0(mhash mh.Multihash) (Cid, error) { + // Need to make sure hash is valid for CidV0 otherwise we will + // incorrectly detect it as CidV1 in the Version() method + dec, err := mh.Decode(mhash) + if err != nil { + return Undef, err + } + if dec.Code != mh.SHA2_256 || dec.Length != 32 { + return Undef, fmt.Errorf("invalid hash for cidv0 %d-%d", dec.Code, dec.Length) + } + return Cid{string(mhash)}, nil +} + +// NewCidV0 returns a Cid-wrapped multihash. +// They exist to allow IPFS to work with Cids while keeping +// compatibility with the plain-multihash format used used in IPFS. +// NewCidV1 should be used preferentially. +// +// Panics if the multihash isn't sha2-256. +func NewCidV0(mhash mh.Multihash) Cid { + c, err := tryNewCidV0(mhash) + if err != nil { + panic(err) + } + return c +} + +// NewCidV1 returns a new Cid using the given multicodec-packed +// content type. +// +// Panics if the multihash is invalid. +func NewCidV1(codecType uint64, mhash mh.Multihash) Cid { + hashlen := len(mhash) + // two 8 bytes (max) numbers plus hash + buf := make([]byte, 1+varint.UvarintSize(codecType)+hashlen) + n := varint.PutUvarint(buf, 1) + n += varint.PutUvarint(buf[n:], codecType) + cn := copy(buf[n:], mhash) + if cn != hashlen { + panic("copy hash length is inconsistent") + } + + return Cid{string(buf[:n+hashlen])} +} + +var _ encoding.BinaryMarshaler = Cid{} +var _ encoding.BinaryUnmarshaler = (*Cid)(nil) +var _ encoding.TextMarshaler = Cid{} +var _ encoding.TextUnmarshaler = (*Cid)(nil) + +// Cid represents a self-describing content addressed +// identifier. 
It is formed by a Version, a Codec (which indicates +// a multicodec-packed content type) and a Multihash. +type Cid struct{ str string } + +// Undef can be used to represent a nil or undefined Cid, using Cid{} +// directly is also acceptable. +var Undef = Cid{} + +// Defined returns true if a Cid is defined +// Calling any other methods on an undefined Cid will result in +// undefined behavior. +func (c Cid) Defined() bool { + return c.str != "" +} + +// Parse is a short-hand function to perform Decode, Cast etc... on +// a generic interface{} type. +func Parse(v interface{}) (Cid, error) { + switch v2 := v.(type) { + case string: + if strings.Contains(v2, "/ipfs/") { + return Decode(strings.Split(v2, "/ipfs/")[1]) + } + return Decode(v2) + case []byte: + return Cast(v2) + case mh.Multihash: + return tryNewCidV0(v2) + case Cid: + return v2, nil + default: + return Undef, fmt.Errorf("can't parse %+v as Cid", v2) + } +} + +// Decode parses a Cid-encoded string and returns a Cid object. +// For CidV1, a Cid-encoded string is primarily a multibase string: +// +// +// +// The base-encoded string represents a: +// +// +// +// Decode will also detect and parse CidV0 strings. Strings +// starting with "Qm" are considered CidV0 and treated directly +// as B58-encoded multihashes. +func Decode(v string) (Cid, error) { + if len(v) < 2 { + return Undef, ErrCidTooShort + } + + if len(v) == 46 && v[:2] == "Qm" { + hash, err := mh.FromB58String(v) + if err != nil { + return Undef, err + } + + return tryNewCidV0(hash) + } + + _, data, err := mbase.Decode(v) + if err != nil { + return Undef, err + } + + return Cast(data) +} + +// Extract the encoding from a Cid. If Decode on the same string did +// not return an error neither will this function. 
+func ExtractEncoding(v string) (mbase.Encoding, error) { + if len(v) < 2 { + return -1, ErrCidTooShort + } + + if len(v) == 46 && v[:2] == "Qm" { + return mbase.Base58BTC, nil + } + + encoding := mbase.Encoding(v[0]) + + // check encoding is valid + _, err := mbase.NewEncoder(encoding) + if err != nil { + return -1, err + } + + return encoding, nil +} + +// Cast takes a Cid data slice, parses it and returns a Cid. +// For CidV1, the data buffer is in the form: +// +// +// +// CidV0 are also supported. In particular, data buffers starting +// with length 34 bytes, which starts with bytes [18,32...] are considered +// binary multihashes. +// +// Please use decode when parsing a regular Cid string, as Cast does not +// expect multibase-encoded data. Cast accepts the output of Cid.Bytes(). +func Cast(data []byte) (Cid, error) { + nr, c, err := CidFromBytes(data) + if err != nil { + return Undef, err + } + + if nr != len(data) { + return Undef, fmt.Errorf("trailing bytes in data buffer passed to cid Cast") + } + + return c, nil +} + +// UnmarshalBinary is equivalent to Cast(). It implements the +// encoding.BinaryUnmarshaler interface. +func (c *Cid) UnmarshalBinary(data []byte) error { + casted, err := Cast(data) + if err != nil { + return err + } + c.str = casted.str + return nil +} + +// UnmarshalText is equivalent to Decode(). It implements the +// encoding.TextUnmarshaler interface. +func (c *Cid) UnmarshalText(text []byte) error { + decodedCid, err := Decode(string(text)) + if err != nil { + return err + } + c.str = decodedCid.str + return nil +} + +// Version returns the Cid version. +func (c Cid) Version() uint64 { + if len(c.str) == 34 && c.str[0] == 18 && c.str[1] == 32 { + return 0 + } + return 1 +} + +// Type returns the multicodec-packed content type of a Cid. 
+func (c Cid) Type() uint64 { + if c.Version() == 0 { + return DagProtobuf + } + _, n, _ := uvarint(c.str) + codec, _, _ := uvarint(c.str[n:]) + return codec +} + +// String returns the default string representation of a +// Cid. Currently, Base32 is used for CIDV1 as the encoding for the +// multibase string, Base58 is used for CIDV0. +func (c Cid) String() string { + switch c.Version() { + case 0: + return c.Hash().B58String() + case 1: + mbstr, err := mbase.Encode(mbase.Base32, c.Bytes()) + if err != nil { + panic("should not error with hardcoded mbase: " + err.Error()) + } + + return mbstr + default: + panic("not possible to reach this point") + } +} + +// String returns the string representation of a Cid +// encoded is selected base +func (c Cid) StringOfBase(base mbase.Encoding) (string, error) { + switch c.Version() { + case 0: + if base != mbase.Base58BTC { + return "", ErrInvalidEncoding + } + return c.Hash().B58String(), nil + case 1: + return mbase.Encode(base, c.Bytes()) + default: + panic("not possible to reach this point") + } +} + +// Encode return the string representation of a Cid in a given base +// when applicable. Version 0 Cid's are always in Base58 as they do +// not take a multibase prefix. +func (c Cid) Encode(base mbase.Encoder) string { + switch c.Version() { + case 0: + return c.Hash().B58String() + case 1: + return base.Encode(c.Bytes()) + default: + panic("not possible to reach this point") + } +} + +// Hash returns the multihash contained by a Cid. +func (c Cid) Hash() mh.Multihash { + bytes := c.Bytes() + + if c.Version() == 0 { + return mh.Multihash(bytes) + } + + // skip version length + _, n1, _ := varint.FromUvarint(bytes) + // skip codec length + _, n2, _ := varint.FromUvarint(bytes[n1:]) + + return mh.Multihash(bytes[n1+n2:]) +} + +// Bytes returns the byte representation of a Cid. +// The output of bytes can be parsed back into a Cid +// with Cast(). 
+func (c Cid) Bytes() []byte { + return []byte(c.str) +} + +// ByteLen returns the length of the CID in bytes. +// It's equivalent to `len(c.Bytes())`, but works without an allocation, +// and should therefore be preferred. +// +// (See also the WriteTo method for other important operations that work without allocation.) +func (c Cid) ByteLen() int { + return len(c.str) +} + +// WriteBytes writes the CID bytes to the given writer. +// This method works without incurring any allocation. +// +// (See also the ByteLen method for other important operations that work without allocation.) +func (c Cid) WriteBytes(w io.Writer) (int, error) { + n, err := io.WriteString(w, c.str) + if err != nil { + return n, err + } + if n != len(c.str) { + return n, fmt.Errorf("failed to write entire cid string") + } + return n, nil +} + +// MarshalBinary is equivalent to Bytes(). It implements the +// encoding.BinaryMarshaler interface. +func (c Cid) MarshalBinary() ([]byte, error) { + return c.Bytes(), nil +} + +// MarshalText is equivalent to String(). It implements the +// encoding.TextMarshaler interface. +func (c Cid) MarshalText() ([]byte, error) { + return []byte(c.String()), nil +} + +// Equals checks that two Cids are the same. +// In order for two Cids to be considered equal, the +// Version, the Codec and the Multihash must match. +func (c Cid) Equals(o Cid) bool { + return c == o +} + +// UnmarshalJSON parses the JSON representation of a Cid. 
+func (c *Cid) UnmarshalJSON(b []byte) error { + if len(b) < 2 { + return fmt.Errorf("invalid cid json blob") + } + obj := struct { + CidTarget string `json:"/"` + }{} + objptr := &obj + err := json.Unmarshal(b, &objptr) + if err != nil { + return err + } + if objptr == nil { + *c = Cid{} + return nil + } + + if obj.CidTarget == "" { + return fmt.Errorf("cid was incorrectly formatted") + } + + out, err := Decode(obj.CidTarget) + if err != nil { + return err + } + + *c = out + + return nil +} + +// MarshalJSON procudes a JSON representation of a Cid, which looks as follows: +// +// { "/": "" } +// +// Note that this formatting comes from the IPLD specification +// (https://github.com/ipld/specs/tree/master/ipld) +func (c Cid) MarshalJSON() ([]byte, error) { + if !c.Defined() { + return []byte("null"), nil + } + return []byte(fmt.Sprintf("{\"/\":\"%s\"}", c.String())), nil +} + +// KeyString returns the binary representation of the Cid as a string +func (c Cid) KeyString() string { + return c.str +} + +// Loggable returns a Loggable (as defined by +// https://godoc.org/github.com/ipfs/go-log). +func (c Cid) Loggable() map[string]interface{} { + return map[string]interface{}{ + "cid": c, + } +} + +// Prefix builds and returns a Prefix out of a Cid. +func (c Cid) Prefix() Prefix { + if c.Version() == 0 { + return Prefix{ + MhType: mh.SHA2_256, + MhLength: 32, + Version: 0, + Codec: DagProtobuf, + } + } + + offset := 0 + version, n, _ := uvarint(c.str[offset:]) + offset += n + codec, n, _ := uvarint(c.str[offset:]) + offset += n + mhtype, n, _ := uvarint(c.str[offset:]) + offset += n + mhlen, _, _ := uvarint(c.str[offset:]) + + return Prefix{ + MhType: mhtype, + MhLength: int(mhlen), + Version: version, + Codec: codec, + } +} + +// Prefix represents all the metadata of a Cid, +// that is, the Version, the Codec, the Multihash type +// and the Multihash length. It does not contains +// any actual content information. 
+// NOTE: The use -1 in MhLength to mean default length is deprecated, +// use the V0Builder or V1Builder structures instead +type Prefix struct { + Version uint64 + Codec uint64 + MhType uint64 + MhLength int +} + +// Sum uses the information in a prefix to perform a multihash.Sum() +// and return a newly constructed Cid with the resulting multihash. +func (p Prefix) Sum(data []byte) (Cid, error) { + length := p.MhLength + if p.MhType == mh.ID { + length = -1 + } + + if p.Version == 0 && (p.MhType != mh.SHA2_256 || + (p.MhLength != 32 && p.MhLength != -1)) { + + return Undef, fmt.Errorf("invalid v0 prefix") + } + + hash, err := mh.Sum(data, p.MhType, length) + if err != nil { + return Undef, err + } + + switch p.Version { + case 0: + return NewCidV0(hash), nil + case 1: + return NewCidV1(p.Codec, hash), nil + default: + return Undef, fmt.Errorf("invalid cid version") + } +} + +// Bytes returns a byte representation of a Prefix. It looks like: +// +// +func (p Prefix) Bytes() []byte { + size := varint.UvarintSize(p.Version) + size += varint.UvarintSize(p.Codec) + size += varint.UvarintSize(p.MhType) + size += varint.UvarintSize(uint64(p.MhLength)) + + buf := make([]byte, size) + n := varint.PutUvarint(buf, p.Version) + n += varint.PutUvarint(buf[n:], p.Codec) + n += varint.PutUvarint(buf[n:], p.MhType) + n += varint.PutUvarint(buf[n:], uint64(p.MhLength)) + if n != size { + panic("size mismatch") + } + return buf +} + +// PrefixFromBytes parses a Prefix-byte representation onto a +// Prefix. 
+func PrefixFromBytes(buf []byte) (Prefix, error) { + r := bytes.NewReader(buf) + vers, err := varint.ReadUvarint(r) + if err != nil { + return Prefix{}, err + } + + codec, err := varint.ReadUvarint(r) + if err != nil { + return Prefix{}, err + } + + mhtype, err := varint.ReadUvarint(r) + if err != nil { + return Prefix{}, err + } + + mhlen, err := varint.ReadUvarint(r) + if err != nil { + return Prefix{}, err + } + + return Prefix{ + Version: vers, + Codec: codec, + MhType: mhtype, + MhLength: int(mhlen), + }, nil +} + +func CidFromBytes(data []byte) (int, Cid, error) { + if len(data) > 2 && data[0] == mh.SHA2_256 && data[1] == 32 { + if len(data) < 34 { + return 0, Undef, fmt.Errorf("not enough bytes for cid v0") + } + + h, err := mh.Cast(data[:34]) + if err != nil { + return 0, Undef, err + } + + return 34, Cid{string(h)}, nil + } + + vers, n, err := varint.FromUvarint(data) + if err != nil { + return 0, Undef, err + } + + if vers != 1 { + return 0, Undef, fmt.Errorf("expected 1 as the cid version number, got: %d", vers) + } + + _, cn, err := varint.FromUvarint(data[n:]) + if err != nil { + return 0, Undef, err + } + + mhnr, _, err := mh.MHFromBytes(data[n+cn:]) + if err != nil { + return 0, Undef, err + } + + l := n + cn + mhnr + + return l, Cid{string(data[0:l])}, nil +} diff --git a/vendor/github.com/ipfs/go-cid/cid_fuzz.go b/vendor/github.com/ipfs/go-cid/cid_fuzz.go new file mode 100644 index 0000000000..99842b5350 --- /dev/null +++ b/vendor/github.com/ipfs/go-cid/cid_fuzz.go @@ -0,0 +1,37 @@ +// +build gofuzz + +package cid + +func Fuzz(data []byte) int { + cid, err := Cast(data) + + if err != nil { + return 0 + } + + _ = cid.Bytes() + _ = cid.String() + p := cid.Prefix() + _ = p.Bytes() + + if !cid.Equals(cid) { + panic("inequality") + } + + // json loop + json, err := cid.MarshalJSON() + if err != nil { + panic(err.Error()) + } + cid2 := Cid{} + err = cid2.UnmarshalJSON(json) + if err != nil { + panic(err.Error()) + } + + if !cid.Equals(cid2) { + 
panic("json loop not equal") + } + + return 1 +} diff --git a/vendor/github.com/ipfs/go-cid/codecov.yml b/vendor/github.com/ipfs/go-cid/codecov.yml new file mode 100644 index 0000000000..5f88a9ea27 --- /dev/null +++ b/vendor/github.com/ipfs/go-cid/codecov.yml @@ -0,0 +1,3 @@ +coverage: + range: "50...100" +comment: off diff --git a/vendor/github.com/ipfs/go-cid/deprecated.go b/vendor/github.com/ipfs/go-cid/deprecated.go new file mode 100644 index 0000000000..cd889f984a --- /dev/null +++ b/vendor/github.com/ipfs/go-cid/deprecated.go @@ -0,0 +1,28 @@ +package cid + +import ( + mh "github.com/multiformats/go-multihash" +) + +// NewPrefixV0 returns a CIDv0 prefix with the specified multihash type. +// DEPRECATED: Use V0Builder +func NewPrefixV0(mhType uint64) Prefix { + return Prefix{ + MhType: mhType, + MhLength: mh.DefaultLengths[mhType], + Version: 0, + Codec: DagProtobuf, + } +} + +// NewPrefixV1 returns a CIDv1 prefix with the specified codec and multihash +// type. +// DEPRECATED: Use V1Builder +func NewPrefixV1(codecType uint64, mhType uint64) Prefix { + return Prefix{ + MhType: mhType, + MhLength: mh.DefaultLengths[mhType], + Version: 1, + Codec: codecType, + } +} diff --git a/vendor/github.com/ipfs/go-cid/go.mod b/vendor/github.com/ipfs/go-cid/go.mod new file mode 100644 index 0000000000..e2b9478321 --- /dev/null +++ b/vendor/github.com/ipfs/go-cid/go.mod @@ -0,0 +1,9 @@ +module github.com/ipfs/go-cid + +require ( + github.com/multiformats/go-multibase v0.0.3 + github.com/multiformats/go-multihash v0.0.13 + github.com/multiformats/go-varint v0.0.5 +) + +go 1.13 diff --git a/vendor/github.com/ipfs/go-cid/go.sum b/vendor/github.com/ipfs/go-cid/go.sum new file mode 100644 index 0000000000..087adb3638 --- /dev/null +++ b/vendor/github.com/ipfs/go-cid/go.sum @@ -0,0 +1,28 @@ +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod 
h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto 
v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/ipfs/go-cid/set.go b/vendor/github.com/ipfs/go-cid/set.go new file mode 100644 index 0000000000..eb3b3f0dc1 --- /dev/null +++ b/vendor/github.com/ipfs/go-cid/set.go @@ -0,0 +1,65 @@ +package cid + +// Set is a implementation of a set of Cids, that is, a structure +// to which holds a single copy of every Cids that is added to it. +type Set struct { + set map[Cid]struct{} +} + +// NewSet initializes and returns a new Set. +func NewSet() *Set { + return &Set{set: make(map[Cid]struct{})} +} + +// Add puts a Cid in the Set. +func (s *Set) Add(c Cid) { + s.set[c] = struct{}{} +} + +// Has returns if the Set contains a given Cid. +func (s *Set) Has(c Cid) bool { + _, ok := s.set[c] + return ok +} + +// Remove deletes a Cid from the Set. +func (s *Set) Remove(c Cid) { + delete(s.set, c) +} + +// Len returns how many elements the Set has. +func (s *Set) Len() int { + return len(s.set) +} + +// Keys returns the Cids in the set. +func (s *Set) Keys() []Cid { + out := make([]Cid, 0, len(s.set)) + for k := range s.set { + out = append(out, k) + } + return out +} + +// Visit adds a Cid to the set only if it is +// not in it already. +func (s *Set) Visit(c Cid) bool { + if !s.Has(c) { + s.Add(c) + return true + } + + return false +} + +// ForEach allows to run a custom function on each +// Cid in the set. 
+func (s *Set) ForEach(f func(c Cid) error) error { + for c := range s.set { + err := f(c) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/ipfs/go-cid/varint.go b/vendor/github.com/ipfs/go-cid/varint.go new file mode 100644 index 0000000000..ed5eba1ad2 --- /dev/null +++ b/vendor/github.com/ipfs/go-cid/varint.go @@ -0,0 +1,40 @@ +package cid + +import ( + "github.com/multiformats/go-varint" +) + +// Version of varint function that work with a string rather than +// []byte to avoid unnecessary allocation + +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license as given at https://golang.org/LICENSE + +// uvarint decodes a uint64 from buf and returns that value and the +// number of characters read (> 0). If an error occurred, the value is 0 +// and the number of bytes n is <= 0 meaning: +// +// n == 0: buf too small +// n < 0: value larger than 64 bits (overflow) +// and -n is the number of bytes read +// +func uvarint(buf string) (uint64, int, error) { + var x uint64 + var s uint + // we have a binary string so we can't use a range loope + for i := 0; i < len(buf); i++ { + b := buf[i] + if b < 0x80 { + if i > 9 || i == 9 && b > 1 { + return 0, 0, varint.ErrOverflow + } else if b == 0 && i > 0 { + return 0, 0, varint.ErrNotMinimal + } + return x | uint64(b)< key-value datastore interfaces + +## Lead Maintainer + +[Steven Allen](https://github.com/Stebalien) + +## Table of Contents + +- [Background](#background) +- [Documentation](#documentation) +- [Contribute](#contribute) +- [License](#license) + +## Background + +Datastore is a generic layer of abstraction for data store and database access. It is a simple API with the aim to enable application development in a datastore-agnostic way, allowing datastores to be swapped seamlessly without changing application code. 
Thus, one can leverage different datastores with different strengths without committing the application to one datastore throughout its lifetime. + +In addition, grouped datastores significantly simplify interesting data access patterns (such as caching and sharding). + +Based on [datastore.py](https://github.com/datastore/datastore). + +## Documentation + +https://godoc.org/github.com/ipfs/go-datastore + +## Contribute + +Feel free to join in. All welcome. Open an [issue](https://github.com/ipfs/go-datastore/issues)! + +This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). + +### Want to hack on IPFS? + +[![](https://cdn.rawgit.com/jbenet/contribute-ipfs-gif/master/img/contribute.gif)](https://github.com/ipfs/community/blob/master/contributing.md) + +## License + +MIT + diff --git a/vendor/github.com/ipfs/go-datastore/basic_ds.go b/vendor/github.com/ipfs/go-datastore/basic_ds.go new file mode 100644 index 0000000000..4c851875da --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/basic_ds.go @@ -0,0 +1,287 @@ +package datastore + +import ( + "log" + + dsq "github.com/ipfs/go-datastore/query" +) + +// Here are some basic datastore implementations. + +// MapDatastore uses a standard Go map for internal storage. +type MapDatastore struct { + values map[Key][]byte +} + +// NewMapDatastore constructs a MapDatastore. It is _not_ thread-safe by +// default, wrap using sync.MutexWrap if you need thread safety (the answer here +// is usually yes). 
+func NewMapDatastore() (d *MapDatastore) { + return &MapDatastore{ + values: make(map[Key][]byte), + } +} + +// Put implements Datastore.Put +func (d *MapDatastore) Put(key Key, value []byte) (err error) { + d.values[key] = value + return nil +} + +// Sync implements Datastore.Sync +func (d *MapDatastore) Sync(prefix Key) error { + return nil +} + +// Get implements Datastore.Get +func (d *MapDatastore) Get(key Key) (value []byte, err error) { + val, found := d.values[key] + if !found { + return nil, ErrNotFound + } + return val, nil +} + +// Has implements Datastore.Has +func (d *MapDatastore) Has(key Key) (exists bool, err error) { + _, found := d.values[key] + return found, nil +} + +// GetSize implements Datastore.GetSize +func (d *MapDatastore) GetSize(key Key) (size int, err error) { + if v, found := d.values[key]; found { + return len(v), nil + } + return -1, ErrNotFound +} + +// Delete implements Datastore.Delete +func (d *MapDatastore) Delete(key Key) (err error) { + delete(d.values, key) + return nil +} + +// Query implements Datastore.Query +func (d *MapDatastore) Query(q dsq.Query) (dsq.Results, error) { + re := make([]dsq.Entry, 0, len(d.values)) + for k, v := range d.values { + e := dsq.Entry{Key: k.String(), Size: len(v)} + if !q.KeysOnly { + e.Value = v + } + re = append(re, e) + } + r := dsq.ResultsWithEntries(q, re) + r = dsq.NaiveQueryApply(q, r) + return r, nil +} + +func (d *MapDatastore) Batch() (Batch, error) { + return NewBasicBatch(d), nil +} + +func (d *MapDatastore) Close() error { + return nil +} + +// NullDatastore stores nothing, but conforms to the API. +// Useful to test with. 
+type NullDatastore struct { +} + +// NewNullDatastore constructs a null datastoe +func NewNullDatastore() *NullDatastore { + return &NullDatastore{} +} + +// Put implements Datastore.Put +func (d *NullDatastore) Put(key Key, value []byte) (err error) { + return nil +} + +// Sync implements Datastore.Sync +func (d *NullDatastore) Sync(prefix Key) error { + return nil +} + +// Get implements Datastore.Get +func (d *NullDatastore) Get(key Key) (value []byte, err error) { + return nil, ErrNotFound +} + +// Has implements Datastore.Has +func (d *NullDatastore) Has(key Key) (exists bool, err error) { + return false, nil +} + +// Has implements Datastore.GetSize +func (d *NullDatastore) GetSize(key Key) (size int, err error) { + return -1, ErrNotFound +} + +// Delete implements Datastore.Delete +func (d *NullDatastore) Delete(key Key) (err error) { + return nil +} + +// Query implements Datastore.Query +func (d *NullDatastore) Query(q dsq.Query) (dsq.Results, error) { + return dsq.ResultsWithEntries(q, nil), nil +} + +func (d *NullDatastore) Batch() (Batch, error) { + return NewBasicBatch(d), nil +} + +func (d *NullDatastore) Close() error { + return nil +} + +// LogDatastore logs all accesses through the datastore. +type LogDatastore struct { + Name string + child Datastore +} + +// Shim is a datastore which has a child. +type Shim interface { + Datastore + + Children() []Datastore +} + +// NewLogDatastore constructs a log datastore. 
+func NewLogDatastore(ds Datastore, name string) *LogDatastore { + if len(name) < 1 { + name = "LogDatastore" + } + return &LogDatastore{Name: name, child: ds} +} + +// Children implements Shim +func (d *LogDatastore) Children() []Datastore { + return []Datastore{d.child} +} + +// Put implements Datastore.Put +func (d *LogDatastore) Put(key Key, value []byte) (err error) { + log.Printf("%s: Put %s\n", d.Name, key) + // log.Printf("%s: Put %s ```%s```", d.Name, key, value) + return d.child.Put(key, value) +} + +// Sync implements Datastore.Sync +func (d *LogDatastore) Sync(prefix Key) error { + log.Printf("%s: Sync %s\n", d.Name, prefix) + return d.child.Sync(prefix) +} + +// Get implements Datastore.Get +func (d *LogDatastore) Get(key Key) (value []byte, err error) { + log.Printf("%s: Get %s\n", d.Name, key) + return d.child.Get(key) +} + +// Has implements Datastore.Has +func (d *LogDatastore) Has(key Key) (exists bool, err error) { + log.Printf("%s: Has %s\n", d.Name, key) + return d.child.Has(key) +} + +// GetSize implements Datastore.GetSize +func (d *LogDatastore) GetSize(key Key) (size int, err error) { + log.Printf("%s: GetSize %s\n", d.Name, key) + return d.child.GetSize(key) +} + +// Delete implements Datastore.Delete +func (d *LogDatastore) Delete(key Key) (err error) { + log.Printf("%s: Delete %s\n", d.Name, key) + return d.child.Delete(key) +} + +// DiskUsage implements the PersistentDatastore interface. 
+func (d *LogDatastore) DiskUsage() (uint64, error) { + log.Printf("%s: DiskUsage\n", d.Name) + return DiskUsage(d.child) +} + +// Query implements Datastore.Query +func (d *LogDatastore) Query(q dsq.Query) (dsq.Results, error) { + log.Printf("%s: Query\n", d.Name) + log.Printf("%s: q.Prefix: %s\n", d.Name, q.Prefix) + log.Printf("%s: q.KeysOnly: %v\n", d.Name, q.KeysOnly) + log.Printf("%s: q.Filters: %d\n", d.Name, len(q.Filters)) + log.Printf("%s: q.Orders: %d\n", d.Name, len(q.Orders)) + log.Printf("%s: q.Offset: %d\n", d.Name, q.Offset) + + return d.child.Query(q) +} + +// LogBatch logs all accesses through the batch. +type LogBatch struct { + Name string + child Batch +} + +func (d *LogDatastore) Batch() (Batch, error) { + log.Printf("%s: Batch\n", d.Name) + if bds, ok := d.child.(Batching); ok { + b, err := bds.Batch() + + if err != nil { + return nil, err + } + return &LogBatch{ + Name: d.Name, + child: b, + }, nil + } + return nil, ErrBatchUnsupported +} + +// Put implements Batch.Put +func (d *LogBatch) Put(key Key, value []byte) (err error) { + log.Printf("%s: BatchPut %s\n", d.Name, key) + // log.Printf("%s: Put %s ```%s```", d.Name, key, value) + return d.child.Put(key, value) +} + +// Delete implements Batch.Delete +func (d *LogBatch) Delete(key Key) (err error) { + log.Printf("%s: BatchDelete %s\n", d.Name, key) + return d.child.Delete(key) +} + +// Commit implements Batch.Commit +func (d *LogBatch) Commit() (err error) { + log.Printf("%s: BatchCommit\n", d.Name) + return d.child.Commit() +} + +func (d *LogDatastore) Close() error { + log.Printf("%s: Close\n", d.Name) + return d.child.Close() +} + +func (d *LogDatastore) Check() error { + if c, ok := d.child.(CheckedDatastore); ok { + return c.Check() + } + return nil +} + +func (d *LogDatastore) Scrub() error { + if c, ok := d.child.(ScrubbedDatastore); ok { + return c.Scrub() + } + return nil +} + +func (d *LogDatastore) CollectGarbage() error { + if c, ok := d.child.(GCDatastore); ok { + return 
c.CollectGarbage() + } + return nil +} diff --git a/vendor/github.com/ipfs/go-datastore/batch.go b/vendor/github.com/ipfs/go-datastore/batch.go new file mode 100644 index 0000000000..41e23ffe44 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/batch.go @@ -0,0 +1,47 @@ +package datastore + +type op struct { + delete bool + value []byte +} + +// basicBatch implements the transaction interface for datastores who do +// not have any sort of underlying transactional support +type basicBatch struct { + ops map[Key]op + + target Datastore +} + +func NewBasicBatch(ds Datastore) Batch { + return &basicBatch{ + ops: make(map[Key]op), + target: ds, + } +} + +func (bt *basicBatch) Put(key Key, val []byte) error { + bt.ops[key] = op{value: val} + return nil +} + +func (bt *basicBatch) Delete(key Key) error { + bt.ops[key] = op{delete: true} + return nil +} + +func (bt *basicBatch) Commit() error { + var err error + for k, op := range bt.ops { + if op.delete { + err = bt.target.Delete(k) + } else { + err = bt.target.Put(k, op.value) + } + if err != nil { + break + } + } + + return err +} diff --git a/vendor/github.com/ipfs/go-datastore/datastore.go b/vendor/github.com/ipfs/go-datastore/datastore.go new file mode 100644 index 0000000000..04ca726c4a --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/datastore.go @@ -0,0 +1,251 @@ +package datastore + +import ( + "errors" + "io" + "time" + + query "github.com/ipfs/go-datastore/query" +) + +/* +Datastore represents storage for any key-value pair. + +Datastores are general enough to be backed by all kinds of different storage: +in-memory caches, databases, a remote datastore, flat files on disk, etc. + +The general idea is to wrap a more complicated storage facility in a simple, +uniform interface, keeping the freedom of using the right tools for the job. +In particular, a Datastore can aggregate other datastores in interesting ways, +like sharded (to distribute load) or tiered access (caches before databases). 
+ +While Datastores should be written general enough to accept all sorts of +values, some implementations will undoubtedly have to be specific (e.g. SQL +databases where fields should be decomposed into columns), particularly to +support queries efficiently. Moreover, certain datastores may enforce certain +types of values (e.g. requiring an io.Reader, a specific struct, etc) or +serialization formats (JSON, Protobufs, etc). + +IMPORTANT: No Datastore should ever Panic! This is a cross-module interface, +and thus it should behave predictably and handle exceptional conditions with +proper error reporting. Thus, all Datastore calls may return errors, which +should be checked by callers. +*/ +type Datastore interface { + Read + Write + // Sync guarantees that any Put or Delete calls under prefix that returned + // before Sync(prefix) was called will be observed after Sync(prefix) + // returns, even if the program crashes. If Put/Delete operations already + // satisfy these requirements then Sync may be a no-op. + // + // If the prefix fails to Sync this method returns an error. + Sync(prefix Key) error + io.Closer +} + +// Write is the write-side of the Datastore interface. +type Write interface { + // Put stores the object `value` named by `key`. + // + // The generalized Datastore interface does not impose a value type, + // allowing various datastore middleware implementations (which do not + // handle the values directly) to be composed together. + // + // Ultimately, the lowest-level datastore will need to do some value checking + // or risk getting incorrect values. It may also be useful to expose a more + // type-safe interface to your application, and do the checking up-front. + Put(key Key, value []byte) error + + // Delete removes the value for given `key`. If the key is not in the + // datastore, this method returns no error. + Delete(key Key) error +} + +// Read is the read-side of the Datastore interface. 
+type Read interface { + // Get retrieves the object `value` named by `key`. + // Get will return ErrNotFound if the key is not mapped to a value. + Get(key Key) (value []byte, err error) + + // Has returns whether the `key` is mapped to a `value`. + // In some contexts, it may be much cheaper only to check for existence of + // a value, rather than retrieving the value itself. (e.g. HTTP HEAD). + // The default implementation is found in `GetBackedHas`. + Has(key Key) (exists bool, err error) + + // GetSize returns the size of the `value` named by `key`. + // In some contexts, it may be much cheaper to only get the size of the + // value rather than retrieving the value itself. + GetSize(key Key) (size int, err error) + + // Query searches the datastore and returns a query result. This function + // may return before the query actually runs. To wait for the query: + // + // result, _ := ds.Query(q) + // + // // use the channel interface; result may come in at different times + // for entry := range result.Next() { ... } + // + // // or wait for the query to be completely done + // entries, _ := result.Rest() + // for entry := range entries { ... } + // + Query(q query.Query) (query.Results, error) +} + +// Batching datastores support deferred, grouped updates to the database. +// `Batch`es do NOT have transactional semantics: updates to the underlying +// datastore are not guaranteed to occur in the same iota of time. Similarly, +// batched updates will not be flushed to the underlying datastore until +// `Commit` has been called. `Txn`s from a `TxnDatastore` have all the +// capabilities of a `Batch`, but the reverse is NOT true. +type Batching interface { + Datastore + + Batch() (Batch, error) +} + +// ErrBatchUnsupported is returned if the by Batch if the Datastore doesn't +// actually support batching. 
+var ErrBatchUnsupported = errors.New("this datastore does not support batching") + +// CheckedDatastore is an interface that should be implemented by datastores +// which may need checking on-disk data integrity. +type CheckedDatastore interface { + Datastore + + Check() error +} + +// ScrubbedDatastore is an interface that should be implemented by datastores +// which want to provide a mechanism to check data integrity and/or +// error correction. +type ScrubbedDatastore interface { + Datastore + + Scrub() error +} + +// GCDatastore is an interface that should be implemented by datastores which +// don't free disk space by just removing data from them. +type GCDatastore interface { + Datastore + + CollectGarbage() error +} + +// PersistentDatastore is an interface that should be implemented by datastores +// which can report disk usage. +type PersistentDatastore interface { + Datastore + + // DiskUsage returns the space used by a datastore, in bytes. + DiskUsage() (uint64, error) +} + +// DiskUsage checks if a Datastore is a +// PersistentDatastore and returns its DiskUsage(), +// otherwise returns 0. +func DiskUsage(d Datastore) (uint64, error) { + persDs, ok := d.(PersistentDatastore) + if !ok { + return 0, nil + } + return persDs.DiskUsage() +} + +// TTLDatastore is an interface that should be implemented by datastores that +// support expiring entries. +type TTLDatastore interface { + Datastore + TTL +} + +// TTL encapulates the methods that deal with entries with time-to-live. +type TTL interface { + PutWithTTL(key Key, value []byte, ttl time.Duration) error + SetTTL(key Key, ttl time.Duration) error + GetExpiration(key Key) (time.Time, error) +} + +// Txn extends the Datastore type. Txns allow users to batch queries and +// mutations to the Datastore into atomic groups, or transactions. Actions +// performed on a transaction will not take hold until a successful call to +// Commit has been made. 
Likewise, transactions can be aborted by calling +// Discard before a successful Commit has been made. +type Txn interface { + Read + Write + + // Commit finalizes a transaction, attempting to commit it to the Datastore. + // May return an error if the transaction has gone stale. The presence of an + // error is an indication that the data was not committed to the Datastore. + Commit() error + // Discard throws away changes recorded in a transaction without committing + // them to the underlying Datastore. Any calls made to Discard after Commit + // has been successfully called will have no effect on the transaction and + // state of the Datastore, making it safe to defer. + Discard() +} + +// TxnDatastore is an interface that should be implemented by datastores that +// support transactions. +type TxnDatastore interface { + Datastore + + NewTransaction(readOnly bool) (Txn, error) +} + +// Errors + +type dsError struct { + error + isNotFound bool +} + +func (e *dsError) NotFound() bool { + return e.isNotFound +} + +// ErrNotFound is returned by Get and GetSize when a datastore does not map the +// given key to a value. +var ErrNotFound error = &dsError{error: errors.New("datastore: key not found"), isNotFound: true} + +// GetBackedHas provides a default Datastore.Has implementation. +// It exists so Datastore.Has implementations can use it, like so: +// +// func (*d SomeDatastore) Has(key Key) (exists bool, err error) { +// return GetBackedHas(d, key) +// } +func GetBackedHas(ds Read, key Key) (bool, error) { + _, err := ds.Get(key) + switch err { + case nil: + return true, nil + case ErrNotFound: + return false, nil + default: + return false, err + } +} + +// GetBackedSize provides a default Datastore.GetSize implementation. 
+// It exists so Datastore.GetSize implementations can use it, like so: +// +// func (*d SomeDatastore) GetSize(key Key) (size int, err error) { +// return GetBackedSize(d, key) +// } +func GetBackedSize(ds Read, key Key) (int, error) { + value, err := ds.Get(key) + if err == nil { + return len(value), nil + } + return -1, err +} + +type Batch interface { + Write + + Commit() error +} diff --git a/vendor/github.com/ipfs/go-datastore/go.mod b/vendor/github.com/ipfs/go-datastore/go.mod new file mode 100644 index 0000000000..709a9c1268 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/go.mod @@ -0,0 +1,12 @@ +module github.com/ipfs/go-datastore + +require ( + github.com/google/uuid v1.1.1 + github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8 + github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 + github.com/kr/pretty v0.1.0 // indirect + golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 +) + +go 1.12 diff --git a/vendor/github.com/ipfs/go-datastore/go.sum b/vendor/github.com/ipfs/go-datastore/go.sum new file mode 100644 index 0000000000..d1289721f2 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/go.sum @@ -0,0 +1,16 @@ +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8 h1:NAviDvJ0WXgD+yiL2Rj35AmnfgI11+pHXbdciD917U0= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 h1:bspPhN+oKYFk5fcGNuQzp6IGzYQSenLEgH3s6jkXrWw= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/ipfs/go-datastore/key.go b/vendor/github.com/ipfs/go-datastore/key.go new file mode 100644 index 0000000000..42cea30844 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/key.go @@ -0,0 +1,309 @@ +package datastore + +import ( + "encoding/json" + "path" + "strings" + + dsq "github.com/ipfs/go-datastore/query" + + "github.com/google/uuid" +) + +/* +A Key represents the unique identifier of an object. +Our Key scheme is inspired by file systems and Google App Engine key model. + +Keys are meant to be unique across a system. Keys are hierarchical, +incorporating more and more specific namespaces. Thus keys can be deemed +'children' or 'ancestors' of other keys:: + + Key("/Comedy") + Key("/Comedy/MontyPython") + +Also, every namespace can be parametrized to embed relevant object +information. For example, the Key `name` (most specific namespace) could +include the object type:: + + Key("/Comedy/MontyPython/Actor:JohnCleese") + Key("/Comedy/MontyPython/Sketch:CheeseShop") + Key("/Comedy/MontyPython/Sketch:CheeseShop/Character:Mousebender") + +*/ +type Key struct { + string +} + +// NewKey constructs a key from string. it will clean the value. 
+func NewKey(s string) Key { + k := Key{s} + k.Clean() + return k +} + +// RawKey creates a new Key without safety checking the input. Use with care. +func RawKey(s string) Key { + // accept an empty string and fix it to avoid special cases + // elsewhere + if len(s) == 0 { + return Key{"/"} + } + + // perform a quick sanity check that the key is in the correct + // format, if it is not then it is a programmer error and it is + // okay to panic + if len(s) == 0 || s[0] != '/' || (len(s) > 1 && s[len(s)-1] == '/') { + panic("invalid datastore key: " + s) + } + + return Key{s} +} + +// KeyWithNamespaces constructs a key out of a namespace slice. +func KeyWithNamespaces(ns []string) Key { + return NewKey(strings.Join(ns, "/")) +} + +// Clean up a Key, using path.Clean. +func (k *Key) Clean() { + switch { + case len(k.string) == 0: + k.string = "/" + case k.string[0] == '/': + k.string = path.Clean(k.string) + default: + k.string = path.Clean("/" + k.string) + } +} + +// Strings is the string value of Key +func (k Key) String() string { + return k.string +} + +// Bytes returns the string value of Key as a []byte +func (k Key) Bytes() []byte { + return []byte(k.string) +} + +// Equal checks equality of two keys +func (k Key) Equal(k2 Key) bool { + return k.string == k2.string +} + +// Less checks whether this key is sorted lower than another. +func (k Key) Less(k2 Key) bool { + list1 := k.List() + list2 := k2.List() + for i, c1 := range list1 { + if len(list2) < (i + 1) { + return false + } + + c2 := list2[i] + if c1 < c2 { + return true + } else if c1 > c2 { + return false + } + // c1 == c2, continue + } + + // list1 is shorter or exactly the same. + return len(list1) < len(list2) +} + +// List returns the `list` representation of this Key. +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List() +// ["Comedy", "MontyPythong", "Actor:JohnCleese"] +func (k Key) List() []string { + return strings.Split(k.string, "/")[1:] +} + +// Reverse returns the reverse of this Key. 
+// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Reverse() +// NewKey("/Actor:JohnCleese/MontyPython/Comedy") +func (k Key) Reverse() Key { + l := k.List() + r := make([]string, len(l)) + for i, e := range l { + r[len(l)-i-1] = e + } + return KeyWithNamespaces(r) +} + +// Namespaces returns the `namespaces` making up this Key. +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Namespaces() +// ["Comedy", "MontyPython", "Actor:JohnCleese"] +func (k Key) Namespaces() []string { + return k.List() +} + +// BaseNamespace returns the "base" namespace of this key (path.Base(filename)) +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").BaseNamespace() +// "Actor:JohnCleese" +func (k Key) BaseNamespace() string { + n := k.Namespaces() + return n[len(n)-1] +} + +// Type returns the "type" of this key (value of last namespace). +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Type() +// "Actor" +func (k Key) Type() string { + return NamespaceType(k.BaseNamespace()) +} + +// Name returns the "name" of this key (field of last namespace). +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Name() +// "JohnCleese" +func (k Key) Name() string { + return NamespaceValue(k.BaseNamespace()) +} + +// Instance returns an "instance" of this type key (appends value to namespace). +// NewKey("/Comedy/MontyPython/Actor").Instance("JohnClesse") +// NewKey("/Comedy/MontyPython/Actor:JohnCleese") +func (k Key) Instance(s string) Key { + return NewKey(k.string + ":" + s) +} + +// Path returns the "path" of this key (parent + type). +// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Path() +// NewKey("/Comedy/MontyPython/Actor") +func (k Key) Path() Key { + s := k.Parent().string + "/" + NamespaceType(k.BaseNamespace()) + return NewKey(s) +} + +// Parent returns the `parent` Key of this Key. 
+// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Parent() +// NewKey("/Comedy/MontyPython") +func (k Key) Parent() Key { + n := k.List() + if len(n) == 1 { + return RawKey("/") + } + return NewKey(strings.Join(n[:len(n)-1], "/")) +} + +// Child returns the `child` Key of this Key. +// NewKey("/Comedy/MontyPython").Child(NewKey("Actor:JohnCleese")) +// NewKey("/Comedy/MontyPython/Actor:JohnCleese") +func (k Key) Child(k2 Key) Key { + switch { + case k.string == "/": + return k2 + case k2.string == "/": + return k + default: + return RawKey(k.string + k2.string) + } +} + +// ChildString returns the `child` Key of this Key -- string helper. +// NewKey("/Comedy/MontyPython").ChildString("Actor:JohnCleese") +// NewKey("/Comedy/MontyPython/Actor:JohnCleese") +func (k Key) ChildString(s string) Key { + return NewKey(k.string + "/" + s) +} + +// IsAncestorOf returns whether this key is a prefix of `other` +// NewKey("/Comedy").IsAncestorOf("/Comedy/MontyPython") +// true +func (k Key) IsAncestorOf(other Key) bool { + // equivalent to HasPrefix(other, k.string + "/") + + if len(other.string) <= len(k.string) { + // We're not long enough to be a child. + return false + } + + if k.string == "/" { + // We're the root and the other key is longer. + return true + } + + // "other" starts with /k.string/ + return other.string[len(k.string)] == '/' && other.string[:len(k.string)] == k.string +} + +// IsDescendantOf returns whether this key contains another as a prefix. +// NewKey("/Comedy/MontyPython").IsDescendantOf("/Comedy") +// true +func (k Key) IsDescendantOf(other Key) bool { + return other.IsAncestorOf(k) +} + +// IsTopLevel returns whether this key has only one namespace. 
+func (k Key) IsTopLevel() bool { + return len(k.List()) == 1 +} + +// MarshalJSON implements the json.Marshaler interface, +// keys are represented as JSON strings +func (k Key) MarshalJSON() ([]byte, error) { + return json.Marshal(k.String()) +} + +// UnmarshalJSON implements the json.Unmarshaler interface, +// keys will parse any value specified as a key to a string +func (k *Key) UnmarshalJSON(data []byte) error { + var key string + if err := json.Unmarshal(data, &key); err != nil { + return err + } + *k = NewKey(key) + return nil +} + +// RandomKey returns a randomly (uuid) generated key. +// RandomKey() +// NewKey("/f98719ea086343f7b71f32ea9d9d521d") +func RandomKey() Key { + return NewKey(strings.Replace(uuid.New().String(), "-", "", -1)) +} + +/* +A Key Namespace is like a path element. +A namespace can optionally include a type (delimited by ':') + + > NamespaceValue("Song:PhilosopherSong") + PhilosopherSong + > NamespaceType("Song:PhilosopherSong") + Song + > NamespaceType("Music:Song:PhilosopherSong") + Music:Song +*/ + +// NamespaceType is the first component of a namespace. `foo` in `foo:bar` +func NamespaceType(namespace string) string { + parts := strings.Split(namespace, ":") + if len(parts) < 2 { + return "" + } + return strings.Join(parts[0:len(parts)-1], ":") +} + +// NamespaceValue returns the last component of a namespace. `baz` in `f:b:baz` +func NamespaceValue(namespace string) string { + parts := strings.Split(namespace, ":") + return parts[len(parts)-1] +} + +// KeySlice attaches the methods of sort.Interface to []Key, +// sorting in increasing order. 
+type KeySlice []Key + +func (p KeySlice) Len() int { return len(p) } +func (p KeySlice) Less(i, j int) bool { return p[i].Less(p[j]) } +func (p KeySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// EntryKeys +func EntryKeys(e []dsq.Entry) []Key { + ks := make([]Key, len(e)) + for i, e := range e { + ks[i] = NewKey(e.Key) + } + return ks +} diff --git a/vendor/github.com/ipfs/go-datastore/keytransform/doc.go b/vendor/github.com/ipfs/go-datastore/keytransform/doc.go new file mode 100644 index 0000000000..b389dcfaf3 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/keytransform/doc.go @@ -0,0 +1,25 @@ +// Package keytransform introduces a Datastore Shim that transforms keys before +// passing them to its child. It can be used to manipulate what keys look like +// to the user, for example namespacing keys, reversing them, etc. +// +// Use the Wrap function to wrap a datastore with any KeyTransform. +// A KeyTransform is simply an interface with two functions, a conversion and +// its inverse. For example: +// +// import ( +// ktds "github.com/ipfs/go-datastore/keytransform" +// ds "github.com/ipfs/go-datastore" +// ) +// +// func reverseKey(k ds.Key) ds.Key { +// return k.Reverse() +// } +// +// func invertKeys(d ds.Datastore) { +// return ktds.Wrap(d, &ktds.Pair{ +// Convert: reverseKey, +// Invert: reverseKey, // reverse is its own inverse. 
+// }) +// } +// +package keytransform diff --git a/vendor/github.com/ipfs/go-datastore/keytransform/interface.go b/vendor/github.com/ipfs/go-datastore/keytransform/interface.go new file mode 100644 index 0000000000..4f07967a39 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/keytransform/interface.go @@ -0,0 +1,13 @@ +package keytransform + +import ds "github.com/ipfs/go-datastore" + +// KeyMapping is a function that maps one key to annother +type KeyMapping func(ds.Key) ds.Key + +// KeyTransform is an object with a pair of functions for (invertibly) +// transforming keys +type KeyTransform interface { + ConvertKey(ds.Key) ds.Key + InvertKey(ds.Key) ds.Key +} diff --git a/vendor/github.com/ipfs/go-datastore/keytransform/keytransform.go b/vendor/github.com/ipfs/go-datastore/keytransform/keytransform.go new file mode 100644 index 0000000000..cd03487ca0 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/keytransform/keytransform.go @@ -0,0 +1,260 @@ +package keytransform + +import ( + ds "github.com/ipfs/go-datastore" + dsq "github.com/ipfs/go-datastore/query" +) + +// Wrap wraps a given datastore with a KeyTransform function. +// The resulting wrapped datastore will use the transform on all Datastore +// operations. +func Wrap(child ds.Datastore, t KeyTransform) *Datastore { + if t == nil { + panic("t (KeyTransform) is nil") + } + + if child == nil { + panic("child (ds.Datastore) is nil") + } + + return &Datastore{child: child, KeyTransform: t} +} + +// Datastore keeps a KeyTransform function +type Datastore struct { + child ds.Datastore + + KeyTransform +} + +// Children implements ds.Shim +func (d *Datastore) Children() []ds.Datastore { + return []ds.Datastore{d.child} +} + +// Put stores the given value, transforming the key first. 
+func (d *Datastore) Put(key ds.Key, value []byte) (err error) { + return d.child.Put(d.ConvertKey(key), value) +} + +// Sync implements Datastore.Sync +func (d *Datastore) Sync(prefix ds.Key) error { + return d.child.Sync(d.ConvertKey(prefix)) +} + +// Get returns the value for given key, transforming the key first. +func (d *Datastore) Get(key ds.Key) (value []byte, err error) { + return d.child.Get(d.ConvertKey(key)) +} + +// Has returns whether the datastore has a value for a given key, transforming +// the key first. +func (d *Datastore) Has(key ds.Key) (exists bool, err error) { + return d.child.Has(d.ConvertKey(key)) +} + +// GetSize returns the size of the value named by the given key, transforming +// the key first. +func (d *Datastore) GetSize(key ds.Key) (size int, err error) { + return d.child.GetSize(d.ConvertKey(key)) +} + +// Delete removes the value for given key +func (d *Datastore) Delete(key ds.Key) (err error) { + return d.child.Delete(d.ConvertKey(key)) +} + +// Query implements Query, inverting keys on the way back out. +func (d *Datastore) Query(q dsq.Query) (dsq.Results, error) { + nq, cq := d.prepareQuery(q) + + cqr, err := d.child.Query(cq) + if err != nil { + return nil, err + } + + qr := dsq.ResultsFromIterator(q, dsq.Iterator{ + Next: func() (dsq.Result, bool) { + r, ok := cqr.NextSync() + if !ok { + return r, false + } + if r.Error == nil { + r.Entry.Key = d.InvertKey(ds.RawKey(r.Entry.Key)).String() + } + return r, true + }, + Close: func() error { + return cqr.Close() + }, + }) + return dsq.NaiveQueryApply(nq, qr), nil +} + +// Split the query into a child query and a naive query. That way, we can make +// the child datastore do as much work as possible. +func (d *Datastore) prepareQuery(q dsq.Query) (naive, child dsq.Query) { + + // First, put everything in the child query. Then, start taking things + // out. + child = q + + // Always let the child handle the key prefix. 
+ child.Prefix = d.ConvertKey(ds.NewKey(child.Prefix)).String() + + // Check if the key transform is order-preserving so we can use the + // child datastore's built-in ordering. + orderPreserving := false + switch d.KeyTransform.(type) { + case PrefixTransform, *PrefixTransform: + orderPreserving = true + } + + // Try to let the child handle ordering. +orders: + for i, o := range child.Orders { + switch o.(type) { + case dsq.OrderByValue, *dsq.OrderByValue, + dsq.OrderByValueDescending, *dsq.OrderByValueDescending: + // Key doesn't matter. + continue + case dsq.OrderByKey, *dsq.OrderByKey, + dsq.OrderByKeyDescending, *dsq.OrderByKeyDescending: + // if the key transform preserves order, we can delegate + // to the child datastore. + if orderPreserving { + // When sorting, we compare with the first + // Order, then, if equal, we compare with the + // second Order, etc. However, keys are _unique_ + // so we'll never apply any additional orders + // after ordering by key. + child.Orders = child.Orders[:i+1] + break orders + } + } + + // Can't handle this order under transform, punt it to a naive + // ordering. + naive.Orders = q.Orders + child.Orders = nil + naive.Offset = q.Offset + child.Offset = 0 + naive.Limit = q.Limit + child.Limit = 0 + break + } + + // Try to let the child handle the filters. + + // don't modify the original filters. + child.Filters = append([]dsq.Filter(nil), child.Filters...) 
+ + for i, f := range child.Filters { + switch f := f.(type) { + case dsq.FilterValueCompare, *dsq.FilterValueCompare: + continue + case dsq.FilterKeyCompare: + child.Filters[i] = dsq.FilterKeyCompare{ + Op: f.Op, + Key: d.ConvertKey(ds.NewKey(f.Key)).String(), + } + continue + case *dsq.FilterKeyCompare: + child.Filters[i] = &dsq.FilterKeyCompare{ + Op: f.Op, + Key: d.ConvertKey(ds.NewKey(f.Key)).String(), + } + continue + case dsq.FilterKeyPrefix: + child.Filters[i] = dsq.FilterKeyPrefix{ + Prefix: d.ConvertKey(ds.NewKey(f.Prefix)).String(), + } + continue + case *dsq.FilterKeyPrefix: + child.Filters[i] = &dsq.FilterKeyPrefix{ + Prefix: d.ConvertKey(ds.NewKey(f.Prefix)).String(), + } + continue + } + + // Not a known filter, defer to the naive implementation. + naive.Filters = q.Filters + child.Filters = nil + naive.Offset = q.Offset + child.Offset = 0 + naive.Limit = q.Limit + child.Limit = 0 + break + } + return +} + +func (d *Datastore) Close() error { + return d.child.Close() +} + +// DiskUsage implements the PersistentDatastore interface. 
+func (d *Datastore) DiskUsage() (uint64, error) { + return ds.DiskUsage(d.child) +} + +func (d *Datastore) Batch() (ds.Batch, error) { + bds, ok := d.child.(ds.Batching) + if !ok { + return nil, ds.ErrBatchUnsupported + } + + childbatch, err := bds.Batch() + if err != nil { + return nil, err + } + return &transformBatch{ + dst: childbatch, + f: d.ConvertKey, + }, nil +} + +type transformBatch struct { + dst ds.Batch + + f KeyMapping +} + +func (t *transformBatch) Put(key ds.Key, val []byte) error { + return t.dst.Put(t.f(key), val) +} + +func (t *transformBatch) Delete(key ds.Key) error { + return t.dst.Delete(t.f(key)) +} + +func (t *transformBatch) Commit() error { + return t.dst.Commit() +} + +func (d *Datastore) Check() error { + if c, ok := d.child.(ds.CheckedDatastore); ok { + return c.Check() + } + return nil +} + +func (d *Datastore) Scrub() error { + if c, ok := d.child.(ds.ScrubbedDatastore); ok { + return c.Scrub() + } + return nil +} + +func (d *Datastore) CollectGarbage() error { + if c, ok := d.child.(ds.GCDatastore); ok { + return c.CollectGarbage() + } + return nil +} + +var _ ds.Datastore = (*Datastore)(nil) +var _ ds.GCDatastore = (*Datastore)(nil) +var _ ds.Batching = (*Datastore)(nil) +var _ ds.PersistentDatastore = (*Datastore)(nil) +var _ ds.ScrubbedDatastore = (*Datastore)(nil) diff --git a/vendor/github.com/ipfs/go-datastore/keytransform/transforms.go b/vendor/github.com/ipfs/go-datastore/keytransform/transforms.go new file mode 100644 index 0000000000..cc39897e19 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/keytransform/transforms.go @@ -0,0 +1,49 @@ +package keytransform + +import ds "github.com/ipfs/go-datastore" + +// Pair is a convenience struct for constructing a key transform.
+type Pair struct { + Convert KeyMapping + Invert KeyMapping +} + +func (t *Pair) ConvertKey(k ds.Key) ds.Key { + return t.Convert(k) +} + +func (t *Pair) InvertKey(k ds.Key) ds.Key { + return t.Invert(k) +} + +var _ KeyTransform = (*Pair)(nil) + +// PrefixTransform constructs a KeyTransform with a pair of functions that +// add or remove the given prefix key. +// +// Warning: will panic if prefix not found when it should be there. This is +// to avoid insidious data inconsistency errors. +type PrefixTransform struct { + Prefix ds.Key +} + +// ConvertKey adds the prefix. +func (p PrefixTransform) ConvertKey(k ds.Key) ds.Key { + return p.Prefix.Child(k) +} + +// InvertKey removes the prefix. panics if prefix not found. +func (p PrefixTransform) InvertKey(k ds.Key) ds.Key { + if p.Prefix.String() == "/" { + return k + } + + if !p.Prefix.IsAncestorOf(k) { + panic("expected prefix not found") + } + + s := k.String()[len(p.Prefix.String()):] + return ds.RawKey(s) +} + +var _ KeyTransform = (*PrefixTransform)(nil) diff --git a/vendor/github.com/ipfs/go-datastore/namespace/doc.go b/vendor/github.com/ipfs/go-datastore/namespace/doc.go new file mode 100644 index 0000000000..9ff9a8ca36 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/namespace/doc.go @@ -0,0 +1,24 @@ +// Package namespace introduces a namespace Datastore Shim, which basically +// mounts the entire child datastore under a prefix. +// +// Use the Wrap function to wrap a datastore with any Key prefix. 
For example: +// +// import ( +// "fmt" +// +// ds "github.com/ipfs/go-datastore" +// nsds "github.com/ipfs/go-datastore/namespace" +// ) +// +// func main() { +// mp := ds.NewMapDatastore() +// ns := nsds.Wrap(mp, ds.NewKey("/foo/bar")) +// +// // in the Namespace Datastore: +// ns.Put(ds.NewKey("/beep"), "boop") +// v2, _ := ns.Get(ds.NewKey("/beep")) // v2 == "boop" +// +// // and, in the underlying MapDatastore: +// v3, _ := mp.Get(ds.NewKey("/foo/bar/beep")) // v3 == "boop" +// } +package namespace diff --git a/vendor/github.com/ipfs/go-datastore/namespace/namespace.go b/vendor/github.com/ipfs/go-datastore/namespace/namespace.go new file mode 100644 index 0000000000..1913fb790c --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/namespace/namespace.go @@ -0,0 +1,26 @@ +package namespace + +import ( + ds "github.com/ipfs/go-datastore" + ktds "github.com/ipfs/go-datastore/keytransform" +) + +// PrefixTransform constructs a KeyTransform with a pair of functions that +// add or remove the given prefix key. +// +// Warning: will panic if prefix not found when it should be there. This is +// to avoid insidious data inconsistency errors. +// +// DEPRECATED: Use ktds.PrefixTransform directly. +func PrefixTransform(prefix ds.Key) ktds.PrefixTransform { + return ktds.PrefixTransform{Prefix: prefix} +} + +// Wrap wraps a given datastore with a key-prefix. 
+func Wrap(child ds.Datastore, prefix ds.Key) *ktds.Datastore { + if child == nil { + panic("child (ds.Datastore) is nil") + } + + return ktds.Wrap(child, PrefixTransform(prefix)) +} diff --git a/vendor/github.com/ipfs/go-datastore/query/filter.go b/vendor/github.com/ipfs/go-datastore/query/filter.go new file mode 100644 index 0000000000..1935c4888a --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/query/filter.go @@ -0,0 +1,102 @@ +package query + +import ( + "bytes" + "fmt" + "strings" +) + +// Filter is an object that tests ResultEntries +type Filter interface { + // Filter returns whether an entry passes the filter + Filter(e Entry) bool +} + +// Op is a comparison operator +type Op string + +var ( + Equal = Op("==") + NotEqual = Op("!=") + GreaterThan = Op(">") + GreaterThanOrEqual = Op(">=") + LessThan = Op("<") + LessThanOrEqual = Op("<=") +) + +// FilterValueCompare is used to signal to datastores they +// should apply internal comparisons. unfortunately, there +// is no way to apply comparisons* to interface{} types in +// Go, so if the datastore doesn't have a special way to +// handle these comparisons, you must provide the +// TypedFilter to actually do filtering. +// +// [*] other than == and !=, which use reflect.DeepEqual.
+type FilterValueCompare struct { + Op Op + Value []byte +} + +func (f FilterValueCompare) Filter(e Entry) bool { + cmp := bytes.Compare(e.Value, f.Value) + switch f.Op { + case Equal: + return cmp == 0 + case NotEqual: + return cmp != 0 + case LessThan: + return cmp < 0 + case LessThanOrEqual: + return cmp <= 0 + case GreaterThan: + return cmp > 0 + case GreaterThanOrEqual: + return cmp >= 0 + default: + panic(fmt.Errorf("unknown operation: %s", f.Op)) + } +} + +func (f FilterValueCompare) String() string { + return fmt.Sprintf("VALUE %s %q", f.Op, string(f.Value)) +} + +type FilterKeyCompare struct { + Op Op + Key string +} + +func (f FilterKeyCompare) Filter(e Entry) bool { + switch f.Op { + case Equal: + return e.Key == f.Key + case NotEqual: + return e.Key != f.Key + case GreaterThan: + return e.Key > f.Key + case GreaterThanOrEqual: + return e.Key >= f.Key + case LessThan: + return e.Key < f.Key + case LessThanOrEqual: + return e.Key <= f.Key + default: + panic(fmt.Errorf("unknown op '%s'", f.Op)) + } +} + +func (f FilterKeyCompare) String() string { + return fmt.Sprintf("KEY %s %q", f.Op, f.Key) +} + +type FilterKeyPrefix struct { + Prefix string +} + +func (f FilterKeyPrefix) Filter(e Entry) bool { + return strings.HasPrefix(e.Key, f.Prefix) +} + +func (f FilterKeyPrefix) String() string { + return fmt.Sprintf("PREFIX(%q)", f.Prefix) +} diff --git a/vendor/github.com/ipfs/go-datastore/query/order.go b/vendor/github.com/ipfs/go-datastore/query/order.go new file mode 100644 index 0000000000..19931557e4 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/query/order.go @@ -0,0 +1,94 @@ +package query + +import ( + "bytes" + "sort" + "strings" +) + +// Order is an object used to order objects +type Order interface { + Compare(a, b Entry) int +} + +// OrderByFunction orders the results based on the result of the given function. 
+type OrderByFunction func(a, b Entry) int + +func (o OrderByFunction) Compare(a, b Entry) int { + return o(a, b) +} + +func (OrderByFunction) String() string { + return "FN" +} + +// OrderByValue is used to signal to datastores they should apply internal +// orderings. +type OrderByValue struct{} + +func (o OrderByValue) Compare(a, b Entry) int { + return bytes.Compare(a.Value, b.Value) +} + +func (OrderByValue) String() string { + return "VALUE" +} + +// OrderByValueDescending is used to signal to datastores they +// should apply internal orderings. +type OrderByValueDescending struct{} + +func (o OrderByValueDescending) Compare(a, b Entry) int { + return -bytes.Compare(a.Value, b.Value) +} + +func (OrderByValueDescending) String() string { + return "desc(VALUE)" +} + +// OrderByKey +type OrderByKey struct{} + +func (o OrderByKey) Compare(a, b Entry) int { + return strings.Compare(a.Key, b.Key) +} + +func (OrderByKey) String() string { + return "KEY" +} + +// OrderByKeyDescending +type OrderByKeyDescending struct{} + +func (o OrderByKeyDescending) Compare(a, b Entry) int { + return -strings.Compare(a.Key, b.Key) +} + +func (OrderByKeyDescending) String() string { + return "desc(KEY)" +} + +// Less returns true if a comes before b with the requested orderings. +func Less(orders []Order, a, b Entry) bool { + for _, cmp := range orders { + switch cmp.Compare(a, b) { + case 0: + case -1: + return true + case 1: + return false + } + } + + // This gives us a *stable* sort for free. We don't care + // preserving the order from the underlying datastore + // because it's undefined. + return a.Key < b.Key +} + +// Sort sorts the given entries using the given orders. 
+func Sort(orders []Order, entries []Entry) { + sort.Slice(entries, func(i int, j int) bool { + return Less(orders, entries[i], entries[j]) + }) +} diff --git a/vendor/github.com/ipfs/go-datastore/query/query.go b/vendor/github.com/ipfs/go-datastore/query/query.go new file mode 100644 index 0000000000..a390e5bf38 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/query/query.go @@ -0,0 +1,426 @@ +package query + +import ( + "fmt" + "time" + + goprocess "github.com/jbenet/goprocess" +) + +/* +Query represents storage for any key-value pair. + +tl;dr: + + queries are supported across datastores. + Cheap on top of relational dbs, and expensive otherwise. + Pick the right tool for the job! + +In addition to the key-value store get and set semantics, datastore +provides an interface to retrieve multiple records at a time through +the use of queries. The datastore Query model gleans a common set of +operations performed when querying. To avoid pasting here years of +database research, let’s summarize the operations datastore supports. + +Query Operations, applied in-order: + + * prefix - scope the query to a given path prefix + * filters - select a subset of values by applying constraints + * orders - sort the results by applying sort conditions, hierarchically. + * offset - skip a number of results (for efficient pagination) + * limit - impose a numeric limit on the number of results + +Datastore combines these operations into a simple Query class that allows +applications to define their constraints in a simple, generic, way without +introducing datastore specific calls, languages, etc. + +However, take heed: not all datastores support efficiently performing these +operations. Pick a datastore based on your needs. If you need efficient look-ups, +go for a simple key/value store. If you need efficient queries, consider an SQL +backed datastore. + +Notes: + + * Prefix: When a query filters by prefix, it selects keys that are strict + children of the prefix. 
For example, a prefix "/foo" would select "/foo/bar" + but not "/foobar" or "/foo", + * Orders: Orders are applied hierarchically. Results are sorted by the first + ordering, then entries equal under the first ordering are sorted with the + second ordering, etc. + * Limits & Offset: Limits and offsets are applied after everything else. +*/ +type Query struct { + Prefix string // namespaces the query to results whose keys have Prefix + Filters []Filter // filter results. apply sequentially + Orders []Order // order results. apply hierarchically + Limit int // maximum number of results + Offset int // skip given number of results + KeysOnly bool // return only keys. + ReturnExpirations bool // return expirations (see TTLDatastore) + ReturnsSizes bool // always return sizes. If not set, datastore impl can return + // // it anyway if it doesn't involve a performance cost. If KeysOnly + // // is not set, Size should always be set. +} + +// String returns a string representation of the Query for debugging/validation +// purposes. Do not use it for SQL queries. +func (q Query) String() string { + s := "SELECT keys" + if !q.KeysOnly { + s += ",vals" + } + if q.ReturnExpirations { + s += ",exps" + } + + s += " " + + if q.Prefix != "" { + s += fmt.Sprintf("FROM %q ", q.Prefix) + } + + if len(q.Filters) > 0 { + s += fmt.Sprintf("FILTER [%s", q.Filters[0]) + for _, f := range q.Filters[1:] { + s += fmt.Sprintf(", %s", f) + } + s += "] " + } + + if len(q.Orders) > 0 { + s += fmt.Sprintf("ORDER [%s", q.Orders[0]) + for _, f := range q.Orders[1:] { + s += fmt.Sprintf(", %s", f) + } + s += "] " + } + + if q.Offset > 0 { + s += fmt.Sprintf("OFFSET %d ", q.Offset) + } + + if q.Limit > 0 { + s += fmt.Sprintf("LIMIT %d ", q.Limit) + } + // Will always end with a space, strip it. + return s[:len(s)-1] +} + +// Entry is a query result entry. +type Entry struct { + Key string // cant be ds.Key because circular imports ...!!! + Value []byte // Will be nil if KeysOnly has been passed. 
+ Expiration time.Time // Entry expiration timestamp if requested and supported (see TTLDatastore). + Size int // Might be -1 if the datastore doesn't support listing the size with KeysOnly + // // or if ReturnsSizes is not set +} + +// Result is a special entry that includes an error, so that the client +// may be warned about internal errors. If Error is non-nil, Entry must be +// empty. +type Result struct { + Entry + + Error error +} + +// Results is a set of Query results. This is the interface for clients. +// Example: +// +// qr, _ := myds.Query(q) +// for r := range qr.Next() { +// if r.Error != nil { +// // handle. +// break +// } +// +// fmt.Println(r.Entry.Key, r.Entry.Value) +// } +// +// or, wait on all results at once: +// +// qr, _ := myds.Query(q) +// es, _ := qr.Rest() +// for _, e := range es { +// fmt.Println(e.Key, e.Value) +// } +// +type Results interface { + Query() Query // the query these Results correspond to + Next() <-chan Result // returns a channel to wait for the next result + NextSync() (Result, bool) // blocks and waits to return the next result, second parameter returns false when results are exhausted + Rest() ([]Entry, error) // waits till processing finishes, returns all entries at once. + Close() error // client may call Close to signal early exit + + // Process returns a goprocess.Process associated with these results. + // most users will not need this function (Close is all they want), + // but it's here in case you want to connect the results to other + // goprocess-friendly things. 
+ Process() goprocess.Process +} + +// results implements Results +type results struct { + query Query + proc goprocess.Process + res <-chan Result +} + +func (r *results) Next() <-chan Result { + return r.res +} + +func (r *results) NextSync() (Result, bool) { + val, ok := <-r.res + return val, ok +} + +func (r *results) Rest() ([]Entry, error) { + var es []Entry + for e := range r.res { + if e.Error != nil { + return es, e.Error + } + es = append(es, e.Entry) + } + <-r.proc.Closed() // wait till the processing finishes. + return es, nil +} + +func (r *results) Process() goprocess.Process { + return r.proc +} + +func (r *results) Close() error { + return r.proc.Close() +} + +func (r *results) Query() Query { + return r.query +} + +// ResultBuilder is what implementors use to construct results +// Implementors of datastores and their clients must respect the +// Process of the Request: +// +// * clients must call r.Process().Close() on an early exit, so +// implementations can reclaim resources. +// * if the Entries are read to completion (channel closed), Process +// should be closed automatically. +// * datastores must respect <-Process.Closing(), which intermediates +// an early close signal from the client. +// +type ResultBuilder struct { + Query Query + Process goprocess.Process + Output chan Result +} + +// Results returns a Results to this builder. +func (rb *ResultBuilder) Results() Results { + return &results{ + query: rb.Query, + proc: rb.Process, + res: rb.Output, + } +} + +const NormalBufSize = 1 +const KeysOnlyBufSize = 128 + +func NewResultBuilder(q Query) *ResultBuilder { + bufSize := NormalBufSize + if q.KeysOnly { + bufSize = KeysOnlyBufSize + } + b := &ResultBuilder{ + Query: q, + Output: make(chan Result, bufSize), + } + b.Process = goprocess.WithTeardown(func() error { + close(b.Output) + return nil + }) + return b +} + +// ResultsWithChan returns a Results object from a channel +// of Result entries.
+// +// DEPRECATED: This iterator is impossible to cancel correctly. Canceling it +// will leave anything trying to write to the result channel hanging. +func ResultsWithChan(q Query, res <-chan Result) Results { + return ResultsWithProcess(q, func(worker goprocess.Process, out chan<- Result) { + for { + select { + case <-worker.Closing(): // client told us to close early + return + case e, more := <-res: + if !more { + return + } + + select { + case out <- e: + case <-worker.Closing(): // client told us to close early + return + } + } + } + }) +} + +// ResultsWithProcess returns a Results object with the results generated by the +// passed subprocess. +func ResultsWithProcess(q Query, proc func(goprocess.Process, chan<- Result)) Results { + b := NewResultBuilder(q) + + // go consume all the entries and add them to the results. + b.Process.Go(func(worker goprocess.Process) { + proc(worker, b.Output) + }) + + go b.Process.CloseAfterChildren() //nolint + return b.Results() +} + +// ResultsWithEntries returns a Results object from a list of entries +func ResultsWithEntries(q Query, res []Entry) Results { + i := 0 + return ResultsFromIterator(q, Iterator{ + Next: func() (Result, bool) { + if i >= len(res) { + return Result{}, false + } + next := res[i] + i++ + return Result{Entry: next}, true + }, + }) +} + +func ResultsReplaceQuery(r Results, q Query) Results { + switch r := r.(type) { + case *results: + // note: not using field names to make sure all fields are copied + return &results{q, r.proc, r.res} + case *resultsIter: + // note: not using field names to make sure all fields are copied + lr := r.legacyResults + if lr != nil { + lr = &results{q, lr.proc, lr.res} + } + return &resultsIter{q, r.next, r.close, lr} + default: + panic("unknown results type") + } +} + +// +// ResultsFromIterator provides an alternative way to construct +// results without the use of channels.
+// + +func ResultsFromIterator(q Query, iter Iterator) Results { + if iter.Close == nil { + iter.Close = noopClose + } + return &resultsIter{ + query: q, + next: iter.Next, + close: iter.Close, + } +} + +func noopClose() error { + return nil +} + +type Iterator struct { + Next func() (Result, bool) + Close func() error // note: might be called more than once +} + +type resultsIter struct { + query Query + next func() (Result, bool) + close func() error + legacyResults *results +} + +func (r *resultsIter) Next() <-chan Result { + r.useLegacyResults() + return r.legacyResults.Next() +} + +func (r *resultsIter) NextSync() (Result, bool) { + if r.legacyResults != nil { + return r.legacyResults.NextSync() + } else { + res, ok := r.next() + if !ok { + r.close() + } + return res, ok + } +} + +func (r *resultsIter) Rest() ([]Entry, error) { + var es []Entry + for { + e, ok := r.NextSync() + if !ok { + break + } + if e.Error != nil { + return es, e.Error + } + es = append(es, e.Entry) + } + return es, nil +} + +func (r *resultsIter) Process() goprocess.Process { + r.useLegacyResults() + return r.legacyResults.Process() +} + +func (r *resultsIter) Close() error { + if r.legacyResults != nil { + return r.legacyResults.Close() + } else { + return r.close() + } +} + +func (r *resultsIter) Query() Query { + return r.query +} + +func (r *resultsIter) useLegacyResults() { + if r.legacyResults != nil { + return + } + + b := NewResultBuilder(r.query) + + // go consume all the entries and add them to the results. 
+ b.Process.Go(func(worker goprocess.Process) { + defer r.close() + for { + e, ok := r.next() + if !ok { + break + } + select { + case b.Output <- e: + case <-worker.Closing(): // client told us to close early + return + } + } + }) + + go b.Process.CloseAfterChildren() //nolint + + r.legacyResults = b.Results().(*results) +} diff --git a/vendor/github.com/ipfs/go-datastore/query/query_impl.go b/vendor/github.com/ipfs/go-datastore/query/query_impl.go new file mode 100644 index 0000000000..dd554e7433 --- /dev/null +++ b/vendor/github.com/ipfs/go-datastore/query/query_impl.go @@ -0,0 +1,158 @@ +package query + +import ( + "path" + + goprocess "github.com/jbenet/goprocess" +) + +// NaiveFilter applies a filter to the results. +func NaiveFilter(qr Results, filter Filter) Results { + return ResultsFromIterator(qr.Query(), Iterator{ + Next: func() (Result, bool) { + for { + e, ok := qr.NextSync() + if !ok { + return Result{}, false + } + if e.Error != nil || filter.Filter(e.Entry) { + return e, true + } + } + }, + Close: func() error { + return qr.Close() + }, + }) +} + +// NaiveLimit truncates the results to a given int limit +func NaiveLimit(qr Results, limit int) Results { + if limit == 0 { + // 0 means no limit + return qr + } + closed := false + return ResultsFromIterator(qr.Query(), Iterator{ + Next: func() (Result, bool) { + if limit == 0 { + if !closed { + closed = true + err := qr.Close() + if err != nil { + return Result{Error: err}, true + } + } + return Result{}, false + } + limit-- + return qr.NextSync() + }, + Close: func() error { + if closed { + return nil + } + closed = true + return qr.Close() + }, + }) +} + +// NaiveOffset skips a given number of results +func NaiveOffset(qr Results, offset int) Results { + return ResultsFromIterator(qr.Query(), Iterator{ + Next: func() (Result, bool) { + for ; offset > 0; offset-- { + res, ok := qr.NextSync() + if !ok || res.Error != nil { + return res, ok + } + } + return qr.NextSync() + }, + Close: func() error { + 
return qr.Close() + }, + }) +} + +// NaiveOrder reorders results according to given orders. +// WARNING: this is the only non-stream friendly operation! +func NaiveOrder(qr Results, orders ...Order) Results { + // Short circuit. + if len(orders) == 0 { + return qr + } + + return ResultsWithProcess(qr.Query(), func(worker goprocess.Process, out chan<- Result) { + defer qr.Close() + var entries []Entry + collect: + for { + select { + case <-worker.Closing(): + return + case e, ok := <-qr.Next(): + if !ok { + break collect + } + if e.Error != nil { + out <- e + continue + } + entries = append(entries, e.Entry) + } + } + + Sort(orders, entries) + + for _, e := range entries { + select { + case <-worker.Closing(): + return + case out <- Result{Entry: e}: + } + } + }) +} + +func NaiveQueryApply(q Query, qr Results) Results { + if q.Prefix != "" { + // Clean the prefix as a key and append / so a prefix of /bar + // only finds /bar/baz, not /barbaz. + prefix := q.Prefix + if len(prefix) == 0 { + prefix = "/" + } else { + if prefix[0] != '/' { + prefix = "/" + prefix + } + prefix = path.Clean(prefix) + } + // If the prefix is empty, ignore it. + if prefix != "/" { + qr = NaiveFilter(qr, FilterKeyPrefix{prefix + "/"}) + } + } + for _, f := range q.Filters { + qr = NaiveFilter(qr, f) + } + if len(q.Orders) > 0 { + qr = NaiveOrder(qr, q.Orders...) 
+ } + if q.Offset != 0 { + qr = NaiveOffset(qr, q.Offset) + } + if q.Limit != 0 { + qr = NaiveLimit(qr, q.Limit) + } + return qr +} + +func ResultEntriesFrom(keys []string, vals [][]byte) []Entry { + re := make([]Entry, len(keys)) + for i, k := range keys { + re[i] = Entry{Key: k, Size: len(vals[i]), Value: vals[i]} + } + return re +} diff --git a/vendor/github.com/ipfs/go-filestore/LICENSE-APACHE b/vendor/github.com/ipfs/go-filestore/LICENSE-APACHE new file mode 100644 index 0000000000..14478a3b60 --- /dev/null +++ b/vendor/github.com/ipfs/go-filestore/LICENSE-APACHE @@ -0,0 +1,5 @@ +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. diff --git a/vendor/github.com/ipfs/go-filestore/LICENSE-MIT b/vendor/github.com/ipfs/go-filestore/LICENSE-MIT new file mode 100644 index 0000000000..72dc60d84b --- /dev/null +++ b/vendor/github.com/ipfs/go-filestore/LICENSE-MIT @@ -0,0 +1,19 @@ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/ipfs/go-filestore/README.md b/vendor/github.com/ipfs/go-filestore/README.md new file mode 100644 index 0000000000..cf6940ef48 --- /dev/null +++ b/vendor/github.com/ipfs/go-filestore/README.md @@ -0,0 +1,38 @@ +# go-filestore + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](https://ipfs.io/) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) +[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) +[![GoDoc](https://godoc.org/github.com/ipfs/go-filestore?status.svg)](https://godoc.org/github.com/ipfs/go-filestore) + +> a by-reference file-backed blockstore + +## Lead Maintainer + +[Steven Allen](https://github.com/Stebalien) + +## Table of Contents + +- [Documentation](#documentation) +- [Contribute](#contribute) +- [License](#license) + +## Documentation + +https://godoc.org/github.com/ipfs/go-filestore + +## Contribute + +Feel free to join in. All welcome. Open an [issue](https://github.com/ipfs/go-filestore/issues)! + +This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). + +### Want to hack on IPFS? 
+ +[![](https://cdn.rawgit.com/jbenet/contribute-ipfs-gif/master/img/contribute.gif)](https://github.com/ipfs/community/blob/master/contributing.md) + +## License + +MIT + diff --git a/vendor/github.com/ipfs/go-filestore/filestore.go b/vendor/github.com/ipfs/go-filestore/filestore.go new file mode 100644 index 0000000000..a9c36c5d30 --- /dev/null +++ b/vendor/github.com/ipfs/go-filestore/filestore.go @@ -0,0 +1,251 @@ +// Package filestore implements a Blockstore which is able to read certain +// blocks of data directly from its original location in the filesystem. +// +// In a Filestore, object leaves are stored as FilestoreNodes. FilestoreNodes +// include a filesystem path and an offset, allowing a Blockstore dealing with +// such blocks to avoid storing the whole contents and reading them from their +// filesystem location instead. +package filestore + +import ( + "context" + "errors" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + dsq "github.com/ipfs/go-datastore/query" + blockstore "github.com/ipfs/go-ipfs-blockstore" + posinfo "github.com/ipfs/go-ipfs-posinfo" + logging "github.com/ipfs/go-log" +) + +var logger = logging.Logger("filestore") + +var ErrFilestoreNotEnabled = errors.New("filestore is not enabled, see https://git.io/vNItf") +var ErrUrlstoreNotEnabled = errors.New("urlstore is not enabled") + +// Filestore implements a Blockstore by combining a standard Blockstore +// to store regular blocks and a special Blockstore called +// FileManager to store blocks which data exists in an external file. +type Filestore struct { + fm *FileManager + bs blockstore.Blockstore +} + +// FileManager returns the FileManager in Filestore. +func (f *Filestore) FileManager() *FileManager { + return f.fm +} + +// MainBlockstore returns the standard Blockstore in the Filestore. +func (f *Filestore) MainBlockstore() blockstore.Blockstore { + return f.bs +} + +// NewFilestore creates one using the given Blockstore and FileManager. 
+func NewFilestore(bs blockstore.Blockstore, fm *FileManager) *Filestore { + return &Filestore{fm, bs} +} + +// AllKeysChan returns a channel from which to read the keys stored in +// the blockstore. If the given context is cancelled the channel will be closed. +func (f *Filestore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + ctx, cancel := context.WithCancel(ctx) + + a, err := f.bs.AllKeysChan(ctx) + if err != nil { + cancel() + return nil, err + } + + out := make(chan cid.Cid, dsq.KeysOnlyBufSize) + go func() { + defer cancel() + defer close(out) + + var done bool + for !done { + select { + case c, ok := <-a: + if !ok { + done = true + continue + } + select { + case out <- c: + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } + + // Can't do these at the same time because the abstractions around + // leveldb make us query leveldb for both operations. We apparently + // cant query leveldb concurrently + b, err := f.fm.AllKeysChan(ctx) + if err != nil { + logger.Error("error querying filestore: ", err) + return + } + + done = false + for !done { + select { + case c, ok := <-b: + if !ok { + done = true + continue + } + select { + case out <- c: + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } + }() + return out, nil +} + +// DeleteBlock deletes the block with the given key from the +// blockstore. As expected, in the case of FileManager blocks, only the +// reference is deleted, not its contents. It may return +// ErrNotFound when the block is not stored. 
+func (f *Filestore) DeleteBlock(c cid.Cid) error { + err1 := f.bs.DeleteBlock(c) + if err1 != nil && err1 != blockstore.ErrNotFound { + return err1 + } + + err2 := f.fm.DeleteBlock(c) + // if we successfully removed something from the blockstore, but the + // filestore didnt have it, return success + + switch err2 { + case nil: + return nil + case blockstore.ErrNotFound: + if err1 == blockstore.ErrNotFound { + return blockstore.ErrNotFound + } + return nil + default: + return err2 + } +} + +// Get retrieves the block with the given Cid. It may return +// ErrNotFound when the block is not stored. +func (f *Filestore) Get(c cid.Cid) (blocks.Block, error) { + blk, err := f.bs.Get(c) + switch err { + case nil: + return blk, nil + case blockstore.ErrNotFound: + return f.fm.Get(c) + default: + return nil, err + } +} + +// GetSize returns the size of the requested block. It may return ErrNotFound +// when the block is not stored. +func (f *Filestore) GetSize(c cid.Cid) (int, error) { + size, err := f.bs.GetSize(c) + switch err { + case nil: + return size, nil + case blockstore.ErrNotFound: + return f.fm.GetSize(c) + default: + return -1, err + } +} + +// Has returns true if the block with the given Cid is +// stored in the Filestore. +func (f *Filestore) Has(c cid.Cid) (bool, error) { + has, err := f.bs.Has(c) + if err != nil { + return false, err + } + + if has { + return true, nil + } + + return f.fm.Has(c) +} + +// Put stores a block in the Filestore. For blocks of +// underlying type FilestoreNode, the operation is +// delegated to the FileManager, while the rest of blocks +// are handled by the regular blockstore. 
+func (f *Filestore) Put(b blocks.Block) error { + has, err := f.Has(b.Cid()) + if err != nil { + return err + } + + if has { + return nil + } + + switch b := b.(type) { + case *posinfo.FilestoreNode: + return f.fm.Put(b) + default: + return f.bs.Put(b) + } +} + +// PutMany is like Put(), but takes a slice of blocks, allowing +// the underlying blockstore to perform batch transactions. +func (f *Filestore) PutMany(bs []blocks.Block) error { + var normals []blocks.Block + var fstores []*posinfo.FilestoreNode + + for _, b := range bs { + has, err := f.Has(b.Cid()) + if err != nil { + return err + } + + if has { + continue + } + + switch b := b.(type) { + case *posinfo.FilestoreNode: + fstores = append(fstores, b) + default: + normals = append(normals, b) + } + } + + if len(normals) > 0 { + err := f.bs.PutMany(normals) + if err != nil { + return err + } + } + + if len(fstores) > 0 { + err := f.fm.PutMany(fstores) + if err != nil { + return err + } + } + return nil +} + +// HashOnRead calls blockstore.HashOnRead. 
+func (f *Filestore) HashOnRead(enabled bool) {
+	f.bs.HashOnRead(enabled)
+}
+
+var _ blockstore.Blockstore = (*Filestore)(nil) // compile-time interface check
diff --git a/vendor/github.com/ipfs/go-filestore/fsrefstore.go b/vendor/github.com/ipfs/go-filestore/fsrefstore.go
new file mode 100644
index 0000000000..bc183fc38f
--- /dev/null
+++ b/vendor/github.com/ipfs/go-filestore/fsrefstore.go
@@ -0,0 +1,339 @@
+package filestore
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"path/filepath"
+
+	pb "github.com/ipfs/go-filestore/pb"
+
+	proto "github.com/gogo/protobuf/proto"
+	blocks "github.com/ipfs/go-block-format"
+	cid "github.com/ipfs/go-cid"
+	ds "github.com/ipfs/go-datastore"
+	dsns "github.com/ipfs/go-datastore/namespace"
+	dsq "github.com/ipfs/go-datastore/query"
+	blockstore "github.com/ipfs/go-ipfs-blockstore"
+	dshelp "github.com/ipfs/go-ipfs-ds-help"
+	posinfo "github.com/ipfs/go-ipfs-posinfo"
+	mh "github.com/multiformats/go-multihash"
+)
+
+// FilestorePrefix identifies the key prefix for FileManager blocks.
+var FilestorePrefix = ds.NewKey("filestore")
+
+// FileManager is a blockstore implementation which stores special
+// blocks FilestoreNode type. These nodes only contain a reference
+// to the actual location of the block data in the filesystem
+// (a path and an offset).
+type FileManager struct {
+	AllowFiles bool // gates references backed by local files (readFileDataObj)
+	AllowUrls  bool // gates references backed by http(s) URLs (readURLDataObj)
+	ds         ds.Batching
+	root       string // prefix joined onto every stored relative file path
+}
+
+// CorruptReferenceError implements the error interface.
+// It is used to indicate that the block contents pointed
+// by the referencing blocks cannot be retrieved (i.e. the
+// file is not found, or the data changed as it was being read).
+type CorruptReferenceError struct {
+	Code Status // e.g. StatusFileNotFound / StatusFileChanged / StatusFileError
+	Err  error
+}
+
+// Error() returns the error message in the CorruptReferenceError
+// as a string.
+func (c CorruptReferenceError) Error() string {
+	return c.Err.Error()
+}
+
+// NewFileManager initializes a new file manager with the given
+// datastore and root.
All FilestoreNodes paths are relative to the +// root path given here, which is prepended for any operations. +func NewFileManager(ds ds.Batching, root string) *FileManager { + return &FileManager{ds: dsns.Wrap(ds, FilestorePrefix), root: root} +} + +// AllKeysChan returns a channel from which to read the keys stored in +// the FileManager. If the given context is cancelled the channel will be +// closed. +// +// All CIDs returned are of type Raw. +func (f *FileManager) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + q := dsq.Query{KeysOnly: true} + + res, err := f.ds.Query(q) + if err != nil { + return nil, err + } + + out := make(chan cid.Cid, dsq.KeysOnlyBufSize) + go func() { + defer close(out) + for { + v, ok := res.NextSync() + if !ok { + return + } + + k := ds.RawKey(v.Key) + mhash, err := dshelp.DsKeyToMultihash(k) + if err != nil { + logger.Errorf("decoding cid from filestore: %s", err) + continue + } + + select { + case out <- cid.NewCidV1(cid.Raw, mhash): + case <-ctx.Done(): + return + } + } + }() + + return out, nil +} + +// DeleteBlock deletes the reference-block from the underlying +// datastore. It does not touch the referenced data. +func (f *FileManager) DeleteBlock(c cid.Cid) error { + err := f.ds.Delete(dshelp.MultihashToDsKey(c.Hash())) + if err == ds.ErrNotFound { + return blockstore.ErrNotFound + } + return err +} + +// Get reads a block from the datastore. Reading a block +// is done in two steps: the first step retrieves the reference +// block from the datastore. The second step uses the stored +// path and offsets to read the raw block data directly from disk. +func (f *FileManager) Get(c cid.Cid) (blocks.Block, error) { + dobj, err := f.getDataObj(c.Hash()) + if err != nil { + return nil, err + } + out, err := f.readDataObj(c.Hash(), dobj) + if err != nil { + return nil, err + } + + return blocks.NewBlockWithCid(out, c) +} + +// GetSize gets the size of the block from the datastore. 
+// +// This method may successfully return the size even if returning the block +// would fail because the associated file is no longer available. +func (f *FileManager) GetSize(c cid.Cid) (int, error) { + dobj, err := f.getDataObj(c.Hash()) + if err != nil { + return -1, err + } + return int(dobj.GetSize_()), nil +} + +func (f *FileManager) readDataObj(m mh.Multihash, d *pb.DataObj) ([]byte, error) { + if IsURL(d.GetFilePath()) { + return f.readURLDataObj(m, d) + } + return f.readFileDataObj(m, d) +} + +func (f *FileManager) getDataObj(m mh.Multihash) (*pb.DataObj, error) { + o, err := f.ds.Get(dshelp.MultihashToDsKey(m)) + switch err { + case ds.ErrNotFound: + return nil, blockstore.ErrNotFound + default: + return nil, err + case nil: + // + } + + return unmarshalDataObj(o) +} + +func unmarshalDataObj(data []byte) (*pb.DataObj, error) { + var dobj pb.DataObj + if err := proto.Unmarshal(data, &dobj); err != nil { + return nil, err + } + + return &dobj, nil +} + +func (f *FileManager) readFileDataObj(m mh.Multihash, d *pb.DataObj) ([]byte, error) { + if !f.AllowFiles { + return nil, ErrFilestoreNotEnabled + } + + p := filepath.FromSlash(d.GetFilePath()) + abspath := filepath.Join(f.root, p) + + fi, err := os.Open(abspath) + if os.IsNotExist(err) { + return nil, &CorruptReferenceError{StatusFileNotFound, err} + } else if err != nil { + return nil, &CorruptReferenceError{StatusFileError, err} + } + defer fi.Close() + + _, err = fi.Seek(int64(d.GetOffset()), io.SeekStart) + if err != nil { + return nil, &CorruptReferenceError{StatusFileError, err} + } + + outbuf := make([]byte, d.GetSize_()) + _, err = io.ReadFull(fi, outbuf) + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, &CorruptReferenceError{StatusFileChanged, err} + } else if err != nil { + return nil, &CorruptReferenceError{StatusFileError, err} + } + + // Work with CIDs for this, as they are a nice wrapper and things + // will not break if multihashes underlying types change. 
+ origCid := cid.NewCidV1(cid.Raw, m) + outcid, err := origCid.Prefix().Sum(outbuf) + if err != nil { + return nil, err + } + + if !origCid.Equals(outcid) { + return nil, &CorruptReferenceError{StatusFileChanged, + fmt.Errorf("data in file did not match. %s offset %d", d.GetFilePath(), d.GetOffset())} + } + + return outbuf, nil +} + +// reads and verifies the block from URL +func (f *FileManager) readURLDataObj(m mh.Multihash, d *pb.DataObj) ([]byte, error) { + if !f.AllowUrls { + return nil, ErrUrlstoreNotEnabled + } + + req, err := http.NewRequest("GET", d.GetFilePath(), nil) + if err != nil { + return nil, err + } + + req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", d.GetOffset(), d.GetOffset()+d.GetSize_()-1)) + + res, err := http.DefaultClient.Do(req) + if err != nil { + return nil, &CorruptReferenceError{StatusFileError, err} + } + if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusPartialContent { + return nil, &CorruptReferenceError{StatusFileError, + fmt.Errorf("expected HTTP 200 or 206 got %d", res.StatusCode)} + } + + outbuf := make([]byte, d.GetSize_()) + _, err = io.ReadFull(res.Body, outbuf) + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, &CorruptReferenceError{StatusFileChanged, err} + } else if err != nil { + return nil, &CorruptReferenceError{StatusFileError, err} + } + res.Body.Close() + + // Work with CIDs for this, as they are a nice wrapper and things + // will not break if multihashes underlying types change. + origCid := cid.NewCidV1(cid.Raw, m) + outcid, err := origCid.Prefix().Sum(outbuf) + if err != nil { + return nil, err + } + + if !origCid.Equals(outcid) { + return nil, &CorruptReferenceError{StatusFileChanged, + fmt.Errorf("data in file did not match. %s offset %d", d.GetFilePath(), d.GetOffset())} + } + + return outbuf, nil +} + +// Has returns if the FileManager is storing a block reference. It does not +// validate the data, nor checks if the reference is valid. 
+func (f *FileManager) Has(c cid.Cid) (bool, error) { + // NOTE: interesting thing to consider. Has doesnt validate the data. + // So the data on disk could be invalid, and we could think we have it. + dsk := dshelp.MultihashToDsKey(c.Hash()) + return f.ds.Has(dsk) +} + +type putter interface { + Put(ds.Key, []byte) error +} + +// Put adds a new reference block to the FileManager. It does not check +// that the reference is valid. +func (f *FileManager) Put(b *posinfo.FilestoreNode) error { + return f.putTo(b, f.ds) +} + +func (f *FileManager) putTo(b *posinfo.FilestoreNode, to putter) error { + var dobj pb.DataObj + + if IsURL(b.PosInfo.FullPath) { + if !f.AllowUrls { + return ErrUrlstoreNotEnabled + } + dobj.FilePath = b.PosInfo.FullPath + } else { + if !f.AllowFiles { + return ErrFilestoreNotEnabled + } + if !filepath.HasPrefix(b.PosInfo.FullPath, f.root) { //nolint:staticcheck + return fmt.Errorf("cannot add filestore references outside ipfs root (%s)", f.root) + } + + p, err := filepath.Rel(f.root, b.PosInfo.FullPath) + if err != nil { + return err + } + + dobj.FilePath = filepath.ToSlash(p) + } + dobj.Offset = b.PosInfo.Offset + dobj.Size_ = uint64(len(b.RawData())) + + data, err := proto.Marshal(&dobj) + if err != nil { + return err + } + + return to.Put(dshelp.MultihashToDsKey(b.Cid().Hash()), data) +} + +// PutMany is like Put() but takes a slice of blocks instead, +// allowing it to create a batch transaction. +func (f *FileManager) PutMany(bs []*posinfo.FilestoreNode) error { + batch, err := f.ds.Batch() + if err != nil { + return err + } + + for _, b := range bs { + if err := f.putTo(b, batch); err != nil { + return err + } + } + + return batch.Commit() +} + +// IsURL returns true if the string represents a valid URL that the +// urlstore can handle. More specifically it returns true if a string +// begins with 'http://' or 'https://'. 
// IsURL reports whether str is a URL the urlstore can handle: it must
// begin with "http://" or "https://" and contain at least one character
// after the scheme (the bare scheme alone is rejected).
func IsURL(str string) bool {
	if len(str) > 8 && str[:8] == "https://" {
		return true
	}
	return len(str) > 7 && str[:7] == "http://"
}
v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= +github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= +github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod 
h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/hashicorp/golang-lru v0.5.1 
h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo= +github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= +github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/ipfs/bbloom v0.0.1 h1:s7KkiBPfxCeDVo47KySjK0ACPc5GJRUxFpdyWEuDjhw= +github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= +github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= +github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= +github.com/ipfs/go-bitswap v0.1.0 h1:28YsHYw9ut6wootnImPXH0WpnU5Dbo3qm6cvQ6e6wYY= +github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= +github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= +github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-blockservice v0.1.0 h1:dh2i7xjMbCtf0ZSMyQAF2qpV/pEEmM7yVpQ00+gik6U= +github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= +github.com/ipfs/go-cid v0.0.1 h1:GBjWPktLnNyX0JiQCNFpUuUSoMw5KMyqrsejHYlILBE= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.2 h1:tuuKaZPU1M6HcejsO3AcYWW8sZ8MTvyxfc4uqB4eFE8= 
+github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.3 h1:UIAh32wymBpStoe83YCzwVQQ5Oy/H0FdxvUS6DJDzms= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= +github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-datastore v0.0.1 h1:AW/KZCScnBWlSb5JbnEnLKFWXL224LBEh/9KXXOrUms= +github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.0.5 h1:q3OfiOZV5rlsK1H5V8benjeUApRfMGs4Mrhmr6NriQo= +github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.0 h1:TOxI04l8CmO4zGtesENhzm4PwkFwJXY3rKiYaaMf9fI= +github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.3.1 h1:SS1t869a6cctoSYmZXUk8eL6AzVXgASmKIWFNQkQ1jU= +github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.4.1 h1:W4ZfzyhNi3xmuU5dQhjfuRn/wFuqEE1KnOmmQiOevEY= +github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.4 h1:rjvQ9+muFaJ+QZ7dN5B1MSDNQ0JVZKkkES/rMZmA8X8= +github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= +github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= +github.com/ipfs/go-ipfs-blockstore v0.0.1 h1:O9n3PbmTYZoNhkgkEyrXTznbmktIXif62xLX+8dPHzc= +github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod 
h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= +github.com/ipfs/go-ipfs-blockstore v0.1.0 h1:V1GZorHFUIB6YgTJQdq7mcaIpUfCM3fCyVi+MTo9O88= +github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= +github.com/ipfs/go-ipfs-blockstore v1.0.0 h1:pmFp5sFYsYVvMOp9X01AK3s85usVcLvkBTRsN6SnfUA= +github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU= +github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= +github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= +github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-ds-help v0.0.1 h1:QBg+Ts2zgeemK/dB0saiF/ykzRGgfoFMT90Rzo0OnVU= +github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= +github.com/ipfs/go-ipfs-ds-help v1.0.0 h1:bEQ8hMGs80h0sR8O4tfDgV6B01aaF9qeTrujrTLYV3g= +github.com/ipfs/go-ipfs-ds-help v1.0.0/go.mod h1:ujAbkeIgkKAWtxxNkoZHWLCyk5JpPoKnGyCcsoF6ueE= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1 h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= +github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew= +github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= +github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= +github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= +github.com/ipfs/go-ipfs-pq v0.0.1 h1:zgUotX8dcAB/w/HidJh1zzc1yFq6Vm8J7T2F4itj/RU= +github.com/ipfs/go-ipfs-pq v0.0.1/go.mod 
h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= +github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ= +github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipld-cbor v0.0.3 h1:ENsxvybwkmke7Z/QJOmeJfoguj6GH3Y0YOaGrfy9Q0I= +github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-format v0.0.1 h1:HCu4eB/Gh+KD/Q0M8u888RFkorTWNIL3da4oc5dwc80= +github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2G9Zs= +github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= +github.com/ipfs/go-log v0.0.1 h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc= +github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/ipfs/go-log v1.0.2 h1:s19ZwJxH8rPWzypjcDpqPLIyV7BnbLqvpli3iZoqYK0= +github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= +github.com/ipfs/go-log/v2 v2.0.2 h1:xguurydRdfKMJjKyxNXNU8lYP0VZH1NUwJRwUorjuEw= +github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-merkledag v0.3.1 h1:3UqWINBEr3/N+r6OwgFXAddDP/8zpQX/8J7IGVOCqRQ= +github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= +github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= +github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= +github.com/ipfs/go-peertaskqueue v0.1.0 h1:bpRbgv76eT4avutNPDFZuCPOQus6qTgurEYxfulgZW4= +github.com/ipfs/go-peertaskqueue v0.1.0/go.mod 
h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= +github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= +github.com/jackpal/gateway v1.0.5 h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc= +github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= +github.com/jackpal/go-nat-pmp v1.0.1 h1:i0LektDkO1QlrTm/cSuP+PyBCDnYvjPLGl4LdWEMiaA= +github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= +github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2 h1:vhC1OXXiT9R2pczegwz6moDvuRpggaroAXhPIseh57A= +github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 h1:bspPhN+oKYFk5fcGNuQzp6IGzYQSenLEgH3s6jkXrWw= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/jbenet/goprocess v0.1.3 h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr10= +github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kisielk/errcheck v1.1.0/go.mod 
h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b h1:wxtKgYHEncAU00muMD06dzLiahtGM1eouRNOzVV7tdQ= +github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/libp2p/go-addr-util v0.0.1 h1:TpTQm9cXVRVSKsYbgQ7GKc3KbbHVTnbostgGaDEP+88= +github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= +github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= +github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= +github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/libp2p/go-conn-security-multistream v0.1.0 h1:aqGmto+ttL/uJgX0JtQI0tD21CIEy5eYd1Hlp0juHY0= +github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= +github.com/libp2p/go-flow-metrics v0.0.1 h1:0gxuFd2GuK7IIP5pKljLwps6TvcuYgvG7Atqi3INF5s= +github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= +github.com/libp2p/go-libp2p v0.1.0 h1:8VXadcPNni74ODoZ+7326LMAppFYmz1fRQOUuT5iZvQ= +github.com/libp2p/go-libp2p v0.1.0/go.mod 
h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM= +github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= +github.com/libp2p/go-libp2p-blankhost v0.1.1 h1:X919sCh+KLqJcNRApj43xCSiQRYqOSI88Fdf55ngf78= +github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= +github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= +github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= +github.com/libp2p/go-libp2p-core v0.0.2 h1:86uOwW+O6Uc7NbaK4diuLZo2/Ikvqw2rgyV03VcSbLE= +github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= +github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ= +github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= +github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= +github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8= +github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= +github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= +github.com/libp2p/go-libp2p-mplex v0.2.1 h1:E1xaJBQnbSiTHGI1gaBKmKhu1TUKkErKJnE8iGvirYI= +github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= +github.com/libp2p/go-libp2p-nat v0.0.4 h1:+KXK324yaY701On8a0aGjTnw8467kW3ExKcqW2wwmyw= +github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= +github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= +github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= +github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY= +github.com/libp2p/go-libp2p-peer 
v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= +github.com/libp2p/go-libp2p-peerstore v0.1.0 h1:MKh7pRNPHSh1fLPj8u/M/s/napdmeNpoi9BRy9lPN0E= +github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= +github.com/libp2p/go-libp2p-record v0.1.0 h1:wHwBGbFzymoIl69BpgwIu0O6ta3TXGcMPvHUAcodzRc= +github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= +github.com/libp2p/go-libp2p-secio v0.1.0 h1:NNP5KLxuP97sE5Bu3iuwOWyT/dKEGMN5zSLMWdB7GTQ= +github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= +github.com/libp2p/go-libp2p-swarm v0.1.0 h1:HrFk2p0awrGEgch9JXK/qp/hfjqQfgNxpLWnCiWPg5s= +github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= +github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.3 h1:bdij4bKaaND7tCsaXVjRfYkMpvoOeKj9AVQGJllA6jM= +github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-transport-upgrader v0.1.1 h1:PZMS9lhjK9VytzMCW3tWHAXtKXmlURSc3ZdvwEcKCzw= +github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= +github.com/libp2p/go-libp2p-yamux v0.2.0 h1:TSPZ5cMMz/wdoYsye/wU1TE4G3LDGMoeEN0xgnCKU/I= +github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= +github.com/libp2p/go-maddr-filter v0.0.4 h1:hx8HIuuwk34KePddrp2mM5ivgPkZ09JH4AvsALRbFUs= +github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= +github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= +github.com/libp2p/go-mplex v0.1.0 h1:/nBTy5+1yRyY82YaO6HXQRnO5IAGsXTjEJaR3LdTPc0= +github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= +github.com/libp2p/go-msgio v0.0.2 
h1:ivPvEKHxmVkTClHzg6RXTYHqaJQ0V9cDbq+6lKb3UV0= +github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-nat v0.0.3 h1:l6fKV+p0Xa354EqQOQP+d8CivdLM4kl5GxC1hSc/UeI= +github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= +github.com/libp2p/go-reuseport v0.0.1 h1:7PhkfH73VXfPJYKQ6JwS5I/eVcoyYi9IMNGc6FWpFLw= +github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= +github.com/libp2p/go-reuseport-transport v0.0.2 h1:WglMwyXyBu61CMkjCCtnmqNqnjib0GIEjMiHTwR/KN4= +github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= +github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= +github.com/libp2p/go-stream-muxer-multistream v0.2.0 h1:714bRJ4Zy9mdhyTLJ+ZKiROmAFwUHpeRidG+q7LTQOg= +github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= +github.com/libp2p/go-tcp-transport v0.1.0 h1:IGhowvEqyMFknOar4FWCKSWE0zL36UFKQtiRQD60/8o= +github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= +github.com/libp2p/go-testutil v0.1.0 h1:4QhjaWGO89udplblLVpgGDOQjzFlRavZOjuEnz2rLMc= +github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= +github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= +github.com/libp2p/go-yamux v1.2.2 h1:s6J6o7+ajoQMjHe7BEnq+EynOj5D2EoG8CuQgL3F2vg= +github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= 
+github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.0 h1:U41/2erhAKcmSI14xh/ZTUdBPOzDOIfS93ibzUSl8KM= +github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod 
h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.4 h1:WgMSI84/eRLdbptXMkMWDXPjPq7SPLIgGUVm2eroyU4= +github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.0.2 h1:/Bbsgsy3R6e3jf2qBahzNHzww6usYaZ0NhNH3sqdFS8= +github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-fmt v0.0.1 h1:5YjeOIzbX8OTKVaN72aOzGIYW7PnrZrnkDyOfAWRSMA= +github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= +github.com/multiformats/go-multiaddr-net v0.0.1 h1:76O59E3FavvHqNg7jvzWzsPSW5JSi/ek0E4eiDVbg9g= +github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.5 h1:1wxmCvTXAifAepIMyF39vZinRw5sbqjPs/UIi93+uik= +github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= +github.com/multiformats/go-multihash v0.0.10 
h1:lMoNbh2Ssd9PUF74Nz008KGzGPlfeV6wH3rit5IIGCM= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multistream v0.1.0 h1:UpO6jrsjqs46mqAK3n6wKRYFhugss9ArzbyUzU+4wkQ= +github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992 h1:bzMe+2coZJYHnhGgVlcQKuRy4FSny4ds8dLQjw5P1XE= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa h1:E+gaaifzi2xF65PbDmuKI3PhLWY6G5opMLniFq8vmXA= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a h1:/eS3yfGjQKG+9kayBkj0ip1BGhq6zJ3eaVksphxAaek= +github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/warpfork/go-wish 
v0.0.0-20180510122957-5ad1f5abf436 h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc h1:9lDbC6Rz4bwmou+oE6Dt4Cb2BGMur5eR/GYptkKUVHo= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= +github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f h1:M/lL30eFZTKnomXY6huvM6G0+gVquFNf6mxghaWlFUg= +github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= +github.com/whyrusleeping/mafmt v1.2.8 h1:TCghSl5kkwEE0j+sU/gudyhVMRlpBin8fMBBHg59EbA= +github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= +github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto 
v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f h1:R423Cnkcp5JABoeemiGEPlt9tHXFfw5kvc0yqlxRPWo= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7 h1:C2F/nMkR/9sfUTpvR3QrjBuTdvMUC/cFajkphs1YLQo= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d h1:Z0Ahzd7HltpJtjAHHxX8QFP3j1yYgiuvjbjRzDj/KH0= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190524122548-abf6ff778158 h1:v73Zw0Y1htnV0qaOAYSNiuIAviPSBkNtdy1tPi1+zpY= +golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/ipfs/go-filestore/pb/Rules.mk b/vendor/github.com/ipfs/go-filestore/pb/Rules.mk new file mode 100644 index 0000000000..505f70e754 --- /dev/null +++ b/vendor/github.com/ipfs/go-filestore/pb/Rules.mk @@ -0,0 +1,8 @@ +include mk/header.mk + +PB_$(d) = $(wildcard $(d)/*.proto) +TGTS_$(d) = $(PB_$(d):.proto=.pb.go) + +#DEPS_GO += $(TGTS_$(d)) + +include mk/footer.mk diff --git a/vendor/github.com/ipfs/go-filestore/pb/dataobj.pb.go b/vendor/github.com/ipfs/go-filestore/pb/dataobj.pb.go new file mode 100644 index 0000000000..5ecc2489e0 --- /dev/null +++ b/vendor/github.com/ipfs/go-filestore/pb/dataobj.pb.go @@ -0,0 +1,375 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: dataobj.proto + +package datastore_pb + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type DataObj struct { + FilePath string `protobuf:"bytes,1,opt,name=FilePath" json:"FilePath"` + Offset uint64 `protobuf:"varint,2,opt,name=Offset" json:"Offset"` + Size_ uint64 `protobuf:"varint,3,opt,name=Size" json:"Size"` +} + +func (m *DataObj) Reset() { *m = DataObj{} } +func (m *DataObj) String() string { return proto.CompactTextString(m) } +func (*DataObj) ProtoMessage() {} +func (*DataObj) Descriptor() ([]byte, []int) { + return fileDescriptor_a76cb282d869d683, []int{0} +} +func (m *DataObj) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DataObj) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DataObj.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DataObj) XXX_Merge(src proto.Message) { + xxx_messageInfo_DataObj.Merge(m, src) +} +func (m *DataObj) XXX_Size() int { + return m.Size() +} +func (m *DataObj) XXX_DiscardUnknown() { + xxx_messageInfo_DataObj.DiscardUnknown(m) +} + +var xxx_messageInfo_DataObj proto.InternalMessageInfo + +func (m *DataObj) GetFilePath() string { + if m != nil { + return m.FilePath + } + return "" +} + +func (m *DataObj) GetOffset() uint64 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *DataObj) GetSize_() uint64 { + if m != nil { + return m.Size_ + } + return 0 +} + +func init() { + proto.RegisterType((*DataObj)(nil), "datastore.pb.DataObj") +} + +func init() { proto.RegisterFile("dataobj.proto", 
fileDescriptor_a76cb282d869d683) } + +var fileDescriptor_a76cb282d869d683 = []byte{ + // 150 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4d, 0x49, 0x2c, 0x49, + 0xcc, 0x4f, 0xca, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x01, 0x71, 0x8b, 0x4b, 0xf2, + 0x8b, 0x52, 0xf5, 0x0a, 0x92, 0x94, 0x92, 0xb9, 0xd8, 0x5d, 0x12, 0x4b, 0x12, 0xfd, 0x93, 0xb2, + 0x84, 0x14, 0xb8, 0x38, 0xdc, 0x32, 0x73, 0x52, 0x03, 0x12, 0x4b, 0x32, 0x24, 0x18, 0x15, 0x18, + 0x35, 0x38, 0x9d, 0x58, 0x4e, 0xdc, 0x93, 0x67, 0x08, 0x82, 0x8b, 0x0a, 0xc9, 0x70, 0xb1, 0xf9, + 0xa7, 0xa5, 0x15, 0xa7, 0x96, 0x48, 0x30, 0x29, 0x30, 0x6a, 0xb0, 0x40, 0xe5, 0xa1, 0x62, 0x42, + 0x12, 0x5c, 0x2c, 0xc1, 0x99, 0x55, 0xa9, 0x12, 0xcc, 0x48, 0x72, 0x60, 0x11, 0x27, 0x89, 0x13, + 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, + 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x00, 0x04, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x4a, + 0x76, 0xa0, 0x9c, 0x00, 0x00, 0x00, +} + +func (m *DataObj) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DataObj) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DataObj) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintDataobj(dAtA, i, uint64(m.Size_)) + i-- + dAtA[i] = 0x18 + i = encodeVarintDataobj(dAtA, i, uint64(m.Offset)) + i-- + dAtA[i] = 0x10 + i -= len(m.FilePath) + copy(dAtA[i:], m.FilePath) + i = encodeVarintDataobj(dAtA, i, uint64(len(m.FilePath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintDataobj(dAtA []byte, offset int, v uint64) int { + offset -= sovDataobj(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) 
+ v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DataObj) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FilePath) + n += 1 + l + sovDataobj(uint64(l)) + n += 1 + sovDataobj(uint64(m.Offset)) + n += 1 + sovDataobj(uint64(m.Size_)) + return n +} + +func sovDataobj(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDataobj(x uint64) (n int) { + return sovDataobj(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *DataObj) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDataobj + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DataObj: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DataObj: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FilePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDataobj + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDataobj + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDataobj + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FilePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + m.Offset = 0 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowDataobj + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Offset |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDataobj + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDataobj(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDataobj + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDataobj + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDataobj(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDataobj + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDataobj + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDataobj + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDataobj + } + iNdEx += length + case 3: + 
depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDataobj + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDataobj + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDataobj = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDataobj = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDataobj = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/ipfs/go-filestore/pb/dataobj.proto b/vendor/github.com/ipfs/go-filestore/pb/dataobj.proto new file mode 100644 index 0000000000..909d22b77f --- /dev/null +++ b/vendor/github.com/ipfs/go-filestore/pb/dataobj.proto @@ -0,0 +1,9 @@ +syntax = "proto2"; + +package datastore.pb; + +message DataObj { + optional string FilePath = 1; + optional uint64 Offset = 2; + optional uint64 Size = 3; +} diff --git a/vendor/github.com/ipfs/go-filestore/util.go b/vendor/github.com/ipfs/go-filestore/util.go new file mode 100644 index 0000000000..dc860f7352 --- /dev/null +++ b/vendor/github.com/ipfs/go-filestore/util.go @@ -0,0 +1,291 @@ +package filestore + +import ( + "fmt" + "sort" + + pb "github.com/ipfs/go-filestore/pb" + + cid "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + dsq "github.com/ipfs/go-datastore/query" + blockstore "github.com/ipfs/go-ipfs-blockstore" + dshelp "github.com/ipfs/go-ipfs-ds-help" + mh "github.com/multiformats/go-multihash" +) + +// Status is used to identify the state of the block data referenced +// by a FilestoreNode. Among other places, it is used by CorruptReferenceError. +type Status int32 + +// These are the supported Status codes. 
+const ( + StatusOk Status = 0 + StatusFileError Status = 10 // Backing File Error + StatusFileNotFound Status = 11 // Backing File Not Found + StatusFileChanged Status = 12 // Contents of the file changed + StatusOtherError Status = 20 // Internal Error, likely corrupt entry + StatusKeyNotFound Status = 30 +) + +// String provides a human-readable representation for Status codes. +func (s Status) String() string { + switch s { + case StatusOk: + return "ok" + case StatusFileError: + return "error" + case StatusFileNotFound: + return "no-file" + case StatusFileChanged: + return "changed" + case StatusOtherError: + return "ERROR" + case StatusKeyNotFound: + return "missing" + default: + return "???" + } +} + +// Format returns the status formatted as a string +// with leading 0s. +func (s Status) Format() string { + return fmt.Sprintf("%-7s", s.String()) +} + +// ListRes wraps the response of the List*() functions, which +// allows to obtain and verify blocks stored by the FileManager +// of a Filestore. It includes information about the referenced +// block. +type ListRes struct { + Status Status + ErrorMsg string + Key cid.Cid + FilePath string + Offset uint64 + Size uint64 +} + +// FormatLong returns a human readable string for a ListRes object +func (r *ListRes) FormatLong(enc func(cid.Cid) string) string { + if enc == nil { + enc = (cid.Cid).String + } + switch { + case !r.Key.Defined(): + return "" + case r.FilePath == "": + return r.Key.String() + default: + return fmt.Sprintf("%-50s %6d %s %d", enc(r.Key), r.Size, r.FilePath, r.Offset) + } +} + +// List fetches the block with the given key from the Filemanager +// of the given Filestore and returns a ListRes object with the information. +// List does not verify that the reference is valid or whether the +// raw data is accesible. See Verify(). 
+func List(fs *Filestore, key cid.Cid) *ListRes { + return list(fs, false, key.Hash()) +} + +// ListAll returns a function as an iterator which, once invoked, returns +// one by one each block in the Filestore's FileManager. +// ListAll does not verify that the references are valid or whether +// the raw data is accessible. See VerifyAll(). +func ListAll(fs *Filestore, fileOrder bool) (func() *ListRes, error) { + if fileOrder { + return listAllFileOrder(fs, false) + } + return listAll(fs, false) +} + +// Verify fetches the block with the given key from the Filemanager +// of the given Filestore and returns a ListRes object with the information. +// Verify makes sure that the reference is valid and the block data can be +// read. +func Verify(fs *Filestore, key cid.Cid) *ListRes { + return list(fs, true, key.Hash()) +} + +// VerifyAll returns a function as an iterator which, once invoked, +// returns one by one each block in the Filestore's FileManager. +// VerifyAll checks that the reference is valid and that the block data +// can be read. 
+func VerifyAll(fs *Filestore, fileOrder bool) (func() *ListRes, error) { + if fileOrder { + return listAllFileOrder(fs, true) + } + return listAll(fs, true) +} + +func list(fs *Filestore, verify bool, key mh.Multihash) *ListRes { + dobj, err := fs.fm.getDataObj(key) + if err != nil { + return mkListRes(key, nil, err) + } + if verify { + _, err = fs.fm.readDataObj(key, dobj) + } + return mkListRes(key, dobj, err) +} + +func listAll(fs *Filestore, verify bool) (func() *ListRes, error) { + q := dsq.Query{} + qr, err := fs.fm.ds.Query(q) + if err != nil { + return nil, err + } + + return func() *ListRes { + mhash, dobj, err := next(qr) + if dobj == nil && err == nil { + return nil + } else if err == nil && verify { + _, err = fs.fm.readDataObj(mhash, dobj) + } + return mkListRes(mhash, dobj, err) + }, nil +} + +func next(qr dsq.Results) (mh.Multihash, *pb.DataObj, error) { + v, ok := qr.NextSync() + if !ok { + return nil, nil, nil + } + + k := ds.RawKey(v.Key) + mhash, err := dshelp.DsKeyToMultihash(k) + if err != nil { + return nil, nil, fmt.Errorf("decoding multihash from filestore: %s", err) + } + + dobj, err := unmarshalDataObj(v.Value) + if err != nil { + return mhash, nil, err + } + + return mhash, dobj, nil +} + +func listAllFileOrder(fs *Filestore, verify bool) (func() *ListRes, error) { + q := dsq.Query{} + qr, err := fs.fm.ds.Query(q) + if err != nil { + return nil, err + } + + var entries listEntries + + for { + v, ok := qr.NextSync() + if !ok { + break + } + dobj, err := unmarshalDataObj(v.Value) + if err != nil { + entries = append(entries, &listEntry{ + dsKey: v.Key, + err: err, + }) + } else { + entries = append(entries, &listEntry{ + dsKey: v.Key, + filePath: dobj.GetFilePath(), + offset: dobj.GetOffset(), + size: dobj.GetSize_(), + }) + } + } + sort.Sort(entries) + + i := 0 + return func() *ListRes { + if i >= len(entries) { + return nil + } + v := entries[i] + i++ + // attempt to convert the datastore key to a Multihash, + // store the error but 
don't use it yet + mhash, keyErr := dshelp.DsKeyToMultihash(ds.RawKey(v.dsKey)) + // first if they listRes already had an error return that error + if v.err != nil { + return mkListRes(mhash, nil, v.err) + } + // now reconstruct the DataObj + dobj := pb.DataObj{ + FilePath: v.filePath, + Offset: v.offset, + Size_: v.size, + } + // now if we could not convert the datastore key return that + // error + if keyErr != nil { + return mkListRes(mhash, &dobj, keyErr) + } + // finally verify the dataobj if requested + var err error + if verify { + _, err = fs.fm.readDataObj(mhash, &dobj) + } + return mkListRes(mhash, &dobj, err) + }, nil +} + +type listEntry struct { + filePath string + offset uint64 + dsKey string + size uint64 + err error +} + +type listEntries []*listEntry + +func (l listEntries) Len() int { return len(l) } +func (l listEntries) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l listEntries) Less(i, j int) bool { + if l[i].filePath == l[j].filePath { + if l[i].offset == l[j].offset { + return l[i].dsKey < l[j].dsKey + } + return l[i].offset < l[j].offset + } + return l[i].filePath < l[j].filePath +} + +func mkListRes(m mh.Multihash, d *pb.DataObj, err error) *ListRes { + status := StatusOk + errorMsg := "" + if err != nil { + if err == ds.ErrNotFound || err == blockstore.ErrNotFound { + status = StatusKeyNotFound + } else if err, ok := err.(*CorruptReferenceError); ok { + status = err.Code + } else { + status = StatusOtherError + } + errorMsg = err.Error() + } + + c := cid.NewCidV1(cid.Raw, m) + + if d == nil { + return &ListRes{ + Status: status, + ErrorMsg: errorMsg, + Key: c, + } + } + + return &ListRes{ + Status: status, + ErrorMsg: errorMsg, + Key: c, + FilePath: d.FilePath, + Size: d.Size_, + Offset: d.Offset, + } +} diff --git a/vendor/github.com/ipfs/go-graphsync/.golangci.yml b/vendor/github.com/ipfs/go-graphsync/.golangci.yml new file mode 100644 index 0000000000..ba18d3a450 --- /dev/null +++ b/vendor/github.com/ipfs/go-graphsync/.golangci.yml 
@@ -0,0 +1,3 @@ +run: + skip-files: + - testutil/chaintypes/testchain_gen.go \ No newline at end of file diff --git a/vendor/github.com/ipfs/go-graphsync/COPYRIGHT b/vendor/github.com/ipfs/go-graphsync/COPYRIGHT new file mode 100644 index 0000000000..771e6f7cd7 --- /dev/null +++ b/vendor/github.com/ipfs/go-graphsync/COPYRIGHT @@ -0,0 +1,3 @@ +Copyright 2019. Protocol Labs, Inc. + +This library is dual-licensed under Apache 2.0 and MIT terms. diff --git a/vendor/github.com/ipfs/go-graphsync/LICENSE-APACHE b/vendor/github.com/ipfs/go-graphsync/LICENSE-APACHE new file mode 100644 index 0000000000..546514363d --- /dev/null +++ b/vendor/github.com/ipfs/go-graphsync/LICENSE-APACHE @@ -0,0 +1,13 @@ +Copyright 2019. Protocol Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/ipfs/go-graphsync/LICENSE-MIT b/vendor/github.com/ipfs/go-graphsync/LICENSE-MIT new file mode 100644 index 0000000000..ea532a8305 --- /dev/null +++ b/vendor/github.com/ipfs/go-graphsync/LICENSE-MIT @@ -0,0 +1,19 @@ +Copyright 2019. Protocol Labs, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/ipfs/go-graphsync/README.md b/vendor/github.com/ipfs/go-graphsync/README.md new file mode 100644 index 0000000000..f9baff8f9c --- /dev/null +++ b/vendor/github.com/ipfs/go-graphsync/README.md @@ -0,0 +1,234 @@ +# go-graphsync + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) +[![Matrix](https://img.shields.io/badge/matrix-%23ipfs%3Amatrix.org-blue.svg?style=flat-square)](https://matrix.to/#/#ipfs:matrix.org) +[![IRC](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) +[![Discord](https://img.shields.io/discord/475789330380488707?color=blueviolet&label=discord&style=flat-square)](https://discord.gg/24fmuwR) +[![Coverage Status](https://codecov.io/gh/ipfs/go-graphsync/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs/go-graphsync/branch/master) +[![Build Status](https://circleci.com/gh/ipfs/go-bitswap.svg?style=svg)](https://circleci.com/gh/ipfs/go-graphsync) + +> An implementation of the [graphsync protocol](https://github.com/ipld/specs/blob/master/block-layer/graphsync/graphsync.md) in go! + +## Table of Contents + +- [Background](#background) +- [Install](#install) +- [Usage](#usage) +- [Architecture](#architecture) +- [Contribute](#contribute) +- [License](#license) + +## Background + +[GraphSync](https://github.com/ipld/specs/blob/master/block-layer/graphsync/graphsync.md) is a protocol for synchronizing IPLD graphs among peers. It allows a host to make a single request to a remote peer for all of the results of traversing an [IPLD selector](https://github.com/ipld/specs/blob/master/block-layer/selectors/selectors.md) on the remote peer's local IPLD graph. + +`go-graphsync` provides an implementation of the Graphsync protocol in go. 
+ +### Go-IPLD-Prime + +`go-graphsync` relies on `go-ipld-prime` to traverse IPLD Selectors in an IPLD graph. `go-ipld-prime` implements the [IPLD specification](https://github.com/ipld/specs) in go and is an alternative to older implementations such as `go-ipld-format` and `go-ipld-cbor`. In order to use `go-graphsync`, some understanding and use of `go-ipld-prime` concepts is necessary. + +If your existing library (i.e. `go-ipfs` or `go-filecoin`) uses these other older libraries, you can largely use go-graphsync without switching to `go-ipld-prime` across your codebase, but it will require some translations + +## Install + +`go-graphsync` requires Go >= 1.11 and can be installed using Go modules + +## Usage + +### Initializing a GraphSync Exchange + +```golang +import ( + graphsync "github.com/ipfs/go-graphsync/impl" + gsnet "github.com/ipfs/go-graphsync/network" + ipld "github.com/ipld/go-ipld-prime" +) + +var ctx context.Context +var host libp2p.Host +var loader ipld.Loader +var storer ipld.Storer + +network := gsnet.NewFromLibp2pHost(host) +exchange := graphsync.New(ctx, network, loader, storer) +``` + +Parameter Notes: + +1. `context` is just the parent context for all of GraphSync +2. `network` is a network abstraction provided to Graphsync on top +of libp2p. This allows graphsync to be tested without the actual network +3. `loader` is used to load blocks from content ids from the local block store. It's used when RESPONDING to requests from other clients. It should conform to the IPLD loader interface: https://github.com/ipld/go-ipld-prime/blob/master/linking.go +4. `storer` is used to store incoming blocks to the local block store. It's used when REQUESTING a graphsync query, to store blocks locally once they are validated as part of the correct response. 
It should conform to the IPLD storer interface: https://github.com/ipld/go-ipld-prime/blob/master/linking.go + +### Using GraphSync With An IPFS BlockStore + +GraphSync provides two convenience functions in the `storeutil` package for +integrating with BlockStore's from IPFS. + +```golang +import ( + graphsync "github.com/ipfs/go-graphsync/impl" + gsnet "github.com/ipfs/go-graphsync/network" + storeutil "github.com/ipfs/go-graphsync/storeutil" + ipld "github.com/ipld/go-ipld-prime" + blockstore "github.com/ipfs/go-ipfs-blockstore" +) + +var ctx context.Context +var host libp2p.Host +var bs blockstore.Blockstore + +network := gsnet.NewFromLibp2pHost(host) +loader := storeutil.LoaderForBlockstore(bs) +storer := storeutil.StorerForBlockstore(bs) + +exchange := graphsync.New(ctx, network, loader, storer) +``` + +### Write A Loader An IPFS BlockStore + +If you are using a traditional go-ipfs-blockstore, your link loading function looks like this: + +```golang +type BlockStore interface { + Get(lnk cid.Cid) (blocks.Block, error) +} +``` + +or, more generally: + +```golang +type Cid2BlockFn func (lnk cid.Cid) (blocks.Block, error) +``` + +in `go-ipld-prime`, the signature for a link loader is as follows: + +```golang +type Loader func(lnk Link, lnkCtx LinkContext) (io.Reader, error) +``` + +`go-ipld-prime` intentionally keeps its interfaces as abstract as possible to limit dependencies on other ipfs/filecoin specific packages. An IPLD Link is an abstraction for a CID, and IPLD expects io.Reader's rather than an actual block. IPLD provides a `cidLink` package for working with Links that use CIDs as the underlying data, and it's safe to assume that's the type in use if your code deals only with CIDs. 
A conversion would look something like this: + +```golang +import ( + ipld "github.com/ipld/go-ipld-prime" + cidLink "github.com/ipld/go-ipld-prime/linking/cid" +) + +func LoaderFromCid2BlockFn(cid2BlockFn Cid2BlockFn) ipld.Loader { + return func(lnk ipld.Link, lnkCtx ipld.LinkContext) (io.Reader, error) { + asCidLink, ok := lnk.(cidlink.Link) + if !ok { + return nil, fmt.Errorf("Unsupported Link Type") + } + block, err := cid2BlockFn(asCidLink.Cid) + if err != nil { + return nil, err + } + return bytes.NewReader(block.RawData()), nil + } +} +``` + +### Write A Storer From An IPFS BlockStore + +If you are using a traditional go-ipfs-blockstore, your storage function looks like this: + +```golang +type BlockStore interface { + Put(blocks.Block) error +} +``` + +or, more generally: + +```golang +type BlockStoreFn func (blocks.Block) (error) +``` + +in `go-ipld-prime`, the signature for a link storer is a bit different: + +```golang +type StoreCommitter func(Link) error +type Storer func(lnkCtx LinkContext) (io.Writer, StoreCommitter, error) +``` + +`go-ipld-prime` stores in two parts to support streaming -- the storer is called and returns an IO.Writer and a function to commit changes when finished. Here's how you can write a storer from a traditional block storing signature. 
+ +```golang +import ( + blocks "github.com/ipfs/go-block-format" + ipld "github.com/ipld/go-ipld-prime" + cidLink "github.com/ipld/go-ipld-prime/linking/cid" +) + +func StorerFromBlockStoreFn(blockStoreFn BlockStoreFn) ipld.Storer { + return func(lnkCtx ipld.LinkContext) (io.Writer, ipld.StoreCommitter, error) { + var buffer bytes.Buffer + committer := func(lnk ipld.Link) error { + asCidLink, ok := lnk.(cidlink.Link) + if !ok { + return fmt.Errorf("Unsupported Link Type") + } + block := blocks.NewBlockWithCid(buffer.Bytes(), asCidLink.Cid) + return blockStoreFn(block) + } + return &buffer, committer, nil + } +} +``` + +### Calling Graphsync + +```golang +var exchange graphsync.GraphSync +var ctx context.Context +var p peer.ID +var selector ipld.Node +var rootLink ipld.Link + +var responseProgress <-chan graphsync.ResponseProgress +var errors <-chan error + +responseProgress, errors = exchange.Request(ctx context.Context, p peer.ID, root ipld.Link, selector ipld.Node) +``` + +Paramater Notes: +1. `ctx` is the context for this request. To cancel an in progress request, cancel the context. +2. `p` is the peer you will send this request to +3. `link` is an IPLD Link, i.e. a CID (cidLink.Link{Cid}) +4. `selector` is an IPLD selector node. Recommend using selector builders from go-ipld-prime to construct these + +### Response Type + +```golang + +type ResponseProgress struct { + Node ipld.Node // a node which matched the graphsync query + Path ipld.Path // the path of that node relative to the traversal start + LastBlock struct { // LastBlock stores the Path and Link of the last block edge we had to load. + ipld.Path + ipld.Link + } +} + +``` + +The above provides both immediate and relevant metadata for matching nodes in a traversal, and is very similar to the information provided by a local IPLD selector traversal in `go-ipld-prime` + +## Contribute + +PRs are welcome! 
+ +Before doing anything heavy, checkout the [Graphsync Architecture](docs/architecture.md) + +See our [Contributing Guidelines](https://github.com/ipfs/go-graphsync/blob/master/CONTRIBUTING.md) for more info. + +## License + +This library is dual-licensed under Apache 2.0 and MIT terms. + +Copyright 2019. Protocol Labs, Inc. diff --git a/vendor/github.com/ipfs/go-graphsync/go.mod b/vendor/github.com/ipfs/go-graphsync/go.mod new file mode 100644 index 0000000000..05fbe72563 --- /dev/null +++ b/vendor/github.com/ipfs/go-graphsync/go.mod @@ -0,0 +1,48 @@ +module github.com/ipfs/go-graphsync + +go 1.12 + +require ( + github.com/gogo/protobuf v1.3.1 + github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f // indirect + github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 + github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e + github.com/ipfs/go-block-format v0.0.2 + github.com/ipfs/go-blockservice v0.1.3 + github.com/ipfs/go-cid v0.0.6 + github.com/ipfs/go-datastore v0.4.4 + github.com/ipfs/go-ipfs-blockstore v0.1.4 + github.com/ipfs/go-ipfs-blocksutil v0.0.1 + github.com/ipfs/go-ipfs-chunker v0.0.5 + github.com/ipfs/go-ipfs-delay v0.0.1 + github.com/ipfs/go-ipfs-exchange-offline v0.0.1 + github.com/ipfs/go-ipfs-files v0.0.8 + github.com/ipfs/go-ipfs-routing v0.1.0 + github.com/ipfs/go-ipfs-util v0.0.1 + github.com/ipfs/go-ipld-cbor v0.0.4 // indirect + github.com/ipfs/go-ipld-format v0.2.0 + github.com/ipfs/go-log v1.0.3 + github.com/ipfs/go-merkledag v0.3.1 + github.com/ipfs/go-peertaskqueue v0.2.0 + github.com/ipfs/go-unixfs v0.2.4 + github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f + github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6 + github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c + github.com/jbenet/goprocess v0.1.4 // indirect + github.com/libp2p/go-libp2p v0.6.0 + github.com/libp2p/go-libp2p-core v0.5.0 + github.com/libp2p/go-libp2p-netutil v0.1.0 + 
github.com/libp2p/go-libp2p-record v0.1.1 // indirect + github.com/libp2p/go-libp2p-testing v0.1.1 + github.com/libp2p/go-msgio v0.0.6 + github.com/multiformats/go-multiaddr v0.2.1 + github.com/multiformats/go-multihash v0.0.13 + github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a // indirect + github.com/smartystreets/assertions v1.0.1 // indirect + github.com/stretchr/testify v1.5.1 + github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d + go.uber.org/multierr v1.4.0 // indirect + golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3 // indirect + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 + google.golang.org/protobuf v1.25.0 +) diff --git a/vendor/github.com/ipfs/go-graphsync/go.sum b/vendor/github.com/ipfs/go-graphsync/go.sum new file mode 100644 index 0000000000..91bdb59e1e --- /dev/null +++ b/vendor/github.com/ipfs/go-graphsync/go.sum @@ -0,0 +1,781 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= +github.com/Stebalien/go-bitfield v0.0.1 h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo= +github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32 
h1:qkOC5Gd33k54tobS36cXdAzJbeHaduLtnLQQwNoIi78= +github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= +github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c h1:aEbSeNALREWXk0G7UdNhR3ayBV7tZ4M2PNmnrCAph6Q= +github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3 h1:A/EVblehb75cUgXA5njHPn0kLAsykn6mJGz7rnmW5W0= +github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= +github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= +github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018 h1:6xT9KW8zLC5IlbaIF5Q7JNieBoACT7iW0YTxQHR0in0= +github.com/davidlazar/go-crypto 
v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= +github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/badger v1.6.0 h1:DshxFxZWXUcO0xX476VJC07Xsr6ZCBVRHKZ93Oh7Evo= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= 
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 
h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= +github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= +github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/gxed/hashland/keccakpg 
v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 h1:F9k+7wv5OIk1zcq23QpdiL0hfDuXPjuOmMNaC6fgQ0Q= +github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1/go.mod h1:jvfsLIxk0fY/2BKSQ1xf2406AKA5dwMmKKv0ADcOfN8= +github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4nmd7b5qy5t0GWDTwSn4OyRgfAXSmo6VnryBY= +github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e/go.mod h1:I8h3MITA53gN9OnWGCgaMa0JWVRdXthWw4M3CPM54OY= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo= +github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= +github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/ipfs/bbloom v0.0.1 h1:s7KkiBPfxCeDVo47KySjK0ACPc5GJRUxFpdyWEuDjhw= +github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= 
+github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= +github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= +github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= +github.com/ipfs/go-bitswap v0.1.2 h1:IkhOPiifc6b2LWTi/vp8TXwNT0eGCsizI1JFbZ08IQQ= +github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= +github.com/ipfs/go-bitswap v0.1.8 h1:38X1mKXkiU6Nzw4TOSWD8eTVY5eX3slQunv3QEWfXKg= +github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= +github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= +github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-blockservice v0.1.0 h1:dh2i7xjMbCtf0ZSMyQAF2qpV/pEEmM7yVpQ00+gik6U= +github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= +github.com/ipfs/go-blockservice v0.1.3 h1:9XgsPMwwWJSC9uVr2pMDsW2qFTBSkxpGMhmna8mIjPM= +github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= +github.com/ipfs/go-cid v0.0.1 h1:GBjWPktLnNyX0JiQCNFpUuUSoMw5KMyqrsejHYlILBE= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.2 h1:tuuKaZPU1M6HcejsO3AcYWW8sZ8MTvyxfc4uqB4eFE8= +github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.3 h1:UIAh32wymBpStoe83YCzwVQQ5Oy/H0FdxvUS6DJDzms= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= +github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= +github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= 
+github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs= +github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-datastore v0.0.1 h1:AW/KZCScnBWlSb5JbnEnLKFWXL224LBEh/9KXXOrUms= +github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.0.5 h1:q3OfiOZV5rlsK1H5V8benjeUApRfMGs4Mrhmr6NriQo= +github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.4 h1:rjvQ9+muFaJ+QZ7dN5B1MSDNQ0JVZKkkES/rMZmA8X8= +github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= +github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= +github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= +github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= +github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= +github.com/ipfs/go-ipfs-blockstore v0.0.1 h1:O9n3PbmTYZoNhkgkEyrXTznbmktIXif62xLX+8dPHzc= +github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= 
+github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= +github.com/ipfs/go-ipfs-blockstore v0.1.4 h1:2SGI6U1B44aODevza8Rde3+dY30Pb+lbcObe1LETxOQ= +github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= +github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= +github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= +github.com/ipfs/go-ipfs-chunker v0.0.1 h1:cHUUxKFQ99pozdahi+uSC/3Y6HeRpi9oTeUHbE27SEw= +github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= +github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8= +github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= +github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-ds-help v0.0.1 h1:QBg+Ts2zgeemK/dB0saiF/ykzRGgfoFMT90Rzo0OnVU= +github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= +github.com/ipfs/go-ipfs-ds-help v0.1.1 h1:IW/bXGeaAZV2VH0Kuok+Ohva/zHkHmeLFBxC1k7mNPc= +github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1 h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM= +github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= +github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew= +github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= +github.com/ipfs/go-ipfs-files v0.0.3/go.mod 
h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= +github.com/ipfs/go-ipfs-files v0.0.8 h1:8o0oFJkJ8UkO/ABl8T6ac6tKF3+NIpj67aAB6ZpusRg= +github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= +github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= +github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= +github.com/ipfs/go-ipfs-pq v0.0.1 h1:zgUotX8dcAB/w/HidJh1zzc1yFq6Vm8J7T2F4itj/RU= +github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= +github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= +github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= +github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ= +github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipld-cbor v0.0.2 h1:amzFztBQQQ69UA5+f7JRfoXF/z2l//MGfEDHVkS20+s= +github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-cbor v0.0.3 h1:ENsxvybwkmke7Z/QJOmeJfoguj6GH3Y0YOaGrfy9Q0I= +github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= +github.com/ipfs/go-ipld-cbor v0.0.4 h1:Aw3KPOKXjvrm6VjwJvFf1F1ekR/BH3jdof3Bk7OTiSA= +github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2G9Zs= +github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= +github.com/ipfs/go-ipld-format v0.2.0 
h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= +github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= +github.com/ipfs/go-log v0.0.1 h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc= +github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/ipfs/go-log v1.0.2 h1:s19ZwJxH8rPWzypjcDpqPLIyV7BnbLqvpli3iZoqYK0= +github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= +github.com/ipfs/go-log v1.0.3 h1:Gg7SUYSZ7BrqaKMwM+hRgcAkKv4QLfzP4XPQt5Sx/OI= +github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= +github.com/ipfs/go-log/v2 v2.0.2 h1:xguurydRdfKMJjKyxNXNU8lYP0VZH1NUwJRwUorjuEw= +github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.3 h1:Q2gXcBoCALyLN/pUQlz1qgu0x3uFV6FzP9oXhpfyJpc= +github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-merkledag v0.2.3 h1:aMdkK9G1hEeNvn3VXfiEMLY0iJnbiQQUHnM0HFJREsE= +github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= +github.com/ipfs/go-merkledag v0.3.1 h1:3UqWINBEr3/N+r6OwgFXAddDP/8zpQX/8J7IGVOCqRQ= +github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= +github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= +github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= +github.com/ipfs/go-peertaskqueue v0.1.0 h1:bpRbgv76eT4avutNPDFZuCPOQus6qTgurEYxfulgZW4= +github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= +github.com/ipfs/go-peertaskqueue v0.2.0 h1:2cSr7exUGKYyDeUyQ7P/nHPs9P7Ht/B+ROrpN1EJOjc= +github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= 
+github.com/ipfs/go-unixfs v0.2.4 h1:6NwppOXefWIyysZ4LR/qUBPvXd5//8J3jiMdvpbw6Lo= +github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= +github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= +github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= +github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f h1:XpOuNQ5GbXxUcSukbQcW9jkE7REpaFGJU2/T00fo9kA= +github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= +github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6 h1:6Mq+tZGSEMEoJJ1NbJRhddeelkXZcU8yfH/ZRYUo/Es= +github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= +github.com/jackpal/gateway v1.0.5 h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc= +github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= +github.com/jackpal/go-nat-pmp v1.0.1 h1:i0LektDkO1QlrTm/cSuP+PyBCDnYvjPLGl4LdWEMiaA= +github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= +github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c h1:uUx61FiAa1GI6ZmVd2wf2vULeQZIKG66eybjNXKYCz4= +github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c/go.mod h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= +github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2 h1:vhC1OXXiT9R2pczegwz6moDvuRpggaroAXhPIseh57A= +github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= +github.com/jbenet/goprocess 
v0.0.0-20160826012719-b497e2f366b8 h1:bspPhN+oKYFk5fcGNuQzp6IGzYQSenLEgH3s6jkXrWw= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/jbenet/goprocess v0.1.3 h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr10= +github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= +github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b h1:wxtKgYHEncAU00muMD06dzLiahtGM1eouRNOzVV7tdQ= +github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= 
+github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d h1:68u9r4wEvL3gYg2jvAOgROwZ3H+Y3hIDk4tbbmIjcYQ= +github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/libp2p/go-addr-util v0.0.1 h1:TpTQm9cXVRVSKsYbgQ7GKc3KbbHVTnbostgGaDEP+88= +github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= +github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= +github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= +github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/libp2p/go-conn-security-multistream v0.1.0 h1:aqGmto+ttL/uJgX0JtQI0tD21CIEy5eYd1Hlp0juHY0= +github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= +github.com/libp2p/go-eventbus v0.1.0 h1:mlawomSAjjkk97QnYiEmHsLu7E136+2oCWSHRUvMfzQ= +github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= +github.com/libp2p/go-flow-metrics v0.0.1 h1:0gxuFd2GuK7IIP5pKljLwps6TvcuYgvG7Atqi3INF5s= +github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= +github.com/libp2p/go-flow-metrics v0.0.3 h1:8tAs/hSdNvUiLgtlSy3mxwxWP4I9y/jlkPFT7epKdeM= +github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= +github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM= +github.com/libp2p/go-libp2p v0.1.1/go.mod 
h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= +github.com/libp2p/go-libp2p v0.6.0 h1:EFArryT9N7AVA70LCcOh8zxsW+FeDnxwcpWQx9k7+GM= +github.com/libp2p/go-libp2p v0.6.0/go.mod h1:mfKWI7Soz3ABX+XEBR61lGbg+ewyMtJHVt043oWeqwg= +github.com/libp2p/go-libp2p-autonat v0.1.0 h1:aCWAu43Ri4nU0ZPO7NyLzUvvfqd0nE3dX0R/ZGYVgOU= +github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= +github.com/libp2p/go-libp2p-autonat v0.1.1 h1:WLBZcIRsjZlWdAZj9CiBSvU2wQXoUOiS1Zk1tM7DTJI= +github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= +github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= +github.com/libp2p/go-libp2p-blankhost v0.1.4 h1:I96SWjR4rK9irDHcHq3XHN6hawCRTPUADzkJacgZLvk= +github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= +github.com/libp2p/go-libp2p-circuit v0.1.0 h1:eniLL3Y9aq/sryfyV1IAHj5rlvuyj3b7iz8tSiZpdhY= +github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= +github.com/libp2p/go-libp2p-circuit v0.1.4 h1:Phzbmrg3BkVzbqd4ZZ149JxCuUWu2wZcXf/Kr6hZJj8= +github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= +github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= +github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= +github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= +github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I= +github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= +github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= +github.com/libp2p/go-libp2p-core v0.2.4 h1:Et6ykkTwI6PU44tr8qUF9k43vP0aduMNniShAbUJJw8= +github.com/libp2p/go-libp2p-core v0.2.4/go.mod 
h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= +github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= +github.com/libp2p/go-libp2p-core v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUhgJJLAa6almrII= +github.com/libp2p/go-libp2p-core v0.4.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= +github.com/libp2p/go-libp2p-core v0.5.0 h1:FBQ1fpq2Fo/ClyjojVJ5AKXlKhvNc/B6U0O+7AN1ffE= +github.com/libp2p/go-libp2p-core v0.5.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= +github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ= +github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= +github.com/libp2p/go-libp2p-discovery v0.1.0 h1:j+R6cokKcGbnZLf4kcNwpx6mDEUPF3N6SrqMymQhmvs= +github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= +github.com/libp2p/go-libp2p-discovery v0.2.0 h1:1p3YSOq7VsgaL+xVHPi8XAmtGyas6D2J6rWBEfz/aiY= +github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= +github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8= +github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= +github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= +github.com/libp2p/go-libp2p-mplex v0.2.1 h1:E1xaJBQnbSiTHGI1gaBKmKhu1TUKkErKJnE8iGvirYI= +github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= +github.com/libp2p/go-libp2p-mplex v0.2.2 h1:+Ld7YDAfVERQ0E+qqjE7o6fHwKuM0SqTzYiwN1lVVSA= +github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= +github.com/libp2p/go-libp2p-nat v0.0.4 h1:+KXK324yaY701On8a0aGjTnw8467kW3ExKcqW2wwmyw= +github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= +github.com/libp2p/go-libp2p-nat v0.0.5 
h1:/mH8pXFVKleflDL1YwqMg27W9GD8kjEx7NY0P6eGc98= +github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= +github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= +github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= +github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY= +github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= +github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= +github.com/libp2p/go-libp2p-peerstore v0.1.3 h1:wMgajt1uM2tMiqf4M+4qWKVyyFc8SfA+84VV9glZq1M= +github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= +github.com/libp2p/go-libp2p-peerstore v0.2.0 h1:XcgJhI8WyUOCbHyRLNEX5542YNj8hnLSJ2G1InRjDhk= +github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= +github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= +github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= +github.com/libp2p/go-libp2p-record v0.1.0 h1:wHwBGbFzymoIl69BpgwIu0O6ta3TXGcMPvHUAcodzRc= +github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= +github.com/libp2p/go-libp2p-record v0.1.1 h1:ZJK2bHXYUBqObHX+rHLSNrM3M8fmJUlUHrodDPPATmY= +github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= +github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= +github.com/libp2p/go-libp2p-secio v0.2.0 h1:ywzZBsWEEz2KNTn5RtzauEDq5RFEefPsttXYwAWqHng= +github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= +github.com/libp2p/go-libp2p-secio v0.2.1 h1:eNWbJTdyPA7NxhP7J3c5lT97DC5d+u+IldkgCYFTPVA= +github.com/libp2p/go-libp2p-secio v0.2.1/go.mod 
h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= +github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= +github.com/libp2p/go-libp2p-swarm v0.2.2 h1:T4hUpgEs2r371PweU3DuH7EOmBIdTBCwWs+FLcgx3bQ= +github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= +github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.4 h1:Qev57UR47GcLPXWjrunv5aLIQGO4n9mhI/8/EIrEEFc= +github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.1.0 h1:WaFRj/t3HdMZGNZqnU2pS7pDRBmMeoDx7/HDNpeyT9U= +github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= +github.com/libp2p/go-libp2p-testing v0.1.1 h1:U03z3HnGI7Ni8Xx6ONVZvUFOAzWYmolWf5W5jAOPNmU= +github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= +github.com/libp2p/go-libp2p-transport-upgrader v0.1.1 h1:PZMS9lhjK9VytzMCW3tWHAXtKXmlURSc3ZdvwEcKCzw= +github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= +github.com/libp2p/go-libp2p-transport-upgrader v0.2.0 h1:5EhPgQhXZNyfL22ERZTUoVp9UVVbNowWNVtELQaKCHk= +github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= +github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= +github.com/libp2p/go-libp2p-yamux v0.2.1 h1:Q3XYNiKCC2vIxrvUJL+Jg1kiyeEaIDNKLjgEjo3VQdI= +github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= +github.com/libp2p/go-libp2p-yamux v0.2.2 h1:eGvbqWqWY9S5lrpe2gA0UCOLCdzCgYSAR3vo/xCsNQg= +github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= 
+github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= +github.com/libp2p/go-maddr-filter v0.0.5 h1:CW3AgbMO6vUvT4kf87y4N+0P8KUl2aqLYhrGyDUbLSg= +github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= +github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= +github.com/libp2p/go-mplex v0.1.0 h1:/nBTy5+1yRyY82YaO6HXQRnO5IAGsXTjEJaR3LdTPc0= +github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= +github.com/libp2p/go-mplex v0.1.1 h1:huPH/GGRJzmsHR9IZJJsrSwIM5YE2gL4ssgl1YWb/ps= +github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= +github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.4 h1:agEFehY3zWJFUHK6SEMR7UYmk2z6kC3oeCM7ybLhguA= +github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.6 h1:lQ7Uc0kS1wb1EfRxO2Eir/RJoHkHn7t6o+EiwsYIKJA= +github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= +github.com/libp2p/go-nat v0.0.3 h1:l6fKV+p0Xa354EqQOQP+d8CivdLM4kl5GxC1hSc/UeI= +github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= +github.com/libp2p/go-nat v0.0.4 h1:KbizNnq8YIf7+Hn7+VFL/xE0eDrkPru2zIO9NMwL8UQ= +github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= +github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= +github.com/libp2p/go-openssl v0.0.3 h1:wjlG7HvQkt4Fq4cfH33Ivpwp0omaElYEi9z26qaIkIk= +github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.4 h1:d27YZvLoTyMhIN4njrkr8zMDOM4lfpHIp6A+TK9fovg= +github.com/libp2p/go-openssl v0.0.4/go.mod 
h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-reuseport v0.0.1 h1:7PhkfH73VXfPJYKQ6JwS5I/eVcoyYi9IMNGc6FWpFLw= +github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= +github.com/libp2p/go-reuseport-transport v0.0.2 h1:WglMwyXyBu61CMkjCCtnmqNqnjib0GIEjMiHTwR/KN4= +github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= +github.com/libp2p/go-stream-muxer v0.0.1 h1:Ce6e2Pyu+b5MC1k3eeFtAax0pW4gc6MosYSLV05UeLw= +github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= +github.com/libp2p/go-stream-muxer-multistream v0.2.0 h1:714bRJ4Zy9mdhyTLJ+ZKiROmAFwUHpeRidG+q7LTQOg= +github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= +github.com/libp2p/go-tcp-transport v0.1.0 h1:IGhowvEqyMFknOar4FWCKSWE0zL36UFKQtiRQD60/8o= +github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= +github.com/libp2p/go-tcp-transport v0.1.1 h1:yGlqURmqgNA2fvzjSgZNlHcsd/IulAnKM8Ncu+vlqnw= +github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= +github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= +github.com/libp2p/go-ws-transport v0.1.0 h1:F+0OvvdmPTDsVc4AjPHjV7L7Pk1B7D5QwtDcKE2oag4= +github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= +github.com/libp2p/go-ws-transport v0.2.0 h1:MJCw2OrPA9+76YNRvdo1wMnSOxb9Bivj6sVFY1Xrj6w= +github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= +github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.2.3 h1:xX8A36vpXb59frIzWFdEgptLMsOANMFq2K7fPRlunYI= +github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.0 
h1:FsYzT16Wq2XqUGJsBbOxoz9g+dFklvNi7jN6YFPfl7U= +github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5 
h1:l16XLUUJ34wIz+RIvLhSwGvLvKyy+W598b135bJN6mg= +github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.0 h1:U41/2erhAKcmSI14xh/ZTUdBPOzDOIfS93ibzUSl8KM= +github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= +github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-multiaddr v0.0.1 h1:/QUV3VBMDI6pi6xfiw7lr6xhDWWvQKn9udPn68kLSdY= +github.com/multiformats/go-multiaddr v0.0.1/go.mod 
h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.4 h1:WgMSI84/eRLdbptXMkMWDXPjPq7SPLIgGUVm2eroyU4= +github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.1 h1:rVAztJYMhCQ7vEFr8FvxW3mS+HF2eY/oPbOMeS0ZDnE= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= +github.com/multiformats/go-multiaddr v0.2.1 h1:SgG/cw5vqyB5QQe5FPe2TqggU9WtrA9X4nZw7LlVqOI= +github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= +github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.0.2 h1:/Bbsgsy3R6e3jf2qBahzNHzww6usYaZ0NhNH3sqdFS8= +github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.2.0 h1:YWJoIDwLePniH7OU5hBnDZV6SWuvJqJ0YtN6pLeH9zA= +github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= +github.com/multiformats/go-multiaddr-fmt v0.0.1 h1:5YjeOIzbX8OTKVaN72aOzGIYW7PnrZrnkDyOfAWRSMA= +github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= +github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multiaddr-net v0.0.1 h1:76O59E3FavvHqNg7jvzWzsPSW5JSi/ek0E4eiDVbg9g= +github.com/multiformats/go-multiaddr-net v0.0.1/go.mod 
h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= +github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= +github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= +github.com/multiformats/go-multiaddr-net v0.1.2 h1:P7zcBH9FRETdPkDrylcXVjQLQ2t1JQtNItZULWNWgeg= +github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.5 h1:1wxmCvTXAifAepIMyF39vZinRw5sbqjPs/UIi93+uik= +github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multistream v0.1.0 h1:UpO6jrsjqs46mqAK3n6wKRYFhugss9ArzbyUzU+4wkQ= +github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-multistream v0.1.1 h1:JlAdpIFhBhGRLxe9W6Om0w++Gd6KMWoFPZL/dEnm9nI= +github.com/multiformats/go-multistream v0.1.1/go.mod 
h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= +github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= +github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod 
h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14 h1:2m16U/rLwVaRdz7ANkHtHTodP3zTP3N451MADg64x5k= +github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 h1:CskT+S6Ay54OwxBGB0R3Rsx4Muto6UnEYTyKJbyRIAI= +github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a h1:hjZfReYVLbqFkAtr2us7vdy04YWz3LVAirzP7reh8+M= +github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= +github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= +github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a h1:/eS3yfGjQKG+9kayBkj0ip1BGhq6zJ3eaVksphxAaek= +github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= 
+github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli/v2 v2.0.0 
h1:+HU9SCbu8GnEUFtIBfuUNXN39ofWViIEJIp6SURMpCg= +github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830 h1:8kxMKmKzXXL4Ru1nyhvdms/JjWt+3YLpvRb/bAjO/y0= +github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= +github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d h1:wSxKhvbN7kUoP0sfRS+w2tWr45qlU8409i94hHLOT8w= +github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= +github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc h1:9lDbC6Rz4bwmou+oE6Dt4Cb2BGMur5eR/GYptkKUVHo= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= +github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= +github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f h1:M/lL30eFZTKnomXY6huvM6G0+gVquFNf6mxghaWlFUg= +github.com/whyrusleeping/go-notifier 
v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= +github.com/whyrusleeping/mafmt v1.2.8 h1:TCghSl5kkwEE0j+sU/gudyhVMRlpBin8fMBBHg59EbA= +github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= +github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= +github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.1 h1:8dP3SGL7MPB94crU3bEPplMPe83FI4EouesJUeFHv50= +go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod 
h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b h1:+/WWzjwW6gidDJnMKWLKLX1gxn7irUTF1fLpQovfQ5M= +golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 h1:p/H982KKEjUnLJkM3tt/LemDnOc1GiZL5FCVlORJ5zo= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443 h1:IcSOAf4PyMp3U3XbIEj1/xJ2BjNN2jWv7JoyOsMxXUU= +golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7 h1:C2F/nMkR/9sfUTpvR3QrjBuTdvMUC/cFajkphs1YLQo= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e h1:ZytStCyV048ZqDsWHiYDdoI2Vd4msMcrDECFxS+tL9c= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3 h1:r3P/5xOq/dK1991B65Oy6E1fRF/2d/fSYZJ/fXGVfJc= +golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= +gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/vendor/github.com/ipfs/go-graphsync/graphsync.go 
b/vendor/github.com/ipfs/go-graphsync/graphsync.go new file mode 100644 index 0000000000..bcbda10828 --- /dev/null +++ b/vendor/github.com/ipfs/go-graphsync/graphsync.go @@ -0,0 +1,359 @@ +package graphsync + +import ( + "context" + "errors" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/traversal" + "github.com/libp2p/go-libp2p-core/peer" +) + +// RequestID is a unique identifier for a GraphSync request. +type RequestID int32 + +// Priority a priority for a GraphSync request. +type Priority int32 + +// ResponseStatusCode is a status returned for a GraphSync Request. +type ResponseStatusCode int32 + +// ExtensionName is a name for a GraphSync extension +type ExtensionName string + +// ExtensionData is a name/data pair for a graphsync extension +type ExtensionData struct { + Name ExtensionName + Data []byte +} + +const ( + + // Known Graphsync Extensions + + // ExtensionMetadata provides response metadata for a Graphsync request and is + // documented at + // https://github.com/ipld/specs/blob/master/block-layer/graphsync/known_extensions.md + ExtensionMetadata = ExtensionName("graphsync/response-metadata") + + // ExtensionDoNotSendCIDs tells the responding peer not to send certain blocks if they + // are encountered in a traversal and is documented at + // https://github.com/ipld/specs/blob/master/block-layer/graphsync/known_extensions.md + ExtensionDoNotSendCIDs = ExtensionName("graphsync/do-not-send-cids") + + // ExtensionDeDupByKey tells the responding peer to only deduplicate block sending + // for requests that have the same key. The data for the extension is a string key + ExtensionDeDupByKey = ExtensionName("graphsync/dedup-by-key") + + // GraphSync Response Status Codes + + // Informational Response Codes (partial) + + // RequestAcknowledged means the request was received and is being worked on. 
+ RequestAcknowledged = ResponseStatusCode(10) + // AdditionalPeers means additional peers were found that may be able + // to satisfy the request and contained in the extra block of the response. + AdditionalPeers = ResponseStatusCode(11) + // NotEnoughGas means fulfilling this request requires payment. + NotEnoughGas = ResponseStatusCode(12) + // OtherProtocol means a different type of response than GraphSync is + // contained in extra. + OtherProtocol = ResponseStatusCode(13) + // PartialResponse may include blocks and metadata about the in progress response + // in extra. + PartialResponse = ResponseStatusCode(14) + // RequestPaused indicates a request is paused and will not send any more data + // until unpaused + RequestPaused = ResponseStatusCode(15) + + // Success Response Codes (request terminated) + + // RequestCompletedFull means the entire fulfillment of the GraphSync request + // was sent back. + RequestCompletedFull = ResponseStatusCode(20) + // RequestCompletedPartial means the response is completed, and part of the + // GraphSync request was sent back, but not the complete request. + RequestCompletedPartial = ResponseStatusCode(21) + + // Error Response Codes (request terminated) + + // RequestRejected means the node did not accept the incoming request. + RequestRejected = ResponseStatusCode(30) + // RequestFailedBusy means the node is too busy, try again later. Backoff may + // be contained in extra. + RequestFailedBusy = ResponseStatusCode(31) + // RequestFailedUnknown means the request failed for an unspecified reason. May + // contain data about why in extra. + RequestFailedUnknown = ResponseStatusCode(32) + // RequestFailedLegal means the request failed for legal reasons. + RequestFailedLegal = ResponseStatusCode(33) + // RequestFailedContentNotFound means the respondent does not have the content. 
+ RequestFailedContentNotFound = ResponseStatusCode(34) + // RequestCancelled means the responder was processing the request but decided to top, for whatever reason + RequestCancelled = ResponseStatusCode(35) +) + +// RequestContextCancelledErr is an error message received on the error channel when the request context given by the user is cancelled/times out +type RequestContextCancelledErr struct{} + +func (e RequestContextCancelledErr) Error() string { + return "Request Context Cancelled" +} + +// RequestFailedBusyErr is an error message received on the error channel when the peer is busy +type RequestFailedBusyErr struct{} + +func (e RequestFailedBusyErr) Error() string { + return "Request Failed - Peer Is Busy" +} + +// RequestFailedContentNotFoundErr is an error message received on the error channel when the content is not found +type RequestFailedContentNotFoundErr struct{} + +func (e RequestFailedContentNotFoundErr) Error() string { + return "Request Failed - Content Not Found" +} + +// RequestFailedLegalErr is an error message received on the error channel when the request fails for legal reasons +type RequestFailedLegalErr struct{} + +func (e RequestFailedLegalErr) Error() string { + return "Request Failed - For Legal Reasons" +} + +// RequestFailedUnknownErr is an error message received on the error channel when the request fails for unknown reasons +type RequestFailedUnknownErr struct{} + +func (e RequestFailedUnknownErr) Error() string { + return "Request Failed - Unknown Reason" +} + +// RequestCancelledErr is an error message received on the error channel that indicates the responder cancelled a request +type RequestCancelledErr struct{} + +func (e RequestCancelledErr) Error() string { + return "Request Failed - Responder Cancelled" +} + +var ( + // ErrExtensionAlreadyRegistered means a user extension can be registered only once + ErrExtensionAlreadyRegistered = errors.New("extension already registered") +) + +// ResponseProgress is the fundamental 
unit of responses making progress in Graphsync. +type ResponseProgress struct { + Node ipld.Node // a node which matched the graphsync query + Path ipld.Path // the path of that node relative to the traversal start + LastBlock struct { // LastBlock stores the Path and Link of the last block edge we had to load. + Path ipld.Path + Link ipld.Link + } +} + +// RequestData describes a received graphsync request. +type RequestData interface { + // ID Returns the request ID for this Request + ID() RequestID + + // Root returns the CID to the root block of this request + Root() cid.Cid + + // Selector returns the byte representation of the selector for this request + Selector() ipld.Node + + // Priority returns the priority of this request + Priority() Priority + + // Extension returns the content for an extension on a response, or errors + // if extension is not present + Extension(name ExtensionName) ([]byte, bool) + + // IsCancel returns true if this particular request is being cancelled + IsCancel() bool +} + +// ResponseData describes a received Graphsync response +type ResponseData interface { + // RequestID returns the request ID for this response + RequestID() RequestID + + // Status returns the status for a response + Status() ResponseStatusCode + + // Extension returns the content for an extension on a response, or errors + // if extension is not present + Extension(name ExtensionName) ([]byte, bool) +} + +// BlockData gives information about a block included in a graphsync response +type BlockData interface { + // Link is the link/cid for the block + Link() ipld.Link + + // BlockSize specifies the size of the block + BlockSize() uint64 + + // BlockSize specifies the amount of data actually transmitted over the network + BlockSizeOnWire() uint64 +} + +// IncomingRequestHookActions are actions that a request hook can take to change +// behavior for the response +type IncomingRequestHookActions interface { + SendExtensionData(ExtensionData) + 
UsePersistenceOption(name string) + UseLinkTargetNodePrototypeChooser(traversal.LinkTargetNodePrototypeChooser) + TerminateWithError(error) + ValidateRequest() + PauseResponse() +} + +// OutgoingBlockHookActions are actions that an outgoing block hook can take to +// change the execution of a request +type OutgoingBlockHookActions interface { + SendExtensionData(ExtensionData) + TerminateWithError(error) + PauseResponse() +} + +// OutgoingRequestHookActions are actions that an outgoing request hook can take +// to change the execution of a request +type OutgoingRequestHookActions interface { + UsePersistenceOption(name string) + UseLinkTargetNodePrototypeChooser(traversal.LinkTargetNodePrototypeChooser) +} + +// IncomingResponseHookActions are actions that incoming response hook can take +// to change the execution of a request +type IncomingResponseHookActions interface { + TerminateWithError(error) + UpdateRequestWithExtensions(...ExtensionData) +} + +// IncomingBlockHookActions are actions that incoming block hook can take +// to change the execution of a request +type IncomingBlockHookActions interface { + TerminateWithError(error) + UpdateRequestWithExtensions(...ExtensionData) + PauseRequest() +} + +// RequestUpdatedHookActions are actions that can be taken in a request updated hook to +// change execution of the response +type RequestUpdatedHookActions interface { + TerminateWithError(error) + SendExtensionData(ExtensionData) + UnpauseResponse() +} + +// OnIncomingRequestHook is a hook that runs each time a new request is received. +// It receives the peer that sent the request and all data about the request. +// It receives an interface for customizing the response to this request +type OnIncomingRequestHook func(p peer.ID, request RequestData, hookActions IncomingRequestHookActions) + +// OnIncomingResponseHook is a hook that runs each time a new response is received. +// It receives the peer that sent the response and all data about the response. 
+// It receives an interface for customizing how we handle the ongoing execution of the request +type OnIncomingResponseHook func(p peer.ID, responseData ResponseData, hookActions IncomingResponseHookActions) + +// OnIncomingBlockHook is a hook that runs each time a new block is validated as +// part of the response, regardless of whether it came locally or over the network +// It receives that sent the response, the most recent response, a link for the block received, +// and the size of the block received +// The difference between BlockSize & BlockSizeOnWire can be used to determine +// where the block came from (Local vs remote) +// It receives an interface for customizing how we handle the ongoing execution of the request +type OnIncomingBlockHook func(p peer.ID, responseData ResponseData, blockData BlockData, hookActions IncomingBlockHookActions) + +// OnOutgoingRequestHook is a hook that runs immediately prior to sending a request +// It receives the peer we're sending a request to and all the data aobut the request +// It receives an interface for customizing how we handle executing this request +type OnOutgoingRequestHook func(p peer.ID, request RequestData, hookActions OutgoingRequestHookActions) + +// OnOutgoingBlockHook is a hook that runs immediately after a requestor sends a new block +// on a response +// It receives the peer we're sending a request to, all the data aobut the request, a link for the block sent, +// and the size of the block sent +// It receives an interface for taking further action on the response +type OnOutgoingBlockHook func(p peer.ID, request RequestData, block BlockData, hookActions OutgoingBlockHookActions) + +// OnRequestUpdatedHook is a hook that runs when an update to a request is received +// It receives the peer we're sending to, the original request, the request update +// It receives an interface to taking further action on the response +type OnRequestUpdatedHook func(p peer.ID, request RequestData, updateRequest 
RequestData, hookActions RequestUpdatedHookActions) + +// OnBlockSentListener runs when a block is sent over the wire +type OnBlockSentListener func(p peer.ID, request RequestData, block BlockData) + +// OnNetworkErrorListener runs when queued data is not able to be sent +type OnNetworkErrorListener func(p peer.ID, request RequestData, err error) + +// OnResponseCompletedListener provides a way to listen for when responder has finished serving a response +type OnResponseCompletedListener func(p peer.ID, request RequestData, status ResponseStatusCode) + +// OnRequestorCancelledListener provides a way to listen for responses the requestor canncels +type OnRequestorCancelledListener func(p peer.ID, request RequestData) + +// UnregisterHookFunc is a function call to unregister a hook that was previously registered +type UnregisterHookFunc func() + +// GraphExchange is a protocol that can exchange IPLD graphs based on a selector +type GraphExchange interface { + // Request initiates a new GraphSync request to the given peer using the given selector spec. 
+ Request(ctx context.Context, p peer.ID, root ipld.Link, selector ipld.Node, extensions ...ExtensionData) (<-chan ResponseProgress, <-chan error) + + // RegisterPersistenceOption registers an alternate loader/storer combo that can be substituted for the default + RegisterPersistenceOption(name string, loader ipld.Loader, storer ipld.Storer) error + + // UnregisterPersistenceOption unregisters an alternate loader/storer combo + UnregisterPersistenceOption(name string) error + + // RegisterIncomingRequestHook adds a hook that runs when a request is received + RegisterIncomingRequestHook(hook OnIncomingRequestHook) UnregisterHookFunc + + // RegisterIncomingResponseHook adds a hook that runs when a response is received + RegisterIncomingResponseHook(OnIncomingResponseHook) UnregisterHookFunc + + // RegisterIncomingBlockHook adds a hook that runs when a block is received and validated (put in block store) + RegisterIncomingBlockHook(OnIncomingBlockHook) UnregisterHookFunc + + // RegisterOutgoingRequestHook adds a hook that runs immediately prior to sending a new request + RegisterOutgoingRequestHook(hook OnOutgoingRequestHook) UnregisterHookFunc + + // RegisterOutgoingBlockHook adds a hook that runs every time a block is sent from a responder + RegisterOutgoingBlockHook(hook OnOutgoingBlockHook) UnregisterHookFunc + + // RegisterRequestUpdatedHook adds a hook that runs every time an update to a request is received + RegisterRequestUpdatedHook(hook OnRequestUpdatedHook) UnregisterHookFunc + + // RegisterCompletedResponseListener adds a listener on the responder for completed responses + RegisterCompletedResponseListener(listener OnResponseCompletedListener) UnregisterHookFunc + + // RegisterRequestorCancelledListener adds a listener on the responder for + // responses cancelled by the requestor + RegisterRequestorCancelledListener(listener OnRequestorCancelledListener) UnregisterHookFunc + + // RegisterBlockSentListener adds a listener for when blocks are actually sent 
over the wire + RegisterBlockSentListener(listener OnBlockSentListener) UnregisterHookFunc + + // RegisterNetworkErrorListener adds a listener for when errors occur sending data over the wire + RegisterNetworkErrorListener(listener OnNetworkErrorListener) UnregisterHookFunc + + // UnpauseRequest unpauses a request that was paused in a block hook based request ID + // Can also send extensions with unpause + UnpauseRequest(RequestID, ...ExtensionData) error + + // PauseRequest pauses an in progress request (may take 1 or more blocks to process) + PauseRequest(RequestID) error + + // UnpauseResponse unpauses a response that was paused in a block hook based on peer ID and request ID + // Can also send extensions with unpause + UnpauseResponse(peer.ID, RequestID, ...ExtensionData) error + + // PauseResponse pauses an in progress response (may take 1 or more blocks to process) + PauseResponse(peer.ID, RequestID) error + + // CancelResponse cancels an in progress response + CancelResponse(peer.ID, RequestID) error +} diff --git a/vendor/github.com/ipfs/go-hamt-ipld/.gitignore b/vendor/github.com/ipfs/go-hamt-ipld/.gitignore new file mode 100644 index 0000000000..398baf21b2 --- /dev/null +++ b/vendor/github.com/ipfs/go-hamt-ipld/.gitignore @@ -0,0 +1,17 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +.idea diff --git a/vendor/github.com/ipfs/go-hamt-ipld/.travis.yml b/vendor/github.com/ipfs/go-hamt-ipld/.travis.yml new file mode 100644 index 0000000000..923835bc58 --- /dev/null +++ b/vendor/github.com/ipfs/go-hamt-ipld/.travis.yml @@ -0,0 +1,31 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash 
<(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/src/gx + - $GOPATH/pkg/mod + - $HOME/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/ipfs/go-hamt-ipld/LICENSE b/vendor/github.com/ipfs/go-hamt-ipld/LICENSE new file mode 100644 index 0000000000..83f48ce5a4 --- /dev/null +++ b/vendor/github.com/ipfs/go-hamt-ipld/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Whyrusleeping + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/ipfs/go-hamt-ipld/Makefile b/vendor/github.com/ipfs/go-hamt-ipld/Makefile new file mode 100644 index 0000000000..0ad4560a70 --- /dev/null +++ b/vendor/github.com/ipfs/go-hamt-ipld/Makefile @@ -0,0 +1,14 @@ +all: build + +build: + go build ./... +.PHONY: build + +test: + go test ./... +.PHONY: test + +benchmark: + go test -bench=./... 
+.PHONY: benchmark + diff --git a/vendor/github.com/ipfs/go-hamt-ipld/README.md b/vendor/github.com/ipfs/go-hamt-ipld/README.md new file mode 100644 index 0000000000..f3b9327fa2 --- /dev/null +++ b/vendor/github.com/ipfs/go-hamt-ipld/README.md @@ -0,0 +1,34 @@ +go-hamt-ipld +================== + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://libp2p.io/) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) +[![Travis CI](https://travis-ci.org/ipfs/go-hamt-ipld.svg?branch=master)](https://travis-ci.org/ipfs/go-hamt-ipld) + +> A CHAMP HAMT implemented using ipld + + +## Table of Contents + +- [Usage](#usage) +- [API](#api) +- [Contribute](#contribute) +- [License](#license) + + +## Examples + +```go +// TODO +``` + +## Contribute + +PRs are welcome! + +Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + +## License + +MIT © Whyrusleeping diff --git a/vendor/github.com/ipfs/go-hamt-ipld/cbor_gen.go b/vendor/github.com/ipfs/go-hamt-ipld/cbor_gen.go new file mode 100644 index 0000000000..409007f64e --- /dev/null +++ b/vendor/github.com/ipfs/go-hamt-ipld/cbor_gen.go @@ -0,0 +1,214 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package hamt + +import ( + "fmt" + "io" + "math/big" + + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +// NOTE: This is a generated file, but it has been modified to encode the +// bitfield big.Int as a byte array. 
The bitfield is only a big.Int because +// thats a convenient type for the operations we need to perform on it, but it +// is fundamentally an array of bytes (bits) + +var _ = xerrors.Errorf + +var lengthBufNode = []byte{130} + +func (t *Node) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufNode); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Bitfield (big.Int) (struct) + { + var b []byte + if t.Bitfield != nil { + b = t.Bitfield.Bytes() + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(b))); err != nil { + return err + } + if _, err := w.Write(b); err != nil { + return err + } + } + + // t.Pointers ([]*hamt.Pointer) (slice) + if len(t.Pointers) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Pointers was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Pointers))); err != nil { + return err + } + for _, v := range t.Pointers { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *Node) UnmarshalCBOR(r io.Reader) error { + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Bitfield (big.Int) (struct) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if maj != cbg.MajByteString { + return fmt.Errorf("big ints should be tagged cbor byte strings") + } + + if extra > 256 { + return fmt.Errorf("t.Bitfield: cbor bignum was too large") + } + + if extra > 0 { + buf := make([]byte, extra) + if _, err := io.ReadFull(br, buf); err != nil { + return err + } + t.Bitfield = big.NewInt(0).SetBytes(buf) + } else { + 
t.Bitfield = big.NewInt(0) + } + // t.Pointers ([]*hamt.Pointer) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Pointers: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Pointers = make([]*Pointer, extra) + } + + for i := 0; i < int(extra); i++ { + + var v Pointer + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Pointers[i] = &v + } + + return nil +} + +var lengthBufKV = []byte{130} + +func (t *KV) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufKV); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Key ([]uint8) (slice) + if len(t.Key) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Key was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Key))); err != nil { + return err + } + + if _, err := w.Write(t.Key); err != nil { + return err + } + + // t.Value (typegen.Deferred) (struct) + if err := t.Value.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *KV) UnmarshalCBOR(r io.Reader) error { + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Key ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Key: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + t.Key = make([]byte, extra) + if _, err := 
io.ReadFull(br, t.Key); err != nil { + return err + } + // t.Value (typegen.Deferred) (struct) + + { + + t.Value = new(cbg.Deferred) + + if err := t.Value.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("failed to read deferred field: %w", err) + } + } + return nil +} diff --git a/vendor/github.com/ipfs/go-hamt-ipld/codecov.yml b/vendor/github.com/ipfs/go-hamt-ipld/codecov.yml new file mode 100644 index 0000000000..5f88a9ea27 --- /dev/null +++ b/vendor/github.com/ipfs/go-hamt-ipld/codecov.yml @@ -0,0 +1,3 @@ +coverage: + range: "50...100" +comment: off diff --git a/vendor/github.com/ipfs/go-hamt-ipld/go.mod b/vendor/github.com/ipfs/go-hamt-ipld/go.mod new file mode 100644 index 0000000000..b85720e53f --- /dev/null +++ b/vendor/github.com/ipfs/go-hamt-ipld/go.mod @@ -0,0 +1,14 @@ +module github.com/ipfs/go-hamt-ipld + +require ( + github.com/ipfs/go-block-format v0.0.2 + github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00 + github.com/ipfs/go-ipld-cbor v0.0.4 + github.com/ipfs/go-ipld-format v0.0.2 // indirect + github.com/spaolacci/murmur3 v1.1.0 + github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d + golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb // indirect + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 +) + +go 1.13 diff --git a/vendor/github.com/ipfs/go-hamt-ipld/go.sum b/vendor/github.com/ipfs/go-hamt-ipld/go.sum new file mode 100644 index 0000000000..1a09d64cf6 --- /dev/null +++ b/vendor/github.com/ipfs/go-hamt-ipld/go.sum @@ -0,0 +1,86 @@ +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod 
h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.3 h1:UIAh32wymBpStoe83YCzwVQQ5Oy/H0FdxvUS6DJDzms= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.4 h1:UlfXKrZx1DjZoBhQHmNHLC1fK1dUJDN20Y28A7s+gJ8= +github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= +github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00 h1:QN88Q0kT2QiDaLxpR/SDsqOBtNIEF/F3n96gSDUimkA= +github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipld-cbor v0.0.4 h1:Aw3KPOKXjvrm6VjwJvFf1F1ekR/BH3jdof3Bk7OTiSA= +github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-format v0.0.1 h1:HCu4eB/Gh+KD/Q0M8u888RFkorTWNIL3da4oc5dwc80= +github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2G9Zs= +github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= +github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= +github.com/jtolds/gls v4.2.1+incompatible/go.mod 
h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.10 h1:lMoNbh2Ssd9PUF74Nz008KGzGPlfeV6wH3rit5IIGCM= +github.com/multiformats/go-multihash v0.0.10/go.mod 
h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992 h1:bzMe+2coZJYHnhGgVlcQKuRy4FSny4ds8dLQjw5P1XE= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa h1:E+gaaifzi2xF65PbDmuKI3PhLWY6G5opMLniFq8vmXA= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158 h1:WXhVOwj2USAXB5oMDwRl3piOux2XMV9TANaYxXHdkoE= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e h1:JY8o/ebUUrCYetWmjRCNghxC59cOEaili83rxPRQCLw= +github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod 
h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200501014322-5f9941ef88e0 h1:dmdwCOVtJAm7qwONARangN4jgCisVFmSJ486JZ1LYaA= +github.com/whyrusleeping/cbor-gen v0.0.0-20200501014322-5f9941ef88e0/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d h1:Y25auOnuZb/GuJvqMflRSDWBz8/HBRME8fiD+H8zLfs= +github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d h1:Z0Ahzd7HltpJtjAHHxX8QFP3j1yYgiuvjbjRzDj/KH0= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/ipfs/go-hamt-ipld/hamt.go b/vendor/github.com/ipfs/go-hamt-ipld/hamt.go new file mode 100644 index 0000000000..af34088edb --- /dev/null +++ b/vendor/github.com/ipfs/go-hamt-ipld/hamt.go @@ -0,0 +1,456 @@ +package hamt + +import ( + "bytes" + "context" + "fmt" + "math/big" + + cid "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +const arrayWidth = 3 +const defaultBitWidth = 8 + +type Node struct { + Bitfield *big.Int `refmt:"bf"` + Pointers []*Pointer `refmt:"p"` + + bitWidth int + + // for fetching and storing children + store cbor.IpldStore +} + +// Option is a function that configures the node +type Option func(*Node) + +// UseTreeBitWidth allows you to set the width of the HAMT tree +// in bits (from 1-8) via a customized hash function +func UseTreeBitWidth(bitWidth int) Option { + return func(nd *Node) { + if bitWidth > 0 && bitWidth <= 8 { + nd.bitWidth = bitWidth + } + } +} + +// NewNode creates a new IPLD HAMT Node with the given store and given +// options +func NewNode(cs cbor.IpldStore, options ...Option) *Node { + nd := &Node{ + Bitfield: big.NewInt(0), + Pointers: make([]*Pointer, 0), + store: cs, + bitWidth: defaultBitWidth, + } + // apply functional options to node before using + for _, option := range options { + option(nd) + } + return nd +} + +type KV struct { + Key []byte + Value *cbg.Deferred +} + +type Pointer struct { + KVs []*KV `refmt:"v,omitempty"` + Link cid.Cid `refmt:"l,omitempty"` + + // cached node to avoid too many serialization operations + cache *Node +} + +func (n 
*Node) Find(ctx context.Context, k string, out interface{}) error { + return n.getValue(ctx, &hashBits{b: hash([]byte(k))}, k, func(kv *KV) error { + // used to just see if the thing exists in the set + if out == nil { + return nil + } + + if um, ok := out.(cbg.CBORUnmarshaler); ok { + return um.UnmarshalCBOR(bytes.NewReader(kv.Value.Raw)) + } + + if err := cbor.DecodeInto(kv.Value.Raw, out); err != nil { + return xerrors.Errorf("cbor decoding value: %w", err) + } + + return nil + }) +} + +func (n *Node) FindRaw(ctx context.Context, k string) ([]byte, error) { + var ret []byte + err := n.getValue(ctx, &hashBits{b: hash([]byte(k))}, k, func(kv *KV) error { + ret = kv.Value.Raw + return nil + }) + return ret, err +} + +func (n *Node) Delete(ctx context.Context, k string) error { + kb := []byte(k) + return n.modifyValue(ctx, &hashBits{b: hash(kb)}, kb, nil) +} + +var ErrNotFound = fmt.Errorf("not found") +var ErrMaxDepth = fmt.Errorf("attempted to traverse hamt beyond max depth") + +func (n *Node) getValue(ctx context.Context, hv *hashBits, k string, cb func(*KV) error) error { + idx, err := hv.Next(n.bitWidth) + if err != nil { + return ErrMaxDepth + } + + if n.Bitfield.Bit(idx) == 0 { + return ErrNotFound + } + + cindex := byte(n.indexForBitPos(idx)) + + c := n.getChild(cindex) + if c.isShard() { + chnd, err := c.loadChild(ctx, n.store, n.bitWidth) + if err != nil { + return err + } + + return chnd.getValue(ctx, hv, k, cb) + } + + for _, kv := range c.KVs { + if string(kv.Key) == k { + return cb(kv) + } + } + + return ErrNotFound +} + +func (p *Pointer) loadChild(ctx context.Context, ns cbor.IpldStore, bitWidth int) (*Node, error) { + if p.cache != nil { + return p.cache, nil + } + + out, err := LoadNode(ctx, ns, p.Link) + if err != nil { + return nil, err + } + out.bitWidth = bitWidth + + p.cache = out + return out, nil +} + +func LoadNode(ctx context.Context, cs cbor.IpldStore, c cid.Cid, options ...Option) (*Node, error) { + var out Node + if err := cs.Get(ctx, 
c, &out); err != nil { + return nil, err + } + + out.store = cs + out.bitWidth = defaultBitWidth + // apply functional options to node before using + for _, option := range options { + option(&out) + } + + return &out, nil +} + +func (n *Node) checkSize(ctx context.Context) (uint64, error) { + c, err := n.store.Put(ctx, n) + if err != nil { + return 0, err + } + + var def cbg.Deferred + if err := n.store.Get(ctx, c, &def); err != nil { + return 0, nil + } + + totsize := uint64(len(def.Raw)) + for _, ch := range n.Pointers { + if ch.isShard() { + chnd, err := ch.loadChild(ctx, n.store, n.bitWidth) + if err != nil { + return 0, err + } + chsize, err := chnd.checkSize(ctx) + if err != nil { + return 0, err + } + totsize += chsize + } + } + + return totsize, nil +} + +func (n *Node) Flush(ctx context.Context) error { + for _, p := range n.Pointers { + if p.cache != nil { + if err := p.cache.Flush(ctx); err != nil { + return err + } + + c, err := n.store.Put(ctx, p.cache) + if err != nil { + return err + } + + p.cache = nil + p.Link = c + } + } + return nil +} + +// SetRaw sets key k to cbor bytes raw +func (n *Node) SetRaw(ctx context.Context, k string, raw []byte) error { + d := &cbg.Deferred{Raw: raw} + kb := []byte(k) + return n.modifyValue(ctx, &hashBits{b: hash(kb)}, kb, d) +} + +func (n *Node) Set(ctx context.Context, k string, v interface{}) error { + var d *cbg.Deferred + + kb := []byte(k) + + cm, ok := v.(cbg.CBORMarshaler) + if ok { + buf := new(bytes.Buffer) + if err := cm.MarshalCBOR(buf); err != nil { + return err + } + d = &cbg.Deferred{Raw: buf.Bytes()} + } else { + b, err := cbor.DumpObject(v) + if err != nil { + return err + } + d = &cbg.Deferred{Raw: b} + } + + return n.modifyValue(ctx, &hashBits{b: hash(kb)}, kb, d) +} + +func (n *Node) cleanChild(chnd *Node, cindex byte) error { + l := len(chnd.Pointers) + switch { + case l == 0: + return fmt.Errorf("incorrectly formed HAMT") + case l == 1: + // TODO: only do this if its a value, cant do this for 
shards unless pairs requirements are met. + + ps := chnd.Pointers[0] + if ps.isShard() { + return nil + } + + return n.setChild(cindex, ps) + case l <= arrayWidth: + var chvals []*KV + for _, p := range chnd.Pointers { + if p.isShard() { + return nil + } + + for _, sp := range p.KVs { + if len(chvals) == arrayWidth { + return nil + } + chvals = append(chvals, sp) + } + } + return n.setChild(cindex, &Pointer{KVs: chvals}) + default: + return nil + } +} + +func (n *Node) modifyValue(ctx context.Context, hv *hashBits, k []byte, v *cbg.Deferred) error { + idx, err := hv.Next(n.bitWidth) + if err != nil { + return ErrMaxDepth + } + + if n.Bitfield.Bit(idx) != 1 { + return n.insertChild(idx, k, v) + } + + cindex := byte(n.indexForBitPos(idx)) + + child := n.getChild(cindex) + if child.isShard() { + chnd, err := child.loadChild(ctx, n.store, n.bitWidth) + if err != nil { + return err + } + + if err := chnd.modifyValue(ctx, hv, k, v); err != nil { + return err + } + + // CHAMP optimization, ensure trees look correct after deletions + if v == nil { + if err := n.cleanChild(chnd, cindex); err != nil { + return err + } + } + + return nil + } + + if v == nil { + for i, p := range child.KVs { + if bytes.Equal(p.Key, k) { + if len(child.KVs) == 1 { + return n.rmChild(cindex, idx) + } + + copy(child.KVs[i:], child.KVs[i+1:]) + child.KVs = child.KVs[:len(child.KVs)-1] + return nil + } + } + return ErrNotFound + } + + // check if key already exists + for _, p := range child.KVs { + if bytes.Equal(p.Key, k) { + p.Value = v + return nil + } + } + + // If the array is full, create a subshard and insert everything into it + if len(child.KVs) >= arrayWidth { + sub := NewNode(n.store) + sub.bitWidth = n.bitWidth + hvcopy := &hashBits{b: hv.b, consumed: hv.consumed} + if err := sub.modifyValue(ctx, hvcopy, k, v); err != nil { + return err + } + + for _, p := range child.KVs { + chhv := &hashBits{b: hash(p.Key), consumed: hv.consumed} + if err := sub.modifyValue(ctx, chhv, p.Key, p.Value); 
err != nil { + return err + } + } + + c, err := n.store.Put(ctx, sub) + if err != nil { + return err + } + + return n.setChild(cindex, &Pointer{Link: c}) + } + + // otherwise insert the new element into the array in order + np := &KV{Key: k, Value: v} + for i := 0; i < len(child.KVs); i++ { + if bytes.Compare(k, child.KVs[i].Key) < 0 { + child.KVs = append(child.KVs[:i], append([]*KV{np}, child.KVs[i:]...)...) + return nil + } + } + child.KVs = append(child.KVs, np) + return nil +} + +func (n *Node) insertChild(idx int, k []byte, v *cbg.Deferred) error { + if v == nil { + return ErrNotFound + } + + i := n.indexForBitPos(idx) + n.Bitfield.SetBit(n.Bitfield, idx, 1) + + p := &Pointer{KVs: []*KV{{Key: k, Value: v}}} + + n.Pointers = append(n.Pointers[:i], append([]*Pointer{p}, n.Pointers[i:]...)...) + return nil +} + +func (n *Node) setChild(i byte, p *Pointer) error { + n.Pointers[i] = p + return nil +} + +func (n *Node) rmChild(i byte, idx int) error { + copy(n.Pointers[i:], n.Pointers[i+1:]) + n.Pointers = n.Pointers[:len(n.Pointers)-1] + n.Bitfield.SetBit(n.Bitfield, idx, 0) + + return nil +} + +func (n *Node) getChild(i byte) *Pointer { + if int(i) >= len(n.Pointers) || i < 0 { + return nil + } + + return n.Pointers[i] +} + +func (n *Node) Copy() *Node { + nn := NewNode(n.store) + nn.bitWidth = n.bitWidth + nn.Bitfield.Set(n.Bitfield) + nn.Pointers = make([]*Pointer, len(n.Pointers)) + + for i, p := range n.Pointers { + pp := &Pointer{} + if p.cache != nil { + pp.cache = p.cache.Copy() + } + pp.Link = p.Link + if p.KVs != nil { + pp.KVs = make([]*KV, len(p.KVs)) + for j, kv := range p.KVs { + pp.KVs[j] = &KV{Key: kv.Key, Value: kv.Value} + } + } + nn.Pointers[i] = pp + } + + return nn +} + +func (p *Pointer) isShard() bool { + return p.Link.Defined() +} + +func (n *Node) ForEach(ctx context.Context, f func(k string, val interface{}) error) error { + for _, p := range n.Pointers { + if p.isShard() { + chnd, err := p.loadChild(ctx, n.store, n.bitWidth) + if err != 
nil { + return err + } + + if err := chnd.ForEach(ctx, f); err != nil { + return err + } + } else { + for _, kv := range p.KVs { + // TODO: consider removing 'strings as keys' from every interface, go full-on bytes everywhere + if err := f(string(kv.Key), kv.Value); err != nil { + return err + } + } + } + } + return nil +} diff --git a/vendor/github.com/ipfs/go-hamt-ipld/hash.go b/vendor/github.com/ipfs/go-hamt-ipld/hash.go new file mode 100644 index 0000000000..e784d2a215 --- /dev/null +++ b/vendor/github.com/ipfs/go-hamt-ipld/hash.go @@ -0,0 +1,56 @@ +package hamt + +import ( + "fmt" + + "github.com/spaolacci/murmur3" +) + +// hashBits is a helper that allows the reading of the 'next n bits' as an integer. +type hashBits struct { + b []byte + consumed int +} + +func mkmask(n int) byte { + return (1 << uint(n)) - 1 +} + +// Next returns the next 'i' bits of the hashBits value as an integer, or an +// error if there aren't enough bits. +func (hb *hashBits) Next(i int) (int, error) { + if hb.consumed+i > len(hb.b)*8 { + return 0, fmt.Errorf("sharded directory too deep") + } + return hb.next(i), nil +} + +func (hb *hashBits) next(i int) int { + curbi := hb.consumed / 8 + leftb := 8 - (hb.consumed % 8) + + curb := hb.b[curbi] + if i == leftb { + out := int(mkmask(i) & curb) + hb.consumed += i + return out + } else if i < leftb { + a := curb & mkmask(leftb) // mask out the high bits we don't want + b := a & ^mkmask(leftb-i) // mask out the low bits we don't want + c := b >> uint(leftb-i) // shift whats left down + hb.consumed += i + return int(c) + } else { + out := int(mkmask(leftb) & curb) + out <<= uint(i - leftb) + hb.consumed += leftb + out += hb.next(i - leftb) + return out + } +} + +var hash = func(val []byte) []byte { + h := murmur3.New64() + h.Write(val) + return h.Sum(nil) +} diff --git a/vendor/github.com/ipfs/go-hamt-ipld/pointer_cbor.go b/vendor/github.com/ipfs/go-hamt-ipld/pointer_cbor.go new file mode 100644 index 0000000000..8734626336 --- /dev/null +++ 
b/vendor/github.com/ipfs/go-hamt-ipld/pointer_cbor.go @@ -0,0 +1,133 @@ +package hamt + +import ( + "fmt" + "io" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" +) + +var keyZero = []byte("0") +var keyOne = []byte("1") + +func (t *Pointer) MarshalCBOR(w io.Writer) error { + if t.Link != cid.Undef && len(t.KVs) > 0 { + return fmt.Errorf("hamt Pointer cannot have both a link and KVs") + } + + scratch := make([]byte, 9) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajMap, 1); err != nil { + return err + } + + if t.Link != cid.Undef { + // key for links is "0" + // Refmt (and the general IPLD data model currently) can't deal + // with non string keys. So we have this weird restriction right now + // hoping to be able to use integer keys soon + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, 1); err != nil { + return err + } + + if _, err := w.Write(keyZero); err != nil { + return err + } + + if err := cbg.WriteCidBuf(scratch, w, t.Link); err != nil { + return err + } + } else { + // key for KVs is "1" + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, 1); err != nil { + return err + } + + if _, err := w.Write(keyOne); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.KVs))); err != nil { + return err + } + + for _, kv := range t.KVs { + if err := kv.MarshalCBOR(w); err != nil { + return err + } + } + } + + return nil +} + +func (t *Pointer) UnmarshalCBOR(br io.Reader) error { + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of map") + } + + if extra != 1 { + return fmt.Errorf("Pointers should be a single element map") + } + + maj, val, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if maj != cbg.MajTextString { + return fmt.Errorf("expected text string key") 
+ } + + if val != 1 { + return fmt.Errorf("map keys in pointers must be a single byte long") + } + + if _, err := io.ReadAtLeast(br, scratch[:1], 1); err != nil { + return err + } + + switch scratch[0] { + case '0': + c, err := cbg.ReadCid(br) + if err != nil { + return err + } + t.Link = c + return nil + case '1': + maj, length, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected an array of KVs in cbor input") + } + + if length > 32 { + return fmt.Errorf("KV array in cbor input for pointer was too long") + } + + t.KVs = make([]*KV, length) + for i := 0; i < int(length); i++ { + var kv KV + if err := kv.UnmarshalCBOR(br); err != nil { + return err + } + + t.KVs[i] = &kv + } + + return nil + default: + return fmt.Errorf("invalid pointer map key in cbor input: %d", val) + } +} diff --git a/vendor/github.com/ipfs/go-hamt-ipld/uhamt.go b/vendor/github.com/ipfs/go-hamt-ipld/uhamt.go new file mode 100644 index 0000000000..e7cf8a4001 --- /dev/null +++ b/vendor/github.com/ipfs/go-hamt-ipld/uhamt.go @@ -0,0 +1,27 @@ +package hamt + +import ( + "math/big" + "math/bits" +) + +// indexForBitPos returns the index within the collapsed array corresponding to +// the given bit in the bitset. The collapsed array contains only one entry +// per bit set in the bitfield, and this function is used to map the indices. +func (n *Node) indexForBitPos(bp int) int { + return indexForBitPos(bp, n.Bitfield) +} + +func indexForBitPos(bp int, bitfield *big.Int) int { + var x uint + var count, i int + w := bitfield.Bits() + for x = uint(bp); x > bits.UintSize && i < len(w); x -= bits.UintSize { + count += bits.OnesCount(uint(w[i])) + i++ + } + if i == len(w) { + return count + } + return count + bits.OnesCount(uint(w[i])&((1< go-ipfs-blockstore implements a thin wrapper over a datastore, giving a clean interface for Getting and Putting block objects. 
+ +## Lead Maintainer + +[Steven Allen](https://github.com/Stebalien) + + +## Table of Contents + +- [Install](#install) +- [Usage](#usage) +- [Contribute](#contribute) +- [License](#license) + +## Install + +`go-ipfs-blockstore` works like a regular Go module: + +``` +> go get github.com/ipfs/go-ipfs-blockstore +``` + +## Usage + +``` +import "github.com/ipfs/go-ipfs-blockstore" +``` + +Check the [GoDoc documentation](https://godoc.org/github.com/ipfs/go-ipfs-blockstore) + +## Contribute + +PRs accepted. + +Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + +## License + +MIT © Protocol Labs, Inc. diff --git a/vendor/github.com/ipfs/go-ipfs-blockstore/arc_cache.go b/vendor/github.com/ipfs/go-ipfs-blockstore/arc_cache.go new file mode 100644 index 0000000000..e2b930dcad --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-blockstore/arc_cache.go @@ -0,0 +1,184 @@ +package blockstore + +import ( + "context" + + lru "github.com/hashicorp/golang-lru" + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + metrics "github.com/ipfs/go-metrics-interface" +) + +type cacheHave bool +type cacheSize int + +// arccache wraps a BlockStore with an Adaptive Replacement Cache (ARC) for +// block Cids. This provides block access-time improvements, allowing +// to short-cut many searches without query-ing the underlying datastore. 
+type arccache struct { + arc *lru.TwoQueueCache + blockstore Blockstore + + hits metrics.Counter + total metrics.Counter +} + +func newARCCachedBS(ctx context.Context, bs Blockstore, lruSize int) (*arccache, error) { + arc, err := lru.New2Q(lruSize) + if err != nil { + return nil, err + } + c := &arccache{arc: arc, blockstore: bs} + c.hits = metrics.NewCtx(ctx, "arc.hits_total", "Number of ARC cache hits").Counter() + c.total = metrics.NewCtx(ctx, "arc_total", "Total number of ARC cache requests").Counter() + + return c, nil +} + +func (b *arccache) DeleteBlock(k cid.Cid) error { + if has, _, ok := b.hasCached(k); ok && !has { + return nil + } + + b.arc.Remove(k) // Invalidate cache before deleting. + err := b.blockstore.DeleteBlock(k) + if err == nil { + b.cacheHave(k, false) + } + return err +} + +// if ok == false has is inconclusive +// if ok == true then has respons to question: is it contained +func (b *arccache) hasCached(k cid.Cid) (has bool, size int, ok bool) { + b.total.Inc() + if !k.Defined() { + log.Error("undefined cid in arccache") + // Return cache invalid so the call to blockstore happens + // in case of invalid key and correct error is created. + return false, -1, false + } + + h, ok := b.arc.Get(string(k.Hash())) + if ok { + b.hits.Inc() + switch h := h.(type) { + case cacheHave: + return bool(h), -1, true + case cacheSize: + return true, int(h), true + } + } + return false, -1, false +} + +func (b *arccache) Has(k cid.Cid) (bool, error) { + if has, _, ok := b.hasCached(k); ok { + return has, nil + } + has, err := b.blockstore.Has(k) + if err != nil { + return false, err + } + b.cacheHave(k, has) + return has, nil +} + +func (b *arccache) GetSize(k cid.Cid) (int, error) { + if has, blockSize, ok := b.hasCached(k); ok { + if !has { + // don't have it, return + return -1, ErrNotFound + } + if blockSize >= 0 { + // have it and we know the size + return blockSize, nil + } + // we have it but don't know the size, ask the datastore. 
+ } + blockSize, err := b.blockstore.GetSize(k) + if err == ErrNotFound { + b.cacheHave(k, false) + } else if err == nil { + b.cacheSize(k, blockSize) + } + return blockSize, err +} + +func (b *arccache) Get(k cid.Cid) (blocks.Block, error) { + if !k.Defined() { + log.Error("undefined cid in arc cache") + return nil, ErrNotFound + } + + if has, _, ok := b.hasCached(k); ok && !has { + return nil, ErrNotFound + } + + bl, err := b.blockstore.Get(k) + if bl == nil && err == ErrNotFound { + b.cacheHave(k, false) + } else if bl != nil { + b.cacheSize(k, len(bl.RawData())) + } + return bl, err +} + +func (b *arccache) Put(bl blocks.Block) error { + if has, _, ok := b.hasCached(bl.Cid()); ok && has { + return nil + } + + err := b.blockstore.Put(bl) + if err == nil { + b.cacheSize(bl.Cid(), len(bl.RawData())) + } + return err +} + +func (b *arccache) PutMany(bs []blocks.Block) error { + var good []blocks.Block + for _, block := range bs { + // call put on block if result is inconclusive or we are sure that + // the block isn't in storage + if has, _, ok := b.hasCached(block.Cid()); !ok || (ok && !has) { + good = append(good, block) + } + } + err := b.blockstore.PutMany(good) + if err != nil { + return err + } + for _, block := range good { + b.cacheSize(block.Cid(), len(block.RawData())) + } + return nil +} + +func (b *arccache) HashOnRead(enabled bool) { + b.blockstore.HashOnRead(enabled) +} + +func (b *arccache) cacheHave(c cid.Cid, have bool) { + b.arc.Add(string(c.Hash()), cacheHave(have)) +} + +func (b *arccache) cacheSize(c cid.Cid, blockSize int) { + b.arc.Add(string(c.Hash()), cacheSize(blockSize)) +} + +func (b *arccache) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return b.blockstore.AllKeysChan(ctx) +} + +func (b *arccache) GCLock() Unlocker { + return b.blockstore.(GCBlockstore).GCLock() +} + +func (b *arccache) PinLock() Unlocker { + return b.blockstore.(GCBlockstore).PinLock() +} + +func (b *arccache) GCRequested() bool { + return 
b.blockstore.(GCBlockstore).GCRequested() +} diff --git a/vendor/github.com/ipfs/go-ipfs-blockstore/blockstore.go b/vendor/github.com/ipfs/go-ipfs-blockstore/blockstore.go new file mode 100644 index 0000000000..f8eb07a7d7 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-blockstore/blockstore.go @@ -0,0 +1,281 @@ +// Package blockstore implements a thin wrapper over a datastore, giving a +// clean interface for Getting and Putting block objects. +package blockstore + +import ( + "context" + "errors" + "sync" + "sync/atomic" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + dsns "github.com/ipfs/go-datastore/namespace" + dsq "github.com/ipfs/go-datastore/query" + dshelp "github.com/ipfs/go-ipfs-ds-help" + logging "github.com/ipfs/go-log" + uatomic "go.uber.org/atomic" +) + +var log = logging.Logger("blockstore") + +// BlockPrefix namespaces blockstore datastores +var BlockPrefix = ds.NewKey("blocks") + +// ErrHashMismatch is an error returned when the hash of a block +// is different than expected. +var ErrHashMismatch = errors.New("block in storage has different hash than requested") + +// ErrNotFound is an error returned when a block is not found. +var ErrNotFound = errors.New("blockstore: block not found") + +// Blockstore wraps a Datastore block-centered methods and provides a layer +// of abstraction which allows to add different caching strategies. +type Blockstore interface { + DeleteBlock(cid.Cid) error + Has(cid.Cid) (bool, error) + Get(cid.Cid) (blocks.Block, error) + + // GetSize returns the CIDs mapped BlockSize + GetSize(cid.Cid) (int, error) + + // Put puts a given block to the underlying datastore + Put(blocks.Block) error + + // PutMany puts a slice of blocks at the same time using batching + // capabilities of the underlying datastore whenever possible. + PutMany([]blocks.Block) error + + // AllKeysChan returns a channel from which + // the CIDs in the Blockstore can be read. 
It should respect + // the given context, closing the channel if it becomes Done. + AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) + + // HashOnRead specifies if every read block should be + // rehashed to make sure it matches its CID. + HashOnRead(enabled bool) +} + +// GCLocker abstract functionality to lock a blockstore when performing +// garbage-collection operations. +type GCLocker interface { + // GCLock locks the blockstore for garbage collection. No operations + // that expect to finish with a pin should ocurr simultaneously. + // Reading during GC is safe, and requires no lock. + GCLock() Unlocker + + // PinLock locks the blockstore for sequences of puts expected to finish + // with a pin (before GC). Multiple put->pin sequences can write through + // at the same time, but no GC should happen simulatenously. + // Reading during Pinning is safe, and requires no lock. + PinLock() Unlocker + + // GcRequested returns true if GCLock has been called and is waiting to + // take the lock + GCRequested() bool +} + +// GCBlockstore is a blockstore that can safely run garbage-collection +// operations. +type GCBlockstore interface { + Blockstore + GCLocker +} + +// NewGCBlockstore returns a default implementation of GCBlockstore +// using the given Blockstore and GCLocker. +func NewGCBlockstore(bs Blockstore, gcl GCLocker) GCBlockstore { + return gcBlockstore{bs, gcl} +} + +type gcBlockstore struct { + Blockstore + GCLocker +} + +// NewBlockstore returns a default Blockstore implementation +// using the provided datastore.Batching backend. 
+func NewBlockstore(d ds.Batching) Blockstore { + var dsb ds.Batching + dd := dsns.Wrap(d, BlockPrefix) + dsb = dd + return &blockstore{ + datastore: dsb, + rehash: uatomic.NewBool(false), + } +} + +type blockstore struct { + datastore ds.Batching + + rehash *uatomic.Bool +} + +func (bs *blockstore) HashOnRead(enabled bool) { + bs.rehash.Store(enabled) +} + +func (bs *blockstore) Get(k cid.Cid) (blocks.Block, error) { + if !k.Defined() { + log.Error("undefined cid in blockstore") + return nil, ErrNotFound + } + bdata, err := bs.datastore.Get(dshelp.MultihashToDsKey(k.Hash())) + if err == ds.ErrNotFound { + return nil, ErrNotFound + } + if err != nil { + return nil, err + } + if bs.rehash.Load() { + rbcid, err := k.Prefix().Sum(bdata) + if err != nil { + return nil, err + } + + if !rbcid.Equals(k) { + return nil, ErrHashMismatch + } + + return blocks.NewBlockWithCid(bdata, rbcid) + } + return blocks.NewBlockWithCid(bdata, k) +} + +func (bs *blockstore) Put(block blocks.Block) error { + k := dshelp.MultihashToDsKey(block.Cid().Hash()) + + // Has is cheaper than Put, so see if we already have it + exists, err := bs.datastore.Has(k) + if err == nil && exists { + return nil // already stored. 
+ } + return bs.datastore.Put(k, block.RawData()) +} + +func (bs *blockstore) PutMany(blocks []blocks.Block) error { + t, err := bs.datastore.Batch() + if err != nil { + return err + } + for _, b := range blocks { + k := dshelp.MultihashToDsKey(b.Cid().Hash()) + exists, err := bs.datastore.Has(k) + if err == nil && exists { + continue + } + + err = t.Put(k, b.RawData()) + if err != nil { + return err + } + } + return t.Commit() +} + +func (bs *blockstore) Has(k cid.Cid) (bool, error) { + return bs.datastore.Has(dshelp.MultihashToDsKey(k.Hash())) +} + +func (bs *blockstore) GetSize(k cid.Cid) (int, error) { + size, err := bs.datastore.GetSize(dshelp.MultihashToDsKey(k.Hash())) + if err == ds.ErrNotFound { + return -1, ErrNotFound + } + return size, err +} + +func (bs *blockstore) DeleteBlock(k cid.Cid) error { + return bs.datastore.Delete(dshelp.MultihashToDsKey(k.Hash())) +} + +// AllKeysChan runs a query for keys from the blockstore. +// this is very simplistic, in the future, take dsq.Query as a param? +// +// AllKeysChan respects context. +func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + + // KeysOnly, because that would be _a lot_ of data. + q := dsq.Query{KeysOnly: true} + res, err := bs.datastore.Query(q) + if err != nil { + return nil, err + } + + output := make(chan cid.Cid, dsq.KeysOnlyBufSize) + go func() { + defer func() { + res.Close() // ensure exit (signals early exit, too) + close(output) + }() + + for { + e, ok := res.NextSync() + if !ok { + return + } + if e.Error != nil { + log.Errorf("blockstore.AllKeysChan got err: %s", e.Error) + return + } + + // need to convert to key.Key using key.KeyFromDsKey. 
+ bk, err := dshelp.BinaryFromDsKey(ds.RawKey(e.Key)) + if err != nil { + log.Warningf("error parsing key from binary: %s", err) + continue + } + k := cid.NewCidV1(cid.Raw, bk) + select { + case <-ctx.Done(): + return + case output <- k: + } + } + }() + + return output, nil +} + +// NewGCLocker returns a default implementation of +// GCLocker using standard [RW] mutexes. +func NewGCLocker() GCLocker { + return &gclocker{} +} + +type gclocker struct { + lk sync.RWMutex + gcreq int32 +} + +// Unlocker represents an object which can Unlock +// something. +type Unlocker interface { + Unlock() +} + +type unlocker struct { + unlock func() +} + +func (u *unlocker) Unlock() { + u.unlock() + u.unlock = nil // ensure its not called twice +} + +func (bs *gclocker) GCLock() Unlocker { + atomic.AddInt32(&bs.gcreq, 1) + bs.lk.Lock() + atomic.AddInt32(&bs.gcreq, -1) + return &unlocker{bs.lk.Unlock} +} + +func (bs *gclocker) PinLock() Unlocker { + bs.lk.RLock() + return &unlocker{bs.lk.RUnlock} +} + +func (bs *gclocker) GCRequested() bool { + return atomic.LoadInt32(&bs.gcreq) > 0 +} diff --git a/vendor/github.com/ipfs/go-ipfs-blockstore/bloom_cache.go b/vendor/github.com/ipfs/go-ipfs-blockstore/bloom_cache.go new file mode 100644 index 0000000000..b4fadc2ef0 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-blockstore/bloom_cache.go @@ -0,0 +1,204 @@ +package blockstore + +import ( + "context" + "fmt" + "sync/atomic" + "time" + + bloom "github.com/ipfs/bbloom" + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + metrics "github.com/ipfs/go-metrics-interface" +) + +// bloomCached returns a Blockstore that caches Has requests using a Bloom +// filter. bloomSize is size of bloom filter in bytes. hashCount specifies the +// number of hashing functions in the bloom filter (usually known as k). 
+func bloomCached(ctx context.Context, bs Blockstore, bloomSize, hashCount int) (*bloomcache, error) { + bl, err := bloom.New(float64(bloomSize), float64(hashCount)) + if err != nil { + return nil, err + } + bc := &bloomcache{ + blockstore: bs, + bloom: bl, + hits: metrics.NewCtx(ctx, "bloom.hits_total", + "Number of cache hits in bloom cache").Counter(), + total: metrics.NewCtx(ctx, "bloom_total", + "Total number of requests to bloom cache").Counter(), + buildChan: make(chan struct{}), + } + go func() { + err := bc.build(ctx) + if err != nil { + select { + case <-ctx.Done(): + log.Warning("Cache rebuild closed by context finishing: ", err) + default: + log.Error(err) + } + return + } + if metrics.Active() { + fill := metrics.NewCtx(ctx, "bloom_fill_ratio", + "Ratio of bloom filter fullnes, (updated once a minute)").Gauge() + + t := time.NewTicker(1 * time.Minute) + defer t.Stop() + for { + select { + case <-ctx.Done(): + return + case <-t.C: + fill.Set(bc.bloom.FillRatioTS()) + } + } + } + }() + return bc, nil +} + +type bloomcache struct { + active int32 + + bloom *bloom.Bloom + buildErr error + + buildChan chan struct{} + blockstore Blockstore + + // Statistics + hits metrics.Counter + total metrics.Counter +} + +func (b *bloomcache) BloomActive() bool { + return atomic.LoadInt32(&b.active) != 0 +} + +func (b *bloomcache) Wait(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + case <-b.buildChan: + return b.buildErr + } +} + +func (b *bloomcache) build(ctx context.Context) error { + evt := log.EventBegin(ctx, "bloomcache.build") + defer evt.Done() + defer close(b.buildChan) + + ch, err := b.blockstore.AllKeysChan(ctx) + if err != nil { + b.buildErr = fmt.Errorf("AllKeysChan failed in bloomcache rebuild with: %v", err) + return b.buildErr + } + for { + select { + case key, ok := <-ch: + if !ok { + atomic.StoreInt32(&b.active, 1) + return nil + } + b.bloom.AddTS(key.Hash()) // Use binary key, the more compact the better + case 
<-ctx.Done(): + b.buildErr = ctx.Err() + return b.buildErr + } + } +} + +func (b *bloomcache) DeleteBlock(k cid.Cid) error { + if has, ok := b.hasCached(k); ok && !has { + return nil + } + + return b.blockstore.DeleteBlock(k) +} + +// if ok == false has is inconclusive +// if ok == true then has respons to question: is it contained +func (b *bloomcache) hasCached(k cid.Cid) (has bool, ok bool) { + b.total.Inc() + if !k.Defined() { + log.Error("undefined in bloom cache") + // Return cache invalid so call to blockstore + // in case of invalid key is forwarded deeper + return false, false + } + if b.BloomActive() { + blr := b.bloom.HasTS(k.Hash()) + if !blr { // not contained in bloom is only conclusive answer bloom gives + b.hits.Inc() + return false, true + } + } + return false, false +} + +func (b *bloomcache) Has(k cid.Cid) (bool, error) { + if has, ok := b.hasCached(k); ok { + return has, nil + } + + return b.blockstore.Has(k) +} + +func (b *bloomcache) GetSize(k cid.Cid) (int, error) { + return b.blockstore.GetSize(k) +} + +func (b *bloomcache) Get(k cid.Cid) (blocks.Block, error) { + if has, ok := b.hasCached(k); ok && !has { + return nil, ErrNotFound + } + + return b.blockstore.Get(k) +} + +func (b *bloomcache) Put(bl blocks.Block) error { + // See comment in PutMany + err := b.blockstore.Put(bl) + if err == nil { + b.bloom.AddTS(bl.Cid().Hash()) + } + return err +} + +func (b *bloomcache) PutMany(bs []blocks.Block) error { + // bloom cache gives only conclusive resulty if key is not contained + // to reduce number of puts we need conclusive information if block is contained + // this means that PutMany can't be improved with bloom cache so we just + // just do a passthrough. 
+ err := b.blockstore.PutMany(bs) + if err != nil { + return err + } + for _, bl := range bs { + b.bloom.AddTS(bl.Cid().Hash()) + } + return nil +} + +func (b *bloomcache) HashOnRead(enabled bool) { + b.blockstore.HashOnRead(enabled) +} + +func (b *bloomcache) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return b.blockstore.AllKeysChan(ctx) +} + +func (b *bloomcache) GCLock() Unlocker { + return b.blockstore.(GCBlockstore).GCLock() +} + +func (b *bloomcache) PinLock() Unlocker { + return b.blockstore.(GCBlockstore).PinLock() +} + +func (b *bloomcache) GCRequested() bool { + return b.blockstore.(GCBlockstore).GCRequested() +} diff --git a/vendor/github.com/ipfs/go-ipfs-blockstore/caching.go b/vendor/github.com/ipfs/go-ipfs-blockstore/caching.go new file mode 100644 index 0000000000..798b84ce2b --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-blockstore/caching.go @@ -0,0 +1,55 @@ +package blockstore + +import ( + "context" + "errors" + + metrics "github.com/ipfs/go-metrics-interface" +) + +// CacheOpts wraps options for CachedBlockStore(). +// Next to each option is it aproximate memory usage per unit +type CacheOpts struct { + HasBloomFilterSize int // 1 byte + HasBloomFilterHashes int // No size, 7 is usually best, consult bloom papers + HasARCCacheSize int // 32 bytes +} + +// DefaultCacheOpts returns a CacheOpts initialized with default values. +func DefaultCacheOpts() CacheOpts { + return CacheOpts{ + HasBloomFilterSize: 512 << 10, + HasBloomFilterHashes: 7, + HasARCCacheSize: 64 << 10, + } +} + +// CachedBlockstore returns a blockstore wrapped in an ARCCache and +// then in a bloom filter cache, if the options indicate it. 
+func CachedBlockstore( + ctx context.Context, + bs Blockstore, + opts CacheOpts) (cbs Blockstore, err error) { + cbs = bs + + if opts.HasBloomFilterSize < 0 || opts.HasBloomFilterHashes < 0 || + opts.HasARCCacheSize < 0 { + return nil, errors.New("all options for cache need to be greater than zero") + } + + if opts.HasBloomFilterSize != 0 && opts.HasBloomFilterHashes == 0 { + return nil, errors.New("bloom filter hash count can't be 0 when there is size set") + } + + ctx = metrics.CtxSubScope(ctx, "bs.cache") + + if opts.HasARCCacheSize > 0 { + cbs, err = newARCCachedBS(ctx, cbs, opts.HasARCCacheSize) + } + if opts.HasBloomFilterSize != 0 { + // *8 because of bytes to bits conversion + cbs, err = bloomCached(ctx, cbs, opts.HasBloomFilterSize*8, opts.HasBloomFilterHashes) + } + + return cbs, err +} diff --git a/vendor/github.com/ipfs/go-ipfs-blockstore/go.mod b/vendor/github.com/ipfs/go-ipfs-blockstore/go.mod new file mode 100644 index 0000000000..f7adb3de6a --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-blockstore/go.mod @@ -0,0 +1,17 @@ +module github.com/ipfs/go-ipfs-blockstore + +require ( + github.com/hashicorp/golang-lru v0.5.4 + github.com/ipfs/bbloom v0.0.4 + github.com/ipfs/go-block-format v0.0.2 + github.com/ipfs/go-cid v0.0.7 + github.com/ipfs/go-datastore v0.4.2 + github.com/ipfs/go-ipfs-ds-help v1.0.0 + github.com/ipfs/go-ipfs-util v0.0.1 + github.com/ipfs/go-log v0.0.1 + github.com/ipfs/go-metrics-interface v0.0.1 + github.com/multiformats/go-multihash v0.0.13 + go.uber.org/atomic v1.6.0 +) + +go 1.13 diff --git a/vendor/github.com/ipfs/go-ipfs-blockstore/go.sum b/vendor/github.com/ipfs/go-ipfs-blockstore/go.sum new file mode 100644 index 0000000000..a91a726b2f --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-blockstore/go.sum @@ -0,0 +1,116 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew 
v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= +github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= +github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-cid v0.0.1 h1:GBjWPktLnNyX0JiQCNFpUuUSoMw5KMyqrsejHYlILBE= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= +github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY= +github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-datastore v0.4.1 h1:W4ZfzyhNi3xmuU5dQhjfuRn/wFuqEE1KnOmmQiOevEY= +github.com/ipfs/go-datastore 
v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.2 h1:h8/n7WPzhp239kkLws+epN3Ic7YtcBPgcaXfEfdVDWM= +github.com/ipfs/go-datastore v0.4.2/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-ds-help v1.0.0 h1:bEQ8hMGs80h0sR8O4tfDgV6B01aaF9qeTrujrTLYV3g= +github.com/ipfs/go-ipfs-ds-help v1.0.0/go.mod h1:ujAbkeIgkKAWtxxNkoZHWLCyk5JpPoKnGyCcsoF6ueE= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-log v0.0.1 h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc= +github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= +github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 h1:bspPhN+oKYFk5fcGNuQzp6IGzYQSenLEgH3s6jkXrWw= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= 
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= 
+github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc h1:9lDbC6Rz4bwmou+oE6Dt4Cb2BGMur5eR/GYptkKUVHo= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= 
+golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7 h1:C2F/nMkR/9sfUTpvR3QrjBuTdvMUC/cFajkphs1YLQo= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d 
h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c h1:IGkKhmfzcztjm6gYkykvu/NiS8kaqbCWAEWWAyf8J5U= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/ipfs/go-ipfs-blockstore/idstore.go b/vendor/github.com/ipfs/go-ipfs-blockstore/idstore.go new file mode 100644 index 0000000000..477da70b20 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-blockstore/idstore.go @@ -0,0 +1,91 @@ +package blockstore + +import ( + "context" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" +) + +// idstore wraps a BlockStore to add support for identity hashes +type idstore struct { + bs Blockstore +} + +func NewIdStore(bs Blockstore) Blockstore { + return &idstore{bs} +} + +func extractContents(k cid.Cid) (bool, []byte) { + // Pre-check by calling Prefix(), this much faster than extracting the hash. 
+ if k.Prefix().MhType != mh.IDENTITY { + return false, nil + } + + dmh, err := mh.Decode(k.Hash()) + if err != nil || dmh.Code != mh.ID { + return false, nil + } + return true, dmh.Digest +} + +func (b *idstore) DeleteBlock(k cid.Cid) error { + isId, _ := extractContents(k) + if isId { + return nil + } + return b.bs.DeleteBlock(k) +} + +func (b *idstore) Has(k cid.Cid) (bool, error) { + isId, _ := extractContents(k) + if isId { + return true, nil + } + return b.bs.Has(k) +} + +func (b *idstore) GetSize(k cid.Cid) (int, error) { + isId, bdata := extractContents(k) + if isId { + return len(bdata), nil + } + return b.bs.GetSize(k) +} + +func (b *idstore) Get(k cid.Cid) (blocks.Block, error) { + isId, bdata := extractContents(k) + if isId { + return blocks.NewBlockWithCid(bdata, k) + } + return b.bs.Get(k) +} + +func (b *idstore) Put(bl blocks.Block) error { + isId, _ := extractContents(bl.Cid()) + if isId { + return nil + } + return b.bs.Put(bl) +} + +func (b *idstore) PutMany(bs []blocks.Block) error { + toPut := make([]blocks.Block, 0, len(bs)) + for _, bl := range bs { + isId, _ := extractContents(bl.Cid()) + if isId { + continue + } + toPut = append(toPut, bl) + } + return b.bs.PutMany(toPut) +} + +func (b *idstore) HashOnRead(enabled bool) { + b.bs.HashOnRead(enabled) +} + +func (b *idstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return b.bs.AllKeysChan(ctx) +} diff --git a/vendor/github.com/ipfs/go-ipfs-ds-help/.travis.yml b/vendor/github.com/ipfs/go-ipfs-ds-help/.travis.yml new file mode 100644 index 0000000000..a156d3eb5e --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-ds-help/.travis.yml @@ -0,0 +1,30 @@ +os: + - linux + +language: go + +go: + - 1.13.x + +env: + global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - 
$GOPATH/pkg/mod + - $HOME/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/ipfs/go-ipfs-ds-help/LICENSE b/vendor/github.com/ipfs/go-ipfs-ds-help/LICENSE new file mode 100644 index 0000000000..e4224df5b7 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-ds-help/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 IPFS + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/ipfs/go-ipfs-ds-help/Makefile b/vendor/github.com/ipfs/go-ipfs-ds-help/Makefile new file mode 100644 index 0000000000..73f2841f61 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-ds-help/Makefile @@ -0,0 +1,18 @@ +all: deps +gx: + go get github.com/whyrusleeping/gx + go get github.com/whyrusleeping/gx-go +deps: gx + gx --verbose install --global + gx-go rewrite +test: deps + gx test -v -race -coverprofile=coverage.txt -covermode=atomic . 
+rw: + gx-go rewrite +rwundo: + gx-go rewrite --undo +publish: rwundo + gx publish +.PHONY: all gx deps test rw rwundo publish + + diff --git a/vendor/github.com/ipfs/go-ipfs-ds-help/README.md b/vendor/github.com/ipfs/go-ipfs-ds-help/README.md new file mode 100644 index 0000000000..2af3bff469 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-ds-help/README.md @@ -0,0 +1,44 @@ +# go-ipfs-ds-help + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) +[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) +[![GoDoc](https://godoc.org/github.com/ipfs/go-ipfs-ds-help?status.svg)](https://godoc.org/github.com/ipfs/go-ipfs-ds-help) +[![Build Status](https://travis-ci.org/ipfs/go-ipfs-ds-help.svg?branch=master)](https://travis-ci.org/ipfs/go-ipfs-ds-help) + +> go-ipfs-ds-help provides utilities for parsing and creating datastore keys used by go-ipfs. + +## Table of Contents + +- [Install](#install) +- [Usage](#usage) +- [Contribute](#contribute) +- [License](#license) + +## Install + +`go-ipfs-ds-help` works like a regular Go module: + +``` +> go get github.com/ipfs/go-ipfs-ds-help +``` + +## Usage + +``` +import "github.com/ipfs/go-ipfs-ds-help" +``` + +Check the [GoDoc documentation](https://godoc.org/github.com/ipfs/go-ipfs-ds-help) + +This module uses [Gx](https://github.com/whyrusleeping/gx) to manage dependencies. You can use `make all` to build it with the `gx` dependencies. + +## Contribute + +PRs accepted. + +Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + +## License + +MIT © Protocol Labs, Inc. 
diff --git a/vendor/github.com/ipfs/go-ipfs-ds-help/go.mod b/vendor/github.com/ipfs/go-ipfs-ds-help/go.mod new file mode 100644 index 0000000000..0c91c87072 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-ds-help/go.mod @@ -0,0 +1,10 @@ +module github.com/ipfs/go-ipfs-ds-help + +require ( + github.com/ipfs/go-cid v0.0.5 + github.com/ipfs/go-datastore v0.4.1 + github.com/multiformats/go-base32 v0.0.3 + github.com/multiformats/go-multihash v0.0.13 +) + +go 1.13 diff --git a/vendor/github.com/ipfs/go-ipfs-ds-help/go.sum b/vendor/github.com/ipfs/go-ipfs-ds-help/go.sum new file mode 100644 index 0000000000..da0e2055f5 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-ds-help/go.sum @@ -0,0 +1,56 @@ +github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/ipfs/go-cid v0.0.4 h1:UlfXKrZx1DjZoBhQHmNHLC1fK1dUJDN20Y28A7s+gJ8= +github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= +github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= +github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-datastore v0.1.1 h1:F4k0TkTAZGLFzBOrVKDAvch6JZtuN4NHkfdcEZL50aI= +github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.3.1 h1:SS1t869a6cctoSYmZXUk8eL6AzVXgASmKIWFNQkQ1jU= +github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.4.1 h1:W4ZfzyhNi3xmuU5dQhjfuRn/wFuqEE1KnOmmQiOevEY= +github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-ipfs-delay 
v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 h1:bspPhN+oKYFk5fcGNuQzp6IGzYQSenLEgH3s6jkXrWw= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= 
+github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multihash v0.0.10 h1:lMoNbh2Ssd9PUF74Nz008KGzGPlfeV6wH3rit5IIGCM= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/ipfs/go-ipfs-ds-help/key.go b/vendor/github.com/ipfs/go-ipfs-ds-help/key.go new file mode 100644 index 0000000000..32b73a61e6 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-ds-help/key.go @@ -0,0 +1,51 @@ +// Package dshelp provides utilities for parsing and creating +// datastore keys used by go-ipfs +package dshelp + +import ( + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/multiformats/go-base32" + mh "github.com/multiformats/go-multihash" +) + +// NewKeyFromBinary creates a new key from a byte slice. +func NewKeyFromBinary(rawKey []byte) datastore.Key { + buf := make([]byte, 1+base32.RawStdEncoding.EncodedLen(len(rawKey))) + buf[0] = '/' + base32.RawStdEncoding.Encode(buf[1:], rawKey) + return datastore.RawKey(string(buf)) +} + +// BinaryFromDsKey returns the byte slice corresponding to the given Key. +func BinaryFromDsKey(k datastore.Key) ([]byte, error) { + return base32.RawStdEncoding.DecodeString(k.String()[1:]) +} + +// MultihashToDsKey creates a Key from the given Multihash. +// If working with Cids, you can call cid.Hash() to obtain +// the multihash. Note that different CIDs might represent +// the same multihash. +func MultihashToDsKey(k mh.Multihash) datastore.Key { + return NewKeyFromBinary(k) +} + +// DsKeyToMultihash converts a dsKey to the corresponding Multihash. +func DsKeyToMultihash(dsKey datastore.Key) (mh.Multihash, error) { + kb, err := BinaryFromDsKey(dsKey) + if err != nil { + return nil, err + } + return mh.Cast(kb) +} + +// DsKeyToCidV1Raw converts the given Key (which should be a raw multihash +// key) to a Cid V1 of the given type (see +// https://godoc.org/github.com/ipfs/go-cid#pkg-constants). 
+func DsKeyToCidV1(dsKey datastore.Key, codecType uint64) (cid.Cid, error) { + hash, err := DsKeyToMultihash(dsKey) + if err != nil { + return cid.Cid{}, err + } + return cid.NewCidV1(codecType, hash), nil +} diff --git a/vendor/github.com/ipfs/go-ipfs-exchange-interface/.travis.yml b/vendor/github.com/ipfs/go-ipfs-exchange-interface/.travis.yml new file mode 100644 index 0000000000..4cfe98c242 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-exchange-interface/.travis.yml @@ -0,0 +1,32 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gx + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/src/gx + - $GOPATH/pkg/mod + - $HOME/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/ipfs/go-ipfs-exchange-interface/LICENSE b/vendor/github.com/ipfs/go-ipfs-exchange-interface/LICENSE new file mode 100644 index 0000000000..e4224df5b7 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-exchange-interface/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 IPFS + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/ipfs/go-ipfs-exchange-interface/Makefile b/vendor/github.com/ipfs/go-ipfs-exchange-interface/Makefile new file mode 100644 index 0000000000..20619413c9 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-exchange-interface/Makefile @@ -0,0 +1,11 @@ +gx: + go get github.com/whyrusleeping/gx + go get github.com/whyrusleeping/gx-go + +deps: gx + gx --verbose install --global + gx-go rewrite + +publish: + gx-go rewrite --undo + diff --git a/vendor/github.com/ipfs/go-ipfs-exchange-interface/README.md b/vendor/github.com/ipfs/go-ipfs-exchange-interface/README.md new file mode 100644 index 0000000000..8dbcfe1c3b --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-exchange-interface/README.md @@ -0,0 +1,42 @@ +# go-ipfs-exchange-interface + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) +[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) +[![GoDoc](https://godoc.org/github.com/ipfs/go-ipfs-exchange-interface?status.svg)](https://godoc.org/github.com/ipfs/go-ipfs-exchange-interface) +[![Build Status](https://travis-ci.org/ipfs/go-ipfs-exchange-interface.svg?branch=master)](https://travis-ci.org/ipfs/go-ipfs-exchange-interface) + +> go-ipfs-exchange-interface defines the IPFS exchange interface + +## Table of Contents + +- 
[Install](#install) +- [Usage](#usage) +- [Contribute](#contribute) +- [License](#license) + +## Install + +`go-ipfs-exchange-interface` works like a regular Go module: + +``` +> go get github.com/ipfs/go-ipfs-exchange-interface +``` + +## Usage + +``` +import "github.com/ipfs/go-ipfs-exchange-interface" +``` + +Check the [GoDoc documentation](https://godoc.org/github.com/ipfs/go-ipfs-exchange-interface) + +## Contribute + +PRs accepted. + +Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + +## License + +MIT © Protocol Labs, Inc. diff --git a/vendor/github.com/ipfs/go-ipfs-exchange-interface/go.mod b/vendor/github.com/ipfs/go-ipfs-exchange-interface/go.mod new file mode 100644 index 0000000000..fade39b99e --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-exchange-interface/go.mod @@ -0,0 +1,6 @@ +module github.com/ipfs/go-ipfs-exchange-interface + +require ( + github.com/ipfs/go-block-format v0.0.2 + github.com/ipfs/go-cid v0.0.1 +) diff --git a/vendor/github.com/ipfs/go-ipfs-exchange-interface/go.sum b/vendor/github.com/ipfs/go-ipfs-exchange-interface/go.sum new file mode 100644 index 0000000000..aa764c6aaa --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-exchange-interface/go.sum @@ -0,0 +1,26 @@ +github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-cid v0.0.1 h1:GBjWPktLnNyX0JiQCNFpUuUSoMw5KMyqrsejHYlILBE= +github.com/ipfs/go-cid v0.0.1/go.mod 
h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d h1:Z0Ahzd7HltpJtjAHHxX8QFP3j1yYgiuvjbjRzDj/KH0= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/ipfs/go-ipfs-exchange-interface/interface.go 
b/vendor/github.com/ipfs/go-ipfs-exchange-interface/interface.go new file mode 100644 index 0000000000..c3032b2350 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-exchange-interface/interface.go @@ -0,0 +1,37 @@ +// Package exchange defines the IPFS exchange interface +package exchange + +import ( + "context" + "io" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" +) + +// Interface defines the functionality of the IPFS block exchange protocol. +type Interface interface { // type Exchanger interface + Fetcher + + // TODO Should callers be concerned with whether the block was made + // available on the network? + HasBlock(blocks.Block) error + + IsOnline() bool + + io.Closer +} + +// Fetcher is an object that can be used to retrieve blocks +type Fetcher interface { + // GetBlock returns the block associated with a given key. + GetBlock(context.Context, cid.Cid) (blocks.Block, error) + GetBlocks(context.Context, []cid.Cid) (<-chan blocks.Block, error) +} + +// SessionExchange is an exchange.Interface which supports +// sessions. 
+type SessionExchange interface { + Interface + NewSession(context.Context) Fetcher +} diff --git a/vendor/github.com/ipfs/go-ipfs-exchange-interface/package.json b/vendor/github.com/ipfs/go-ipfs-exchange-interface/package.json new file mode 100644 index 0000000000..c5f83220d5 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-exchange-interface/package.json @@ -0,0 +1,30 @@ +{ + "author": "hsanjuan", + "bugs": { + "url": "https://github.com/ipfs/go-ipfs-exchange-interface" + }, + "gx": { + "dvcsimport": "github.com/ipfs/go-ipfs-exchange-interface" + }, + "gxDependencies": [ + { + "author": "stebalien", + "hash": "QmYYLnAzR28nAQ4U5MFniLprnktu6eTFKibeNt96V21EZK", + "name": "go-block-format", + "version": "0.2.2" + }, + { + "author": "whyrusleeping", + "hash": "QmTbxNB1NwDesLmKTscr4udL2tVP7MaxvXnD1D9yX7g3PN", + "name": "go-cid", + "version": "0.9.3" + } + ], + "gxVersion": "0.12.1", + "language": "go", + "license": "MIT", + "name": "go-ipfs-exchange-interface", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "0.1.3" +} + diff --git a/vendor/github.com/ipfs/go-ipfs-posinfo/.gitignore b/vendor/github.com/ipfs/go-ipfs-posinfo/.gitignore new file mode 100644 index 0000000000..a1338d6851 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-posinfo/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/github.com/ipfs/go-ipfs-posinfo/.travis.yml b/vendor/github.com/ipfs/go-ipfs-posinfo/.travis.yml new file mode 100644 index 0000000000..4cfe98c242 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-posinfo/.travis.yml @@ -0,0 +1,32 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gx + - BUILD_DEPTYPE=gomod 
+ + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/src/gx + - $GOPATH/pkg/mod + - $HOME/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/ipfs/go-ipfs-posinfo/LICENSE b/vendor/github.com/ipfs/go-ipfs-posinfo/LICENSE new file mode 100644 index 0000000000..e4224df5b7 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-posinfo/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 IPFS + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/ipfs/go-ipfs-posinfo/Makefile b/vendor/github.com/ipfs/go-ipfs-posinfo/Makefile new file mode 100644 index 0000000000..24d71558e7 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-posinfo/Makefile @@ -0,0 +1,18 @@ +all: deps +gx: + go get github.com/whyrusleeping/gx + go get github.com/whyrusleeping/gx-go +deps: gx + gx --verbose install --global + gx-go rewrite +test: deps + go test -v -covermode count -coverprofile=coverage.out . +rw: + gx-go rewrite +rwundo: + gx-go rewrite --undo +publish: rwundo + gx publish +.PHONY: all gx deps test rw rwundo publish + + diff --git a/vendor/github.com/ipfs/go-ipfs-posinfo/README.md b/vendor/github.com/ipfs/go-ipfs-posinfo/README.md new file mode 100644 index 0000000000..bd509c17e0 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-posinfo/README.md @@ -0,0 +1,37 @@ +# go-ipfs-posinfo + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) +[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) +[![GoDoc](https://godoc.org/github.com/ipfs/go-ipfs-posinfo?status.svg)](https://godoc.org/github.com/ipfs/go-ipfs-posinfo) +[![Build Status](https://travis-ci.org/ipfs/go-ipfs-posinfo.svg?branch=master)](https://travis-ci.org/ipfs/go-ipfs-posinfo) + +> Posinfo wraps offset information for ipfs filestore nodes + +## Table of Contents + +- [Install](#install) +- [Usage](#usage) +- [Contribute](#contribute) +- [License](#license) + +## Install + +``` +go get github.com/ipfs/go-ipfs-posinfo +``` + +## Usage + +See the [GoDoc documentation](https://godoc.org/github.com/ipfs/go-ipfs-posinfo) + + +## Contribute + +PRs accepted. + +Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. 
+ +## License + +MIT © Protocol Labs, Inc. diff --git a/vendor/github.com/ipfs/go-ipfs-posinfo/go.mod b/vendor/github.com/ipfs/go-ipfs-posinfo/go.mod new file mode 100644 index 0000000000..d006408484 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-posinfo/go.mod @@ -0,0 +1,3 @@ +module github.com/ipfs/go-ipfs-posinfo + +require github.com/ipfs/go-ipld-format v0.0.1 diff --git a/vendor/github.com/ipfs/go-ipfs-posinfo/go.sum b/vendor/github.com/ipfs/go-ipfs-posinfo/go.sum new file mode 100644 index 0000000000..9e2d1534a1 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-posinfo/go.sum @@ -0,0 +1,28 @@ +github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-cid v0.0.1 h1:GBjWPktLnNyX0JiQCNFpUuUSoMw5KMyqrsejHYlILBE= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipld-format v0.0.1 h1:HCu4eB/Gh+KD/Q0M8u888RFkorTWNIL3da4oc5dwc80= +github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 
h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d h1:Z0Ahzd7HltpJtjAHHxX8QFP3j1yYgiuvjbjRzDj/KH0= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/ipfs/go-ipfs-posinfo/package.json b/vendor/github.com/ipfs/go-ipfs-posinfo/package.json new file mode 100644 index 0000000000..f1815f5825 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-posinfo/package.json @@ -0,0 +1,24 @@ +{ + "author": "hector", + "bugs": { + "url": "https://github.com/ipfs/go-ipfs-posinfo" + }, + "gx": { + "dvcsimport": "github.com/ipfs/go-ipfs-posinfo" + }, + "gxDependencies": [ + { + "author": "whyrusleeping", + "hash": "QmZ6nzCLwGLVfRzYLpD7pW6UNuBDKEcA2imJtVpbEx2rxy", + "name": "go-ipld-format", + "version": "0.8.1" + } + ], + "gxVersion": "0.12.1", + "language": "go", + "license": "MIT", + "name": 
"go-ipfs-posinfo", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "0.1.5" +} + diff --git a/vendor/github.com/ipfs/go-ipfs-posinfo/posinfo.go b/vendor/github.com/ipfs/go-ipfs-posinfo/posinfo.go new file mode 100644 index 0000000000..0b32c89da1 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-posinfo/posinfo.go @@ -0,0 +1,23 @@ +// Package posinfo wraps offset information used by ipfs filestore nodes +package posinfo + +import ( + "os" + + ipld "github.com/ipfs/go-ipld-format" +) + +// PosInfo stores information about the file offset, its path and +// stat. +type PosInfo struct { + Offset uint64 + FullPath string + Stat os.FileInfo // can be nil +} + +// FilestoreNode is an ipld.Node which arries PosInfo with it +// allowing to map it directly to a filesystem object. +type FilestoreNode struct { + ipld.Node + PosInfo *PosInfo +} diff --git a/vendor/github.com/ipfs/go-ipfs-util/.gitignore b/vendor/github.com/ipfs/go-ipfs-util/.gitignore new file mode 100644 index 0000000000..1377554ebe --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-util/.gitignore @@ -0,0 +1 @@ +*.swp diff --git a/vendor/github.com/ipfs/go-ipfs-util/.travis.yml b/vendor/github.com/ipfs/go-ipfs-util/.travis.yml new file mode 100644 index 0000000000..4cfe98c242 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-util/.travis.yml @@ -0,0 +1,32 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gx + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/src/gx + - $GOPATH/pkg/mod + - $HOME/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/ipfs/go-ipfs-util/LICENSE b/vendor/github.com/ipfs/go-ipfs-util/LICENSE new file mode 100644 index 0000000000..9ce9744462 --- /dev/null +++ 
b/vendor/github.com/ipfs/go-ipfs-util/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/ipfs/go-ipfs-util/README.md b/vendor/github.com/ipfs/go-ipfs-util/README.md new file mode 100644 index 0000000000..33bff12cd5 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-util/README.md @@ -0,0 +1,45 @@ +# go-ipfs-util + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) +[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) +[![](https://img.shields.io/badge/discussion_repo-go_to_issues-brightgreen.svg?style=flat-square)](https://github.com/ipfs/NAME/issues) + +> Common utilities used by go-ipfs and other related go packages + +## Install + +This is a Go module which can be installed with `go get github.com/ipfs/go-ipfs-util`. `go-ipfs-util` is however packaged with Gx, so it is recommended to use Gx to install it (see Usage section). + +## Usage + +This module is packaged with [Gx](https://github.com/whyrusleeping/gx). +In order to use it in your own project do: + +``` +go get -u github.com/whyrusleeping/gx +go get -u github.com/whyrusleeping/gx-go +cd +gx init +gx import github.com/ipfs/go-ipfs-util +gx install --global +gx-go --rewrite +``` + +Please check [Gx](https://github.com/whyrusleeping/gx) and [Gx-go](https://github.com/whyrusleeping/gx-go) documentation for more information. + + +## Contribute + +Feel free to join in. All welcome. Open an [issue](https://github.com/ipfs/go-ipfs-util/issues)! + +This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). + +### Want to hack on IPFS? 
+ +[![](https://cdn.rawgit.com/jbenet/contribute-ipfs-gif/master/img/contribute.gif)](https://github.com/ipfs/community/blob/master/contributing.md) + +## License + +MIT diff --git a/vendor/github.com/ipfs/go-ipfs-util/file.go b/vendor/github.com/ipfs/go-ipfs-util/file.go new file mode 100644 index 0000000000..e6e30df4d3 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-util/file.go @@ -0,0 +1,12 @@ +package util + +import "os" + +// FileExists check if the file with the given path exits. +func FileExists(filename string) bool { + fi, err := os.Lstat(filename) + if fi != nil || (err != nil && !os.IsNotExist(err)) { + return true + } + return false +} diff --git a/vendor/github.com/ipfs/go-ipfs-util/go.mod b/vendor/github.com/ipfs/go-ipfs-util/go.mod new file mode 100644 index 0000000000..112c925b57 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-util/go.mod @@ -0,0 +1,6 @@ +module github.com/ipfs/go-ipfs-util + +require ( + github.com/mr-tron/base58 v1.1.0 + github.com/multiformats/go-multihash v0.0.1 +) diff --git a/vendor/github.com/ipfs/go-ipfs-util/go.sum b/vendor/github.com/ipfs/go-ipfs-util/go.sum new file mode 100644 index 0000000000..25c9723e81 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-util/go.sum @@ -0,0 +1,16 @@ +github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= 
+github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d h1:Z0Ahzd7HltpJtjAHHxX8QFP3j1yYgiuvjbjRzDj/KH0= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/ipfs/go-ipfs-util/package.json b/vendor/github.com/ipfs/go-ipfs-util/package.json new file mode 100644 index 0000000000..3c3ed116bc --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-util/package.json @@ -0,0 +1,28 @@ +{ + "author": "whyrusleeping", + "bugs": {}, + "gx": { + "dvcsimport": "github.com/ipfs/go-ipfs-util" + }, + "gxDependencies": [ + { + "author": "multiformats", + "hash": "QmerPMzPk1mJVowm8KgmoknWa4yCYvvugMPsgWmDNUvDLW", + "name": "go-multihash", + "version": "1.0.9" + }, + { + "author": "mr-tron", + "hash": "QmWFAMPqsEyUX7gDUsRVmMWz59FxSpJ1b2v6bJ1yYzo7jY", + "name": "go-base58-fast", + "version": "0.1.1" + } + ], + "gxVersion": "0.9.1", + "language": "go", + "license": "", + "name": "go-ipfs-util", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "1.2.9" +} + diff --git a/vendor/github.com/ipfs/go-ipfs-util/time.go b/vendor/github.com/ipfs/go-ipfs-util/time.go new file mode 100644 index 0000000000..37d720fb1b --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-util/time.go @@ -0,0 +1,22 @@ +package util + 
+import "time" + +// TimeFormatIpfs is the format ipfs uses to represent time in string form. +var TimeFormatIpfs = time.RFC3339Nano + +// ParseRFC3339 parses an RFC3339Nano-formatted time stamp and +// returns the UTC time. +func ParseRFC3339(s string) (time.Time, error) { + t, err := time.Parse(TimeFormatIpfs, s) + if err != nil { + return time.Time{}, err + } + return t.UTC(), nil +} + +// FormatRFC3339 returns the string representation of the +// UTC value of the given time in RFC3339Nano format. +func FormatRFC3339(t time.Time) string { + return t.UTC().Format(TimeFormatIpfs) +} diff --git a/vendor/github.com/ipfs/go-ipfs-util/util.go b/vendor/github.com/ipfs/go-ipfs-util/util.go new file mode 100644 index 0000000000..8ebe3c706f --- /dev/null +++ b/vendor/github.com/ipfs/go-ipfs-util/util.go @@ -0,0 +1,158 @@ +// Package util implements various utility functions used within ipfs +// that do not currently have a better place to live. +package util + +import ( + "errors" + "io" + "math/rand" + "os" + "path/filepath" + "runtime/debug" + "strings" + "time" + + b58 "github.com/mr-tron/base58/base58" + mh "github.com/multiformats/go-multihash" +) + +// DefaultIpfsHash is the current default hash function used by IPFS. +const DefaultIpfsHash = mh.SHA2_256 + +// Debug is a global flag for debugging. +var Debug bool + +// ErrNotImplemented signifies a function has not been implemented yet. +var ErrNotImplemented = errors.New("Error: not implemented yet.") + +// ErrTimeout implies that a timeout has been triggered +var ErrTimeout = errors.New("Error: Call timed out.") + +// ErrSearchIncomplete implies that a search type operation didnt +// find the expected node, but did find 'a' node. +var ErrSearchIncomplete = errors.New("Error: Search Incomplete.") + +// ErrCast is returned when a cast fails AND the program should not panic. 
+func ErrCast() error { + debug.PrintStack() + return errCast +} + +var errCast = errors.New("cast error") + +// ExpandPathnames takes a set of paths and turns them into absolute paths +func ExpandPathnames(paths []string) ([]string, error) { + var out []string + for _, p := range paths { + abspath, err := filepath.Abs(p) + if err != nil { + return nil, err + } + out = append(out, abspath) + } + return out, nil +} + +type randGen struct { + rand.Rand +} + +// NewTimeSeededRand returns a random bytes reader +// which has been initialized with the current time. +func NewTimeSeededRand() io.Reader { + src := rand.NewSource(time.Now().UnixNano()) + return &randGen{ + Rand: *rand.New(src), + } +} + +// NewSeededRand returns a random bytes reader +// initialized with the given seed. +func NewSeededRand(seed int64) io.Reader { + src := rand.NewSource(seed) + return &randGen{ + Rand: *rand.New(src), + } +} + +func (r *randGen) Read(p []byte) (n int, err error) { + for i := 0; i < len(p); i++ { + p[i] = byte(r.Rand.Intn(255)) + } + return len(p), nil +} + +// GetenvBool is the way to check an env var as a boolean +func GetenvBool(name string) bool { + v := strings.ToLower(os.Getenv(name)) + return v == "true" || v == "t" || v == "1" +} + +// MultiErr is a util to return multiple errors +type MultiErr []error + +func (m MultiErr) Error() string { + if len(m) == 0 { + return "no errors" + } + + s := "Multiple errors: " + for i, e := range m { + if i != 0 { + s += ", " + } + s += e.Error() + } + return s +} + +// Partition splits a subject 3 parts: prefix, separator, suffix. +// The first occurrence of the separator will be matched. +// ie. 
Partition("Ready, steady, go!", ", ") -> ["Ready", ", ", "steady, go!"] +func Partition(subject string, sep string) (string, string, string) { + if i := strings.Index(subject, sep); i != -1 { + return subject[:i], subject[i : i+len(sep)], subject[i+len(sep):] + } + return subject, "", "" +} + +// RPartition splits a subject 3 parts: prefix, separator, suffix. +// The last occurrence of the separator will be matched. +// ie. RPartition("Ready, steady, go!", ", ") -> ["Ready, steady", ", ", "go!"] +func RPartition(subject string, sep string) (string, string, string) { + if i := strings.LastIndex(subject, sep); i != -1 { + return subject[:i], subject[i : i+len(sep)], subject[i+len(sep):] + } + return subject, "", "" +} + +// Hash is the global IPFS hash function. uses multihash SHA2_256, 256 bits +func Hash(data []byte) mh.Multihash { + h, err := mh.Sum(data, DefaultIpfsHash, -1) + if err != nil { + // this error can be safely ignored (panic) because multihash only fails + // from the selection of hash function. If the fn + length are valid, it + // won't error. + panic("multihash failed to hash using SHA2_256.") + } + return h +} + +// IsValidHash checks whether a given hash is valid (b58 decodable, len > 0) +func IsValidHash(s string) bool { + out, err := b58.Decode(s) + if err != nil { + return false + } + _, err = mh.Cast(out) + return err == nil +} + +// XOR takes two byte slices, XORs them together, returns the resulting slice. 
+func XOR(a, b []byte) []byte { + c := make([]byte, len(a)) + for i := 0; i < len(a); i++ { + c[i] = a[i] ^ b[i] + } + return c +} diff --git a/vendor/github.com/ipfs/go-ipld-cbor/.travis.yml b/vendor/github.com/ipfs/go-ipld-cbor/.travis.yml new file mode 100644 index 0000000000..923835bc58 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-cbor/.travis.yml @@ -0,0 +1,31 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/src/gx + - $GOPATH/pkg/mod + - $HOME/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/ipfs/go-ipld-cbor/LICENSE b/vendor/github.com/ipfs/go-ipld-cbor/LICENSE new file mode 100644 index 0000000000..26100332ba --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-cbor/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Jeromy Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/ipfs/go-ipld-cbor/Makefile b/vendor/github.com/ipfs/go-ipld-cbor/Makefile new file mode 100644 index 0000000000..0ad4560a70 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-cbor/Makefile @@ -0,0 +1,14 @@ +all: build + +build: + go build ./... +.PHONY: build + +test: + go test ./... +.PHONY: test + +benchmark: + go test -bench=./... +.PHONY: benchmark + diff --git a/vendor/github.com/ipfs/go-ipld-cbor/README.md b/vendor/github.com/ipfs/go-ipld-cbor/README.md new file mode 100644 index 0000000000..b74dbf3e62 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-cbor/README.md @@ -0,0 +1,57 @@ +go-ipld-cbor +================== + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://libp2p.io/) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) +[![Coverage Status](https://coveralls.io/repos/github/libp2p/js-libp2p-floodsub/badge.svg?branch=master)](https://coveralls.io/github/libp2p/js-libp2p-floodsub?branch=master) +[![Travis CI](https://travis-ci.org/libp2p/js-libp2p-floodsub.svg?branch=master)](https://travis-ci.org/libp2p/js-libp2p-floodsub) + +> An implementation of a cbor encoded merkledag object. + +## Lead Maintainer + +[Eric Myhre](https://github.com/warpfork) + +## Table of Contents + +- [Install](#install) +- [Usage](#usage) +- [API](#api) +- [Contribute](#contribute) +- [License](#license) + +## Install + +```sh +make install +``` + +## Usage + +TODO: Right now this package isn't the easiest to use, it will be getting better rapidly, soon. 
+```go +// Make an object +obj := map[interface{}]interface{}{ + "foo": "bar", + "baz": &Link{ + Target: myCid, + }, +} + +// Parse it into an ipldcbor node +nd, err := WrapMap(obj) + +fmt.Println(nd.Links()) + +``` + +## Contribute + +PRs are welcome! + +Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + +## License + +MIT © Jeromy Johnson diff --git a/vendor/github.com/ipfs/go-ipld-cbor/codecov.yml b/vendor/github.com/ipfs/go-ipld-cbor/codecov.yml new file mode 100644 index 0000000000..5f88a9ea27 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-cbor/codecov.yml @@ -0,0 +1,3 @@ +coverage: + range: "50...100" +comment: off diff --git a/vendor/github.com/ipfs/go-ipld-cbor/encoding/cloner.go b/vendor/github.com/ipfs/go-ipld-cbor/encoding/cloner.go new file mode 100644 index 0000000000..d054eb4144 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-cbor/encoding/cloner.go @@ -0,0 +1,41 @@ +package encoding + +import ( + "sync" + + refmt "github.com/polydawn/refmt" + "github.com/polydawn/refmt/obj/atlas" +) + +// PooledCloner is a thread-safe pooled object cloner. +type PooledCloner struct { + pool sync.Pool +} + +// NewPooledCloner returns a PooledCloner with the given atlas. Do not copy +// after use. +func NewPooledCloner(atl atlas.Atlas) PooledCloner { + return PooledCloner{ + pool: sync.Pool{ + New: func() interface{} { + return refmt.NewCloner(atl) + }, + }, + } +} + +type selfCloner interface { + Clone(b interface{}) error +} + +// Clone clones a into b using a cloner from the pool. 
+func (p *PooledCloner) Clone(a, b interface{}) error { + if self, ok := a.(selfCloner); ok { + return self.Clone(b) + } + + c := p.pool.Get().(refmt.Cloner) + err := c.Clone(a, b) + p.pool.Put(c) + return err +} diff --git a/vendor/github.com/ipfs/go-ipld-cbor/encoding/marshaller.go b/vendor/github.com/ipfs/go-ipld-cbor/encoding/marshaller.go new file mode 100644 index 0000000000..a517f472e6 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-cbor/encoding/marshaller.go @@ -0,0 +1,92 @@ +package encoding + +import ( + "bytes" + "io" + "sync" + + cbor "github.com/polydawn/refmt/cbor" + "github.com/polydawn/refmt/obj/atlas" +) + +type proxyWriter struct { + w io.Writer +} + +func (w *proxyWriter) Write(b []byte) (int, error) { + return w.w.Write(b) +} + +// Marshaller is a reusbale CBOR marshaller. +type Marshaller struct { + marshal *cbor.Marshaller + writer proxyWriter +} + +// NewMarshallerAtlased constructs a new cbor Marshaller using the given atlas. +func NewMarshallerAtlased(atl atlas.Atlas) *Marshaller { + m := new(Marshaller) + m.marshal = cbor.NewMarshallerAtlased(&m.writer, atl) + return m +} + +type cborMarshaler interface { + MarshalCBOR(w io.Writer) error +} + +// Encode encodes the given object to the given writer. +func (m *Marshaller) Encode(obj interface{}, w io.Writer) error { + m.writer.w = w + var err error + selfMarshaling, ok := obj.(cborMarshaler) + if ok { + err = selfMarshaling.MarshalCBOR(w) + } else { + err = m.marshal.Marshal(obj) + } + m.writer.w = nil + return err +} + +// Marshal marshels the given object to a byte slice. +func (m *Marshaller) Marshal(obj interface{}) ([]byte, error) { + var buf bytes.Buffer + if err := m.Encode(obj, &buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// PooledMarshaller is a thread-safe pooled CBOR marshaller. +type PooledMarshaller struct { + pool sync.Pool +} + +// NewPooledMarshaller returns a PooledMarshaller with the given atlas. Do not +// copy after use. 
+func NewPooledMarshaller(atl atlas.Atlas) PooledMarshaller { + return PooledMarshaller{ + pool: sync.Pool{ + New: func() interface{} { + return NewMarshallerAtlased(atl) + }, + }, + } +} + +// Marshal marshals the passed object using the pool of marshallers. +func (p *PooledMarshaller) Marshal(obj interface{}) ([]byte, error) { + m := p.pool.Get().(*Marshaller) + bts, err := m.Marshal(obj) + p.pool.Put(m) + return bts, err +} + +// Encode encodes the passed object to the given writer using the pool of +// marshallers. +func (p *PooledMarshaller) Encode(obj interface{}, w io.Writer) error { + m := p.pool.Get().(*Marshaller) + err := m.Encode(obj, w) + p.pool.Put(m) + return err +} diff --git a/vendor/github.com/ipfs/go-ipld-cbor/encoding/unmarshaller.go b/vendor/github.com/ipfs/go-ipld-cbor/encoding/unmarshaller.go new file mode 100644 index 0000000000..559cbeb8e1 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-cbor/encoding/unmarshaller.go @@ -0,0 +1,88 @@ +package encoding + +import ( + "bytes" + "io" + "sync" + + cbor "github.com/polydawn/refmt/cbor" + "github.com/polydawn/refmt/obj/atlas" +) + +type proxyReader struct { + r io.Reader +} + +func (r *proxyReader) Read(b []byte) (int, error) { + return r.r.Read(b) +} + +// Unmarshaller is a reusable CBOR unmarshaller. +type Unmarshaller struct { + unmarshal *cbor.Unmarshaller + reader proxyReader +} + +// NewUnmarshallerAtlased creates a new reusable unmarshaller. +func NewUnmarshallerAtlased(atl atlas.Atlas) *Unmarshaller { + m := new(Unmarshaller) + m.unmarshal = cbor.NewUnmarshallerAtlased(cbor.DecodeOptions{CoerceUndefToNull: true}, &m.reader, atl) + return m +} + +type cborUnmarshaler interface { + UnmarshalCBOR(r io.Reader) error +} + +// Decode reads a CBOR object from the given reader and decodes it into the +// given object. 
+func (m *Unmarshaller) Decode(r io.Reader, obj interface{}) (err error) { + m.reader.r = r + selfUnmarshaler, ok := obj.(cborUnmarshaler) + if ok { + err = selfUnmarshaler.UnmarshalCBOR(r) + } else { + err = m.unmarshal.Unmarshal(obj) + } + m.reader.r = nil + return err +} + +// Unmarshal unmarshals the given CBOR byte slice into the given object. +func (m *Unmarshaller) Unmarshal(b []byte, obj interface{}) error { + return m.Decode(bytes.NewReader(b), obj) +} + +// PooledUnmarshaller is a thread-safe pooled CBOR unmarshaller. +type PooledUnmarshaller struct { + pool sync.Pool +} + +// NewPooledUnmarshaller returns a PooledUnmarshaller with the given atlas. Do +// not copy after use. +func NewPooledUnmarshaller(atl atlas.Atlas) PooledUnmarshaller { + return PooledUnmarshaller{ + pool: sync.Pool{ + New: func() interface{} { + return NewUnmarshallerAtlased(atl) + }, + }, + } +} + +// Decode decodes an object from the passed reader into the given object using +// the pool of unmarshallers. +func (p *PooledUnmarshaller) Decode(r io.Reader, obj interface{}) error { + u := p.pool.Get().(*Unmarshaller) + err := u.Decode(r, obj) + p.pool.Put(u) + return err +} + +// Unmarshal unmarshals the passed object using the pool of unmarshallers. 
+func (p *PooledUnmarshaller) Unmarshal(b []byte, obj interface{}) error { + u := p.pool.Get().(*Unmarshaller) + err := u.Unmarshal(b, obj) + p.pool.Put(u) + return err +} diff --git a/vendor/github.com/ipfs/go-ipld-cbor/go.mod b/vendor/github.com/ipfs/go-ipld-cbor/go.mod new file mode 100644 index 0000000000..51b8bd2b25 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-cbor/go.mod @@ -0,0 +1,15 @@ +module github.com/ipfs/go-ipld-cbor + +require ( + github.com/ipfs/go-block-format v0.0.2 + github.com/ipfs/go-cid v0.0.3 + github.com/ipfs/go-ipfs-util v0.0.1 + github.com/ipfs/go-ipld-format v0.0.1 + github.com/multiformats/go-multihash v0.0.10 + github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992 + github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa // indirect + github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 // indirect + github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158 +) + +go 1.13 diff --git a/vendor/github.com/ipfs/go-ipld-cbor/go.sum b/vendor/github.com/ipfs/go-ipld-cbor/go.sum new file mode 100644 index 0000000000..2471a67c4f --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-cbor/go.sum @@ -0,0 +1,62 @@ +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-cid v0.0.1 h1:GBjWPktLnNyX0JiQCNFpUuUSoMw5KMyqrsejHYlILBE= +github.com/ipfs/go-cid 
v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.3 h1:UIAh32wymBpStoe83YCzwVQQ5Oy/H0FdxvUS6DJDzms= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipld-format v0.0.1 h1:HCu4eB/Gh+KD/Q0M8u888RFkorTWNIL3da4oc5dwc80= +github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod 
h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.10 h1:lMoNbh2Ssd9PUF74Nz008KGzGPlfeV6wH3rit5IIGCM= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992 h1:bzMe+2coZJYHnhGgVlcQKuRy4FSny4ds8dLQjw5P1XE= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa h1:E+gaaifzi2xF65PbDmuKI3PhLWY6G5opMLniFq8vmXA= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158 h1:WXhVOwj2USAXB5oMDwRl3piOux2XMV9TANaYxXHdkoE= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= 
+golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d h1:Z0Ahzd7HltpJtjAHHxX8QFP3j1yYgiuvjbjRzDj/KH0= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/ipfs/go-ipld-cbor/node.go b/vendor/github.com/ipfs/go-ipld-cbor/node.go new file mode 100644 index 0000000000..139498c82b --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-cbor/node.go @@ -0,0 +1,545 @@ +package cbornode + +import ( + "encoding/json" + "errors" + "io" + "math" + "strconv" + "strings" + + blocks "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + node 
"github.com/ipfs/go-ipld-format" + mh "github.com/multiformats/go-multihash" +) + +// CBORTagLink is the integer used to represent tags in CBOR. +const CBORTagLink = 42 + +// Node represents an IPLD node. +type Node struct { + obj interface{} + tree []string + links []*node.Link + raw []byte + cid cid.Cid +} + +// Compile time check to make sure Node implements the node.Node interface +var _ node.Node = (*Node)(nil) + +var ( + // ErrNoSuchLink is returned when no link with the given name was found. + ErrNoSuchLink = errors.New("no such link found") + ErrNonLink = errors.New("non-link found at given path") + ErrInvalidLink = errors.New("link value should have been bytes") + ErrInvalidKeys = errors.New("map keys must be strings") + ErrArrayOutOfRange = errors.New("array index out of range") + ErrNoLinks = errors.New("tried to resolve through object that had no links") + ErrEmptyLink = errors.New("link value was empty") + ErrInvalidMultibase = errors.New("invalid multibase on IPLD link") + ErrNonStringLink = errors.New("link should have been a string") +) + +// DecodeBlock decodes a CBOR encoded Block into an IPLD Node. +// +// This method *does not* canonicalize and *will* preserve the CID. As a matter +// of fact, it will assume that `block.Cid()` returns the correct CID and will +// make no effort to validate this assumption. +// +// In general, you should not be calling this method directly. Instead, you +// should be calling the `Decode` method from the `go-ipld-format` package. That +// method will pick the right decoder based on the Block's CID. +// +// Note: This function keeps a reference to `block` and assumes that it is +// immutable. 
+func DecodeBlock(block blocks.Block) (node.Node, error) { + return decodeBlock(block) +} + +func decodeBlock(block blocks.Block) (*Node, error) { + var m interface{} + if err := DecodeInto(block.RawData(), &m); err != nil { + return nil, err + } + return newObject(block, m) +} + +func newObject(block blocks.Block, m interface{}) (*Node, error) { + tree, links, err := compute(m) + if err != nil { + return nil, err + } + + return &Node{ + obj: m, + tree: tree, + links: links, + raw: block.RawData(), + cid: block.Cid(), + }, nil +} + +var _ node.DecodeBlockFunc = DecodeBlock + +// Decode decodes a CBOR object into an IPLD Node. +// +// If passed a non-canonical CBOR node, this function will canonicalize it. +// Therefore, `bytes.Equal(b, Decode(b).RawData())` may not hold. If you already +// have a CID for this data and want to ensure that it doesn't change, you +// should use `DecodeBlock`. +// mhType is multihash code to use for hashing, for example mh.SHA2_256 +// +// Note: This function does not hold onto `b`. You may reuse it. +func Decode(b []byte, mhType uint64, mhLen int) (*Node, error) { + var m interface{} + if err := DecodeInto(b, &m); err != nil { + return nil, err + } + + // We throw away `b` here to ensure that we canonicalize the encoded + // CBOR object. + return WrapObject(m, mhType, mhLen) +} + +// DecodeInto decodes a serialized IPLD cbor object into the given object. +func DecodeInto(b []byte, v interface{}) error { + return unmarshaller.Unmarshal(b, v) +} + +func DecodeReader(r io.Reader, v interface{}) error { + return unmarshaller.Decode(r, v) +} + +// WrapObject converts an arbitrary object into a Node. 
+func WrapObject(m interface{}, mhType uint64, mhLen int) (*Node, error) { + data, err := marshaller.Marshal(m) + if err != nil { + return nil, err + } + + var obj interface{} + err = cloner.Clone(m, &obj) + if err != nil { + return nil, err + } + + if mhType == math.MaxUint64 { + mhType = mh.SHA2_256 + } + + hash, err := mh.Sum(data, mhType, mhLen) + if err != nil { + return nil, err + } + c := cid.NewCidV1(cid.DagCBOR, hash) + + block, err := blocks.NewBlockWithCid(data, c) + if err != nil { + // TODO: Shouldn't this just panic? + return nil, err + } + // No need to deserialize. We can just deep copy. + return newObject(block, obj) +} + +// Resolve resolves a given path, and returns the object found at the end, as well +// as the possible tail of the path that was not resolved. +func (n *Node) Resolve(path []string) (interface{}, []string, error) { + var cur interface{} = n.obj + for i, val := range path { + switch curv := cur.(type) { + case map[string]interface{}: + next, ok := curv[val] + if !ok { + return nil, nil, ErrNoSuchLink + } + + cur = next + case map[interface{}]interface{}: + next, ok := curv[val] + if !ok { + return nil, nil, ErrNoSuchLink + } + + cur = next + case []interface{}: + n, err := strconv.Atoi(val) + if err != nil { + return nil, nil, err + } + + if n < 0 || n >= len(curv) { + return nil, nil, ErrArrayOutOfRange + } + + cur = curv[n] + case cid.Cid: + return &node.Link{Cid: curv}, path[i:], nil + default: + return nil, nil, ErrNoLinks + } + } + + lnk, ok := cur.(cid.Cid) + if ok { + return &node.Link{Cid: lnk}, nil, nil + } + + jsonish, err := convertToJSONIsh(cur) + if err != nil { + return nil, nil, err + } + + return jsonish, nil, nil +} + +// Copy creates a copy of the Node. 
+func (n *Node) Copy() node.Node { + links := make([]*node.Link, len(n.links)) + copy(links, n.links) + + raw := make([]byte, len(n.raw)) + copy(raw, n.raw) + + tree := make([]string, len(n.tree)) + copy(tree, n.tree) + + return &Node{ + obj: copyObj(n.obj), + links: links, + raw: raw, + tree: tree, + cid: n.cid, + } +} + +func copyObj(i interface{}) interface{} { + switch i := i.(type) { + case map[string]interface{}: + out := make(map[string]interface{}) + for k, v := range i { + out[k] = copyObj(v) + } + return out + case map[interface{}]interface{}: + out := make(map[interface{}]interface{}) + for k, v := range i { + out[k] = copyObj(v) + } + return out + case []interface{}: + var out []interface{} + for _, v := range i { + out = append(out, copyObj(v)) + } + return out + default: + // TODO: do not be lazy + // being lazy for now + // use caution + return i + } +} + +// ResolveLink resolves a path and returns the raw Link at the end, as well as +// the possible tail of the path that was not resolved. +func (n *Node) ResolveLink(path []string) (*node.Link, []string, error) { + obj, rest, err := n.Resolve(path) + if err != nil { + return nil, nil, err + } + + lnk, ok := obj.(*node.Link) + if !ok { + return nil, rest, ErrNonLink + } + + return lnk, rest, nil +} + +// Tree returns a flattend array of paths at the given path for the given depth. 
+func (n *Node) Tree(path string, depth int) []string { + if path == "" && depth == -1 { + return n.tree + } + + var out []string + for _, t := range n.tree { + if !strings.HasPrefix(t, path) { + continue + } + + sub := strings.TrimLeft(t[len(path):], "/") + if sub == "" { + continue + } + + if depth < 0 { + out = append(out, sub) + continue + } + + parts := strings.Split(sub, "/") + if len(parts) <= depth { + out = append(out, sub) + } + } + return out +} + +func compute(obj interface{}) (tree []string, links []*node.Link, err error) { + err = traverse(obj, "", func(name string, val interface{}) error { + if name != "" { + tree = append(tree, name[1:]) + } + if lnk, ok := val.(cid.Cid); ok { + links = append(links, &node.Link{Cid: lnk}) + } + return nil + }) + if err != nil { + return nil, nil, err + } + + return tree, links, nil +} + +// Links lists all known links of the Node. +func (n *Node) Links() []*node.Link { + return n.links +} + +func traverse(obj interface{}, cur string, cb func(string, interface{}) error) error { + if err := cb(cur, obj); err != nil { + return err + } + + switch obj := obj.(type) { + case map[string]interface{}: + for k, v := range obj { + this := cur + "/" + k + if err := traverse(v, this, cb); err != nil { + return err + } + } + return nil + case map[interface{}]interface{}: + for k, v := range obj { + ks, ok := k.(string) + if !ok { + return errors.New("map key was not a string") + } + this := cur + "/" + ks + if err := traverse(v, this, cb); err != nil { + return err + } + } + return nil + case []interface{}: + for i, v := range obj { + this := cur + "/" + strconv.Itoa(i) + if err := traverse(v, this, cb); err != nil { + return err + } + } + return nil + default: + return nil + } +} + +// RawData returns the raw bytes that represent the Node as serialized CBOR. +func (n *Node) RawData() []byte { + return n.raw +} + +// Cid returns the canonical Cid of the NOde. 
+func (n *Node) Cid() cid.Cid { + return n.cid +} + +// Loggable returns a loggable representation of the Node. +func (n *Node) Loggable() map[string]interface{} { + return map[string]interface{}{ + "node_type": "cbor", + "cid": n.Cid(), + } +} + +// Size returns the size of the binary representation of the Node. +func (n *Node) Size() (uint64, error) { + return uint64(len(n.RawData())), nil +} + +// Stat returns stats about the Node. +// TODO: implement? +func (n *Node) Stat() (*node.NodeStat, error) { + return &node.NodeStat{}, nil +} + +// String returns the string representation of the CID of the Node. +func (n *Node) String() string { + return n.Cid().String() +} + +// MarshalJSON converts the Node into its JSON representation. +func (n *Node) MarshalJSON() ([]byte, error) { + out, err := convertToJSONIsh(n.obj) + if err != nil { + return nil, err + } + + return json.Marshal(out) +} + +// DumpObject marshals any object into its CBOR serialized byte representation +// TODO: rename +func DumpObject(obj interface{}) (out []byte, err error) { + return marshaller.Marshal(obj) +} + +func toSaneMap(n map[interface{}]interface{}) (interface{}, error) { + if lnk, ok := n["/"]; ok && len(n) == 1 { + lnkb, ok := lnk.([]byte) + if !ok { + return nil, ErrInvalidLink + } + + c, err := cid.Cast(lnkb) + if err != nil { + return nil, err + } + + return map[string]interface{}{"/": c}, nil + } + out := make(map[string]interface{}) + for k, v := range n { + ks, ok := k.(string) + if !ok { + return nil, ErrInvalidKeys + } + + obj, err := convertToJSONIsh(v) + if err != nil { + return nil, err + } + + out[ks] = obj + } + + return out, nil +} + +func convertToJSONIsh(v interface{}) (interface{}, error) { + switch v := v.(type) { + case map[interface{}]interface{}: + return toSaneMap(v) + case []interface{}: + var out []interface{} + if len(v) == 0 && v != nil { + return []interface{}{}, nil + } + for _, i := range v { + obj, err := convertToJSONIsh(i) + if err != nil { + return nil, 
err + } + + out = append(out, obj) + } + return out, nil + default: + return v, nil + } +} + +// FromJSON converts incoming JSON into a Node. +func FromJSON(r io.Reader, mhType uint64, mhLen int) (*Node, error) { + var m interface{} + err := json.NewDecoder(r).Decode(&m) + if err != nil { + return nil, err + } + + obj, err := convertToCborIshObj(m) + if err != nil { + return nil, err + } + + return WrapObject(obj, mhType, mhLen) +} + +func convertToCborIshObj(i interface{}) (interface{}, error) { + switch v := i.(type) { + case map[string]interface{}: + if len(v) == 0 && v != nil { + return v, nil + } + + if lnk, ok := v["/"]; ok && len(v) == 1 { + // special case for links + vstr, ok := lnk.(string) + if !ok { + return nil, ErrNonStringLink + } + + return cid.Decode(vstr) + } + + for a, b := range v { + val, err := convertToCborIshObj(b) + if err != nil { + return nil, err + } + + v[a] = val + } + return v, nil + case []interface{}: + if len(v) == 0 && v != nil { + return v, nil + } + + var out []interface{} + for _, o := range v { + obj, err := convertToCborIshObj(o) + if err != nil { + return nil, err + } + + out = append(out, obj) + } + + return out, nil + default: + return v, nil + } +} + +func castBytesToCid(x []byte) (cid.Cid, error) { + if len(x) == 0 { + return cid.Cid{}, ErrEmptyLink + } + + // TODO: manually doing multibase checking here since our deps don't + // support binary multibase yet + if x[0] != 0 { + return cid.Cid{}, ErrInvalidMultibase + } + + c, err := cid.Cast(x[1:]) + if err != nil { + return cid.Cid{}, ErrInvalidLink + } + + return c, nil +} + +func castCidToBytes(link cid.Cid) ([]byte, error) { + if !link.Defined() { + return nil, ErrEmptyLink + } + return append([]byte{0}, link.Bytes()...), nil +} diff --git a/vendor/github.com/ipfs/go-ipld-cbor/readable.go b/vendor/github.com/ipfs/go-ipld-cbor/readable.go new file mode 100644 index 0000000000..cc9d03facc --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-cbor/readable.go @@ -0,0 +1,33 
@@ +package cbornode + +import ( + "bufio" + "bytes" + + cbor "github.com/polydawn/refmt/cbor" + "github.com/polydawn/refmt/pretty" + "github.com/polydawn/refmt/shared" +) + +//HumanReadable returns a string representation of a CBOR blob +func HumanReadable(blob []byte) (string, error) { + reader := bytes.NewReader(blob) + + var buf bytes.Buffer + writer := bufio.NewWriter(&buf) + + err := shared.TokenPump{ + TokenSource: cbor.NewDecoder(cbor.DecodeOptions{}, reader), + TokenSink: pretty.NewEncoder(writer), + }.Run() + + if err != nil { + return "", err + } + + if err = writer.Flush(); err != nil { + return "", err + } + + return buf.String(), nil +} diff --git a/vendor/github.com/ipfs/go-ipld-cbor/refmt.go b/vendor/github.com/ipfs/go-ipld-cbor/refmt.go new file mode 100644 index 0000000000..e8bd7ded51 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-cbor/refmt.go @@ -0,0 +1,72 @@ +package cbornode + +import ( + "math/big" + + cid "github.com/ipfs/go-cid" + + encoding "github.com/ipfs/go-ipld-cbor/encoding" + + "github.com/polydawn/refmt/obj/atlas" +) + +// This atlas describes the CBOR Tag (42) for IPLD links, such that refmt can marshal and unmarshal them +var cidAtlasEntry = atlas.BuildEntry(cid.Cid{}). + UseTag(CBORTagLink). + Transform(). + TransformMarshal(atlas.MakeMarshalTransformFunc( + castCidToBytes, + )). + TransformUnmarshal(atlas.MakeUnmarshalTransformFunc( + castBytesToCid, + )). + Complete() + +// BigIntAtlasEntry gives a reasonable default encoding for big.Int. It is not +// included in the entries by default. +var BigIntAtlasEntry = atlas.BuildEntry(big.Int{}).Transform(). + TransformMarshal(atlas.MakeMarshalTransformFunc( + func(i big.Int) ([]byte, error) { + return i.Bytes(), nil + })). + TransformUnmarshal(atlas.MakeUnmarshalTransformFunc( + func(x []byte) (big.Int, error) { + return *big.NewInt(0).SetBytes(x), nil + })). + Complete() + +// CborAtlas is the refmt.Atlas used by the CBOR IPLD decoder/encoder. 
+var CborAtlas atlas.Atlas +var cborSortingMode = atlas.KeySortMode_RFC7049 +var atlasEntries = []*atlas.AtlasEntry{cidAtlasEntry} + +var ( + cloner encoding.PooledCloner + unmarshaller encoding.PooledUnmarshaller + marshaller encoding.PooledMarshaller +) + +func init() { + rebuildAtlas() +} + +func rebuildAtlas() { + CborAtlas = atlas.MustBuild(atlasEntries...). + WithMapMorphism(atlas.MapMorphism{KeySortMode: atlas.KeySortMode_RFC7049}) + + marshaller = encoding.NewPooledMarshaller(CborAtlas) + unmarshaller = encoding.NewPooledUnmarshaller(CborAtlas) + cloner = encoding.NewPooledCloner(CborAtlas) +} + +// RegisterCborType allows to register a custom cbor type +func RegisterCborType(i interface{}) { + var entry *atlas.AtlasEntry + if ae, ok := i.(*atlas.AtlasEntry); ok { + entry = ae + } else { + entry = atlas.BuildEntry(i).StructMap().AutogenerateWithSortingScheme(atlas.KeySortMode_RFC7049).Complete() + } + atlasEntries = append(atlasEntries, entry) + rebuildAtlas() +} diff --git a/vendor/github.com/ipfs/go-ipld-cbor/store.go b/vendor/github.com/ipfs/go-ipld-cbor/store.go new file mode 100644 index 0000000000..05f51e345a --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-cbor/store.go @@ -0,0 +1,179 @@ +package cbornode + +import ( + "bytes" + "context" + "fmt" + + block "github.com/ipfs/go-block-format" + cid "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" + recbor "github.com/polydawn/refmt/cbor" + atlas "github.com/polydawn/refmt/obj/atlas" + cbg "github.com/whyrusleeping/cbor-gen" +) + +// IpldStore wraps a Blockstore and provides an interface for storing and retrieving CBOR encoded data. +type IpldStore interface { + Get(ctx context.Context, c cid.Cid, out interface{}) error + Put(ctx context.Context, v interface{}) (cid.Cid, error) +} + +// IpldBlockstore defines a subset of the go-ipfs-blockstore Blockstore interface providing methods +// for storing and retrieving block-centered data. 
+type IpldBlockstore interface { + Get(cid.Cid) (block.Block, error) + Put(block.Block) error +} + +// BasicIpldStore wraps and IpldBlockstore and implements the IpldStore interface. +type BasicIpldStore struct { + Blocks IpldBlockstore + Atlas *atlas.Atlas +} + +var _ IpldStore = &BasicIpldStore{} + +// NewCborStore returns an IpldStore implementation backed by the provided IpldBlockstore. +func NewCborStore(bs IpldBlockstore) *BasicIpldStore { + return &BasicIpldStore{Blocks: bs} +} + +// Get reads and unmarshals the content at `c` into `out`. +func (s *BasicIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) error { + blk, err := s.Blocks.Get(c) + if err != nil { + return err + } + + cu, ok := out.(cbg.CBORUnmarshaler) + if ok { + if err := cu.UnmarshalCBOR(bytes.NewReader(blk.RawData())); err != nil { + return NewSerializationError(err) + } + return nil + } + + if s.Atlas == nil { + return DecodeInto(blk.RawData(), out) + } else { + return recbor.UnmarshalAtlased(recbor.DecodeOptions{}, blk.RawData(), out, *s.Atlas) + } +} + +type cidProvider interface { + Cid() cid.Cid +} + +// Put marshals and writes content `v` to the backing blockstore returning its CID. 
+func (s *BasicIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) { + mhType := uint64(mh.BLAKE2B_MIN + 31) + mhLen := -1 + codec := uint64(cid.DagCBOR) + + var expCid cid.Cid + if c, ok := v.(cidProvider); ok { + expCid := c.Cid() + pref := expCid.Prefix() + mhType = pref.MhType + mhLen = pref.MhLength + codec = pref.Codec + } + + cm, ok := v.(cbg.CBORMarshaler) + if ok { + buf := new(bytes.Buffer) + if err := cm.MarshalCBOR(buf); err != nil { + return cid.Undef, NewSerializationError(err) + } + + pref := cid.Prefix{ + Codec: codec, + MhType: mhType, + MhLength: mhLen, + Version: 1, + } + c, err := pref.Sum(buf.Bytes()) + if err != nil { + return cid.Undef, err + } + + blk, err := block.NewBlockWithCid(buf.Bytes(), c) + if err != nil { + return cid.Undef, err + } + + if err := s.Blocks.Put(blk); err != nil { + return cid.Undef, err + } + + blkCid := blk.Cid() + if expCid != cid.Undef && blkCid != expCid { + return cid.Undef, fmt.Errorf("your object is not being serialized the way it expects to") + } + + return blkCid, nil + } + + nd, err := WrapObject(v, mhType, mhLen) + if err != nil { + return cid.Undef, err + } + + if err := s.Blocks.Put(nd); err != nil { + return cid.Undef, err + } + + ndCid := nd.Cid() + if expCid != cid.Undef && ndCid != expCid { + return cid.Undef, fmt.Errorf("your object is not being serialized the way it expects to") + } + + return ndCid, nil +} + +func NewSerializationError(err error) error { + return SerializationError{err} +} + +type SerializationError struct { + err error +} + +func (se SerializationError) Error() string { + return se.err.Error() +} + +func (se SerializationError) Unwrap() error { + return se.err +} + +func (se SerializationError) Is(o error) bool { + _, ok := o.(*SerializationError) + return ok +} + +func NewMemCborStore() IpldStore { + return NewCborStore(newMockBlocks()) +} + +type mockBlocks struct { + data map[cid.Cid]block.Block +} + +func newMockBlocks() *mockBlocks { + return 
&mockBlocks{make(map[cid.Cid]block.Block)} +} + +func (mb *mockBlocks) Get(c cid.Cid) (block.Block, error) { + d, ok := mb.data[c] + if ok { + return d, nil + } + return nil, fmt.Errorf("Not Found") +} + +func (mb *mockBlocks) Put(b block.Block) error { + mb.data[b.Cid()] = b + return nil +} diff --git a/vendor/github.com/ipfs/go-ipld-format/.travis.yml b/vendor/github.com/ipfs/go-ipld-format/.travis.yml new file mode 100644 index 0000000000..936d6a426e --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-format/.travis.yml @@ -0,0 +1,30 @@ +os: + - linux + +language: go + +go: + - 1.14.x + +env: + global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/pkg/mod + - $HOME/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/ipfs/go-ipld-format/LICENSE b/vendor/github.com/ipfs/go-ipld-format/LICENSE new file mode 100644 index 0000000000..26100332ba --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-format/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Jeromy Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/ipfs/go-ipld-format/Makefile b/vendor/github.com/ipfs/go-ipld-format/Makefile new file mode 100644 index 0000000000..7811c099ea --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-format/Makefile @@ -0,0 +1,15 @@ +gx: + go get github.com/whyrusleeping/gx + go get github.com/whyrusleeping/gx-go + +covertools: + go get github.com/mattn/goveralls + go get golang.org/x/tools/cmd/cover + +deps: gx covertools + gx --verbose install --global + gx-go rewrite + +publish: + gx-go rewrite --undo + diff --git a/vendor/github.com/ipfs/go-ipld-format/README.md b/vendor/github.com/ipfs/go-ipld-format/README.md new file mode 100644 index 0000000000..425c80c7d9 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-format/README.md @@ -0,0 +1,38 @@ +go-ipld-format +================== + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) +[![Coverage Status](https://codecov.io/gh/ipfs/go-ipld-format/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs/go-ipld-format/branch/master) +[![Travis CI](https://travis-ci.org/ipfs/go-ipld-format.svg?branch=master)](https://travis-ci.org/ipfs/go-ipld-format) + +> go-ipld-format is a set of interfaces that a type needs to implement in order to be a part of the ipld merkle-forest. 
+ +## Lead Maintainer + +[Eric Myhre](https://github.com/warpfork) + +## Table of Contents + +- [Install](#install) +- [Usage](#usage) +- [API](#api) +- [Contribute](#contribute) +- [License](#license) + +## Install + +```sh +make install +``` + +## Contribute + +PRs are welcome! + +Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + +## License + +MIT © Jeromy Johnson diff --git a/vendor/github.com/ipfs/go-ipld-format/batch.go b/vendor/github.com/ipfs/go-ipld-format/batch.go new file mode 100644 index 0000000000..28491032c4 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-format/batch.go @@ -0,0 +1,301 @@ +package format + +import ( + "context" + "errors" + "runtime" + + cid "github.com/ipfs/go-cid" +) + +// parallelBatchCommits is the number of batch commits that can be in-flight before blocking. +// TODO(ipfs/go-ipfs#4299): Experiment with multiple datastores, storage +// devices, and CPUs to find the right value/formula. +var parallelCommits = runtime.NumCPU() + +// ErrNotCommited is returned when closing a batch that hasn't been successfully +// committed. +var ErrNotCommited = errors.New("error: batch not commited") + +// ErrClosed is returned when operating on a batch that has already been closed. +var ErrClosed = errors.New("error: batch closed") + +// NewBatch returns a node buffer (Batch) that buffers nodes internally and +// commits them to the underlying DAGService in batches. Use this if you intend +// to add or remove a lot of nodes all at once. +// +// If the passed context is canceled, any in-progress commits are aborted. +// +func NewBatch(ctx context.Context, na NodeAdder, opts ...BatchOption) *Batch { + ctx, cancel := context.WithCancel(ctx) + bopts := defaultBatchOptions + for _, o := range opts { + o(&bopts) + } + + // Commit numCPU batches at once, but split the maximum buffer size over all commits in flight. 
+ bopts.maxSize /= parallelCommits + bopts.maxNodes /= parallelCommits + return &Batch{ + na: na, + ctx: ctx, + cancel: cancel, + commitResults: make(chan error, parallelCommits), + opts: bopts, + } +} + +// Batch is a buffer for batching adds to a dag. +type Batch struct { + na NodeAdder + + ctx context.Context + cancel func() + + activeCommits int + err error + commitResults chan error + + nodes []Node + size int + + opts batchOptions +} + +func (t *Batch) processResults() { + for t.activeCommits > 0 { + select { + case err := <-t.commitResults: + t.activeCommits-- + if err != nil { + t.setError(err) + return + } + default: + return + } + } +} + +func (t *Batch) asyncCommit() { + numBlocks := len(t.nodes) + if numBlocks == 0 { + return + } + if t.activeCommits >= parallelCommits { + select { + case err := <-t.commitResults: + t.activeCommits-- + + if err != nil { + t.setError(err) + return + } + case <-t.ctx.Done(): + t.setError(t.ctx.Err()) + return + } + } + go func(ctx context.Context, b []Node, result chan error, na NodeAdder) { + select { + case result <- na.AddMany(ctx, b): + case <-ctx.Done(): + } + }(t.ctx, t.nodes, t.commitResults, t.na) + + t.activeCommits++ + t.nodes = make([]Node, 0, numBlocks) + t.size = 0 + + return +} + +// Add adds a node to the batch and commits the batch if necessary. +func (t *Batch) Add(ctx context.Context, nd Node) error { + return t.AddMany(ctx, []Node{nd}) +} + +// AddMany many calls Add for every given Node, thus batching and +// commiting them as needed. +func (t *Batch) AddMany(ctx context.Context, nodes []Node) error { + if t.err != nil { + return t.err + } + // Not strictly necessary but allows us to catch errors early. + t.processResults() + + if t.err != nil { + return t.err + } + + t.nodes = append(t.nodes, nodes...) 
+ for _, nd := range nodes { + t.size += len(nd.RawData()) + } + + if t.size > t.opts.maxSize || len(t.nodes) > t.opts.maxNodes { + t.asyncCommit() + } + return t.err +} + +// Commit commits batched nodes. +func (t *Batch) Commit() error { + if t.err != nil { + return t.err + } + + t.asyncCommit() + +loop: + for t.activeCommits > 0 { + select { + case err := <-t.commitResults: + t.activeCommits-- + if err != nil { + t.setError(err) + break loop + } + case <-t.ctx.Done(): + t.setError(t.ctx.Err()) + break loop + } + } + + return t.err +} + +func (t *Batch) setError(err error) { + t.err = err + + t.cancel() + + // Drain as much as we can without blocking. +loop: + for { + select { + case <-t.commitResults: + default: + break loop + } + } + + // Be nice and cleanup. These can take a *lot* of memory. + t.commitResults = nil + t.na = nil + t.ctx = nil + t.nodes = nil + t.size = 0 + t.activeCommits = 0 +} + +// BatchOption provides a way of setting internal options of +// a Batch. +// +// See this post about the "functional options" pattern: +// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis +type BatchOption func(o *batchOptions) + +type batchOptions struct { + maxSize int + maxNodes int +} + +var defaultBatchOptions = batchOptions{ + maxSize: 8 << 20, + + // By default, only batch up to 128 nodes at a time. + // The current implementation of flatfs opens this many file + // descriptors at the same time for the optimized batch write. + maxNodes: 128, +} + +// MaxSizeBatchOption sets the maximum amount of buffered data before writing +// blocks. +func MaxSizeBatchOption(size int) BatchOption { + return func(o *batchOptions) { + o.maxSize = size + } +} + +// MaxNodesBatchOption sets the maximum number of buffered nodes before writing +// blocks. 
+func MaxNodesBatchOption(num int) BatchOption { + return func(o *batchOptions) { + o.maxNodes = num + } +} + +// BufferedDAG implements DAGService using a Batch NodeAdder to wrap add +// operations in the given DAGService. It will trigger Commit() before any +// non-Add operations, but otherwise calling Commit() is left to the user. +type BufferedDAG struct { + ds DAGService + b *Batch +} + +// NewBufferedDAG creates a BufferedDAG using the given DAGService and the +// given options for the Batch NodeAdder. +func NewBufferedDAG(ctx context.Context, ds DAGService, opts ...BatchOption) *BufferedDAG { + return &BufferedDAG{ + ds: ds, + b: NewBatch(ctx, ds, opts...), + } +} + +// Commit calls commit on the Batch. +func (bd *BufferedDAG) Commit() error { + return bd.b.Commit() +} + +// Add adds a new node using Batch. +func (bd *BufferedDAG) Add(ctx context.Context, n Node) error { + return bd.b.Add(ctx, n) +} + +// AddMany adds many nodes using Batch. +func (bd *BufferedDAG) AddMany(ctx context.Context, nds []Node) error { + return bd.b.AddMany(ctx, nds) +} + +// Get commits and gets a node from the DAGService. +func (bd *BufferedDAG) Get(ctx context.Context, c cid.Cid) (Node, error) { + err := bd.b.Commit() + if err != nil { + return nil, err + } + return bd.ds.Get(ctx, c) +} + +// GetMany commits and gets nodes from the DAGService. +func (bd *BufferedDAG) GetMany(ctx context.Context, cs []cid.Cid) <-chan *NodeOption { + err := bd.b.Commit() + if err != nil { + ch := make(chan *NodeOption, 1) + defer close(ch) + ch <- &NodeOption{ + Node: nil, + Err: err, + } + return ch + } + return bd.ds.GetMany(ctx, cs) +} + +// Remove commits and removes a node from the DAGService. +func (bd *BufferedDAG) Remove(ctx context.Context, c cid.Cid) error { + err := bd.b.Commit() + if err != nil { + return err + } + return bd.ds.Remove(ctx, c) +} + +// RemoveMany commits and removes nodes from the DAGService. 
+func (bd *BufferedDAG) RemoveMany(ctx context.Context, cs []cid.Cid) error { + err := bd.b.Commit() + if err != nil { + return err + } + return bd.ds.RemoveMany(ctx, cs) +} diff --git a/vendor/github.com/ipfs/go-ipld-format/codecov.yml b/vendor/github.com/ipfs/go-ipld-format/codecov.yml new file mode 100644 index 0000000000..5f88a9ea27 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-format/codecov.yml @@ -0,0 +1,3 @@ +coverage: + range: "50...100" +comment: off diff --git a/vendor/github.com/ipfs/go-ipld-format/coding.go b/vendor/github.com/ipfs/go-ipld-format/coding.go new file mode 100644 index 0000000000..e202f75a14 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-format/coding.go @@ -0,0 +1,62 @@ +package format + +import ( + "fmt" + "sync" + + blocks "github.com/ipfs/go-block-format" +) + +// DecodeBlockFunc functions decode blocks into nodes. +type DecodeBlockFunc func(block blocks.Block) (Node, error) + +type BlockDecoder interface { + Register(codec uint64, decoder DecodeBlockFunc) + Decode(blocks.Block) (Node, error) +} +type safeBlockDecoder struct { + // Can be replaced with an RCU if necessary. + lock sync.RWMutex + decoders map[uint64]DecodeBlockFunc +} + +// Register registers decoder for all blocks with the passed codec. +// +// This will silently replace any existing registered block decoders. +func (d *safeBlockDecoder) Register(codec uint64, decoder DecodeBlockFunc) { + d.lock.Lock() + defer d.lock.Unlock() + d.decoders[codec] = decoder +} + +func (d *safeBlockDecoder) Decode(block blocks.Block) (Node, error) { + // Short-circuit by cast if we already have a Node. 
+ if node, ok := block.(Node); ok { + return node, nil + } + + ty := block.Cid().Type() + + d.lock.RLock() + decoder, ok := d.decoders[ty] + d.lock.RUnlock() + + if ok { + return decoder(block) + } else { + // TODO: get the *long* name for this format + return nil, fmt.Errorf("unrecognized object type: %d", ty) + } +} + +var DefaultBlockDecoder BlockDecoder = &safeBlockDecoder{decoders: make(map[uint64]DecodeBlockFunc)} + +// Decode decodes the given block using the default BlockDecoder. +func Decode(block blocks.Block) (Node, error) { + return DefaultBlockDecoder.Decode(block) +} + +// Register registers block decoders with the default BlockDecoder. +func Register(codec uint64, decoder DecodeBlockFunc) { + DefaultBlockDecoder.Register(codec, decoder) +} diff --git a/vendor/github.com/ipfs/go-ipld-format/daghelpers.go b/vendor/github.com/ipfs/go-ipld-format/daghelpers.go new file mode 100644 index 0000000000..70ad0b91ea --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-format/daghelpers.go @@ -0,0 +1,118 @@ +package format + +import ( + "context" + + cid "github.com/ipfs/go-cid" +) + +// GetLinks returns the CIDs of the children of the given node. Prefer this +// method over looking up the node itself and calling `Links()` on it as this +// method may be able to use a link cache. +func GetLinks(ctx context.Context, ng NodeGetter, c cid.Cid) ([]*Link, error) { + if c.Type() == cid.Raw { + return nil, nil + } + if gl, ok := ng.(LinkGetter); ok { + return gl.GetLinks(ctx, c) + } + node, err := ng.Get(ctx, c) + if err != nil { + return nil, err + } + return node.Links(), nil +} + +// GetDAG will fill out all of the links of the given Node. +// It returns an array of NodePromise with the linked nodes all in the proper +// order. 
+func GetDAG(ctx context.Context, ds NodeGetter, root Node) []*NodePromise { + var cids []cid.Cid + for _, lnk := range root.Links() { + cids = append(cids, lnk.Cid) + } + + return GetNodes(ctx, ds, cids) +} + +// GetNodes returns an array of 'FutureNode' promises, with each corresponding +// to the key with the same index as the passed in keys +func GetNodes(ctx context.Context, ds NodeGetter, keys []cid.Cid) []*NodePromise { + + // Early out if no work to do + if len(keys) == 0 { + return nil + } + + promises := make([]*NodePromise, len(keys)) + for i := range keys { + promises[i] = NewNodePromise(ctx) + } + + dedupedKeys := dedupeKeys(keys) + go func() { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + nodechan := ds.GetMany(ctx, dedupedKeys) + + for count := 0; count < len(keys); { + select { + case opt, ok := <-nodechan: + if !ok { + for _, p := range promises { + p.Fail(ErrNotFound) + } + return + } + + if opt.Err != nil { + for _, p := range promises { + p.Fail(opt.Err) + } + return + } + + nd := opt.Node + c := nd.Cid() + for i, lnk_c := range keys { + if c.Equals(lnk_c) { + count++ + promises[i].Send(nd) + } + } + case <-ctx.Done(): + return + } + } + }() + return promises +} + +func Copy(ctx context.Context, from, to DAGService, root cid.Cid) error { + node, err := from.Get(ctx, root) + if err != nil { + return err + } + links := node.Links() + for _, link := range links { + err := Copy(ctx, from, to, link.Cid) + if err != nil { + return err + } + } + err = to.Add(ctx, node) + if err != nil { + return err + } + return nil +} + +// Remove duplicates from a list of keys +func dedupeKeys(cids []cid.Cid) []cid.Cid { + set := cid.NewSet() + for _, c := range cids { + set.Add(c) + } + return set.Keys() +} diff --git a/vendor/github.com/ipfs/go-ipld-format/format.go b/vendor/github.com/ipfs/go-ipld-format/format.go new file mode 100644 index 0000000000..990e1ce0f5 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-format/format.go @@ -0,0 +1,90 @@ 
+package format + +import ( + "context" + "fmt" + + blocks "github.com/ipfs/go-block-format" + + cid "github.com/ipfs/go-cid" +) + +type Resolver interface { + // Resolve resolves a path through this node, stopping at any link boundary + // and returning the object found as well as the remaining path to traverse + Resolve(path []string) (interface{}, []string, error) + + // Tree lists all paths within the object under 'path', and up to the given depth. + // To list the entire object (similar to `find .`) pass "" and -1 + Tree(path string, depth int) []string +} + +// Node is the base interface all IPLD nodes must implement. +// +// Nodes are **Immutable** and all methods defined on the interface are +// **Thread Safe**. +type Node interface { + blocks.Block + Resolver + + // ResolveLink is a helper function that calls resolve and asserts the + // output is a link + ResolveLink(path []string) (*Link, []string, error) + + // Copy returns a deep copy of this node + Copy() Node + + // Links is a helper function that returns all links within this object + Links() []*Link + + // TODO: not sure if stat deserves to stay + Stat() (*NodeStat, error) + + // Size returns the size in bytes of the serialized object + Size() (uint64, error) +} + +// Link represents an IPFS Merkle DAG Link between Nodes. +type Link struct { + // utf string name. should be unique per object + Name string // utf8 + + // cumulative size of target object + Size uint64 + + // multihash of the target object + Cid cid.Cid +} + +// NodeStat is a statistics object for a Node. Mostly sizes. 
+type NodeStat struct { + Hash string + NumLinks int // number of links in link table + BlockSize int // size of the raw, encoded data + LinksSize int // size of the links segment + DataSize int // size of the data segment + CumulativeSize int // cumulative size of object and its references +} + +func (ns NodeStat) String() string { + f := "NodeStat{NumLinks: %d, BlockSize: %d, LinksSize: %d, DataSize: %d, CumulativeSize: %d}" + return fmt.Sprintf(f, ns.NumLinks, ns.BlockSize, ns.LinksSize, ns.DataSize, ns.CumulativeSize) +} + +// MakeLink creates a link to the given node +func MakeLink(n Node) (*Link, error) { + s, err := n.Size() + if err != nil { + return nil, err + } + + return &Link{ + Size: s, + Cid: n.Cid(), + }, nil +} + +// GetNode returns the MDAG Node that this link points to +func (l *Link) GetNode(ctx context.Context, serv NodeGetter) (Node, error) { + return serv.Get(ctx, l.Cid) +} diff --git a/vendor/github.com/ipfs/go-ipld-format/go.mod b/vendor/github.com/ipfs/go-ipld-format/go.mod new file mode 100644 index 0000000000..0aab53b207 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-format/go.mod @@ -0,0 +1,10 @@ +module github.com/ipfs/go-ipld-format + +require ( + github.com/ipfs/go-block-format v0.0.2 + github.com/ipfs/go-cid v0.0.2 + github.com/libp2p/go-buffer-pool v0.0.2 // indirect + github.com/multiformats/go-multihash v0.0.1 +) + +go 1.13 diff --git a/vendor/github.com/ipfs/go-ipld-format/go.sum b/vendor/github.com/ipfs/go-ipld-format/go.sum new file mode 100644 index 0000000000..f900301234 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-format/go.sum @@ -0,0 +1,30 @@ +github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= 
+github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-cid v0.0.1 h1:GBjWPktLnNyX0JiQCNFpUuUSoMw5KMyqrsejHYlILBE= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.2 h1:tuuKaZPU1M6HcejsO3AcYWW8sZ8MTvyxfc4uqB4eFE8= +github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= +github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= 
+github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d h1:Z0Ahzd7HltpJtjAHHxX8QFP3j1yYgiuvjbjRzDj/KH0= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/ipfs/go-ipld-format/merkledag.go b/vendor/github.com/ipfs/go-ipld-format/merkledag.go new file mode 100644 index 0000000000..755b90f6f5 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-format/merkledag.go @@ -0,0 +1,67 @@ +package format + +import ( + "context" + "fmt" + + cid "github.com/ipfs/go-cid" +) + +var ErrNotFound = fmt.Errorf("merkledag: not found") + +// Either a node or an error. +type NodeOption struct { + Node Node + Err error +} + +// The basic Node resolution service. +type NodeGetter interface { + // Get retrieves nodes by CID. Depending on the NodeGetter + // implementation, this may involve fetching the Node from a remote + // machine; consider setting a deadline in the context. + Get(context.Context, cid.Cid) (Node, error) + + // GetMany returns a channel of NodeOptions given a set of CIDs. + GetMany(context.Context, []cid.Cid) <-chan *NodeOption +} + +// NodeAdder adds nodes to a DAG. +type NodeAdder interface { + // Add adds a node to this DAG. + Add(context.Context, Node) error + + // AddMany adds many nodes to this DAG. + // + // Consider using the Batch NodeAdder (`NewBatch`) if you make + // extensive use of this function. + AddMany(context.Context, []Node) error +} + +// NodeGetters can optionally implement this interface to make finding linked +// objects faster. 
+type LinkGetter interface { + NodeGetter + + // TODO(ipfs/go-ipld-format#9): This should return []cid.Cid + + // GetLinks returns the children of the node refered to by the given + // CID. + GetLinks(ctx context.Context, nd cid.Cid) ([]*Link, error) +} + +// DAGService is an IPFS Merkle DAG service. +type DAGService interface { + NodeGetter + NodeAdder + + // Remove removes a node from this DAG. + // + // Remove returns no error if the requested node is not present in this DAG. + Remove(context.Context, cid.Cid) error + + // RemoveMany removes many nodes from this DAG. + // + // It returns success even if the nodes were not present in the DAG. + RemoveMany(context.Context, []cid.Cid) error +} diff --git a/vendor/github.com/ipfs/go-ipld-format/navipld.go b/vendor/github.com/ipfs/go-ipld-format/navipld.go new file mode 100644 index 0000000000..1298673779 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-format/navipld.go @@ -0,0 +1,149 @@ +package format + +import ( + "context" + + cid "github.com/ipfs/go-cid" +) + +// NavigableIPLDNode implements the `NavigableNode` interface wrapping +// an IPLD `Node` and providing support for node promises. +type NavigableIPLDNode struct { + node Node + + // The CID of each child of the node. + childCIDs []cid.Cid + + // Node promises for child nodes requested. + childPromises []*NodePromise + // TODO: Consider encapsulating it in a single structure alongside `childCIDs`. + + nodeGetter NodeGetter + // TODO: Should this be stored in the `Walker`'s context to avoid passing + // it along to every node? It seems like a structure that doesn't need + // to be replicated (the entire DAG will use the same `NodeGetter`). +} + +// NewNavigableIPLDNode returns a `NavigableIPLDNode` wrapping the provided +// `node`. 
+func NewNavigableIPLDNode(node Node, nodeGetter NodeGetter) *NavigableIPLDNode { + nn := &NavigableIPLDNode{ + node: node, + nodeGetter: nodeGetter, + } + + nn.childCIDs = getLinkCids(node) + nn.childPromises = make([]*NodePromise, len(nn.childCIDs)) + + return nn +} + +// FetchChild implements the `NavigableNode` interface using node promises +// to preload the following child nodes to `childIndex` leaving them ready +// for subsequent `FetchChild` calls. +func (nn *NavigableIPLDNode) FetchChild(ctx context.Context, childIndex uint) (NavigableNode, error) { + // This function doesn't check that `childIndex` is valid, that's + // the `Walker` responsibility. + + // If we drop to <= preloadSize/2 preloading nodes, preload the next 10. + for i := childIndex; i < childIndex+preloadSize/2 && i < uint(len(nn.childPromises)); i++ { + // TODO: Check if canceled. + if nn.childPromises[i] == nil { + nn.preload(ctx, i) + break + } + } + + child, err := nn.getPromiseValue(ctx, childIndex) + + switch err { + case nil: + case context.DeadlineExceeded, context.Canceled: + if ctx.Err() != nil { + return nil, ctx.Err() + } + + // In this case, the context used to *preload* the node (in a previous + // `FetchChild` call) has been canceled. We need to retry the load with + // the current context and we might as well preload some extra nodes + // while we're at it. + nn.preload(ctx, childIndex) + child, err = nn.getPromiseValue(ctx, childIndex) + if err != nil { + return nil, err + } + default: + return nil, err + } + + return NewNavigableIPLDNode(child, nn.nodeGetter), nil +} + +// Number of nodes to preload every time a child is requested. +// TODO: Give more visibility to this constant, it could be an attribute +// set in the `Walker` context that gets passed in `FetchChild`. +const preloadSize = 10 + +// Preload at most `preloadSize` child nodes from `beg` through promises +// created using this `ctx`. 
+func (nn *NavigableIPLDNode) preload(ctx context.Context, beg uint) { + end := beg + preloadSize + if end >= uint(len(nn.childCIDs)) { + end = uint(len(nn.childCIDs)) + } + + copy(nn.childPromises[beg:], GetNodes(ctx, nn.nodeGetter, nn.childCIDs[beg:end])) +} + +// Fetch the actual node (this is the blocking part of the mechanism) +// and invalidate the promise. `preload` should always be called first +// for the `childIndex` being fetch. +// +// TODO: Include `preload` into the beginning of this function? +// (And collapse the two calls in `FetchChild`). +func (nn *NavigableIPLDNode) getPromiseValue(ctx context.Context, childIndex uint) (Node, error) { + value, err := nn.childPromises[childIndex].Get(ctx) + nn.childPromises[childIndex] = nil + return value, err +} + +// Get the CID of all the links of this `node`. +func getLinkCids(node Node) []cid.Cid { + links := node.Links() + out := make([]cid.Cid, 0, len(links)) + + for _, l := range links { + out = append(out, l.Cid) + } + return out +} + +// GetIPLDNode returns the IPLD `Node` wrapped into this structure. +func (nn *NavigableIPLDNode) GetIPLDNode() Node { + return nn.node +} + +// ChildTotal implements the `NavigableNode` returning the number +// of links (of child nodes) in this node. +func (nn *NavigableIPLDNode) ChildTotal() uint { + return uint(len(nn.GetIPLDNode().Links())) +} + +// ExtractIPLDNode is a helper function that takes a `NavigableNode` +// and returns the IPLD `Node` wrapped inside. Used in the `Visitor` +// function. +// TODO: Check for errors to avoid a panic? +func ExtractIPLDNode(node NavigableNode) Node { + return node.(*NavigableIPLDNode).GetIPLDNode() +} + +// TODO: `Cleanup` is not supported at the moment in the `Walker`. +// +// Called in `Walker.up()` when the node is not part of the path anymore. 
+//func (nn *NavigableIPLDNode) Cleanup() { +// // TODO: Ideally this would be the place to issue a context `cancel()` +// // but since the DAG reader uses multiple contexts in the same session +// // (through `Read` and `CtxReadFull`) we would need to store an array +// // with the multiple contexts in `NavigableIPLDNode` with its corresponding +// // cancel functions. +//} diff --git a/vendor/github.com/ipfs/go-ipld-format/promise.go b/vendor/github.com/ipfs/go-ipld-format/promise.go new file mode 100644 index 0000000000..02743b03c8 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-format/promise.go @@ -0,0 +1,66 @@ +package format + +import ( + "context" +) + +// NodePromise provides a promise like interface for a dag Node +// the first call to Get will block until the Node is received +// from its internal channels, subsequent calls will return the +// cached node. +// +// Thread Safety: This is multiple-consumer/single-producer safe. +func NewNodePromise(ctx context.Context) *NodePromise { + return &NodePromise{ + done: make(chan struct{}), + ctx: ctx, + } +} + +type NodePromise struct { + value Node + err error + done chan struct{} + + ctx context.Context +} + +// Call this function to fail a promise. +// +// Once a promise has been failed or fulfilled, further attempts to fail it will +// be silently dropped. +func (np *NodePromise) Fail(err error) { + if np.err != nil || np.value != nil { + // Already filled. + return + } + np.err = err + close(np.done) +} + +// Fulfill this promise. +// +// Once a promise has been fulfilled or failed, calling this function will +// panic. +func (np *NodePromise) Send(nd Node) { + // if promise has a value, don't fail it + if np.err != nil || np.value != nil { + panic("already filled") + } + np.value = nd + close(np.done) +} + +// Get the value of this promise. +// +// This function is safe to call concurrently from any number of goroutines. 
+func (np *NodePromise) Get(ctx context.Context) (Node, error) { + select { + case <-np.done: + return np.value, np.err + case <-np.ctx.Done(): + return nil, np.ctx.Err() + case <-ctx.Done(): + return nil, ctx.Err() + } +} diff --git a/vendor/github.com/ipfs/go-ipld-format/walker.go b/vendor/github.com/ipfs/go-ipld-format/walker.go new file mode 100644 index 0000000000..81380936b3 --- /dev/null +++ b/vendor/github.com/ipfs/go-ipld-format/walker.go @@ -0,0 +1,436 @@ +package format + +import ( + "context" + "errors" +) + +// Walker provides methods to move through a DAG of nodes that implement +// the `NavigableNode` interface. It uses iterative algorithms (instead +// of recursive ones) that expose the `path` of nodes from the root to +// the `ActiveNode` it currently points to. +// +// It provides multiple ways to walk through the DAG (e.g. `Iterate` +// and `Seek`). When using them, you provide a Visitor function that +// will be called for each node the Walker traverses. The Visitor can +// read data from those nodes and, optionally, direct the movement of +// the Walker by calling `Pause` (to stop traversing and return) or +// `NextChild` (to skip a child and its descendants). See the DAG reader +// in `github.com/ipfs/go-unixfs/io/dagreader.go` for a usage example. +// TODO: This example isn't merged yet. +type Walker struct { + + // Sequence of nodes in the DAG from the root to the `ActiveNode`, each + // position in the slice being the parent of the next one. The `ActiveNode` + // resides in the position indexed by `currentDepth` (the slice may contain + // more elements past that point but they should be ignored since the slice + // is not truncated to leverage the already allocated space). + // + // Every time `down` is called the `currentDepth` increases and the child + // of the `ActiveNode` is inserted after it (effectively becoming the new + // `ActiveNode`). 
+ // + // The slice must *always* have a length bigger than zero with the root + // of the DAG at the first position (empty DAGs are not valid). + path []NavigableNode + + // Depth of the `ActiveNode`. It grows downwards, root being 0, its child 1, + // and so on. It controls the effective length of `path` and `childIndex`. + // + // A currentDepth of -1 signals the start case of a new `Walker` that hasn't + // moved yet. Although this state is an invalid index to the slices, it + // allows to centralize all the visit calls in the `down` move (starting at + // zero would require a special visit case inside every walk operation like + // `Iterate()` and `Seek`). This value should never be returned to after + // the first `down` movement, moving up from the root should always return + // `errUpOnRoot`. + currentDepth int + + // This slice has the index of the child each node in `path` is pointing + // to. The child index in the node can be set past all of its child nodes + // (having a value equal to `ChildTotal`) to signal it has visited (or + // skipped) all of them. A leaf node with no children that has its index + // in zero would also comply with this format. + // + // Complement to `path`, not only do we need to know which nodes have been + // traversed to reach the `ActiveNode` but also which child nodes they are + // to correctly have the active path of the DAG. (Reword this paragraph.) + childIndex []uint + + // Flag to signal that a pause in the current walk operation has been + // requested by the user inside `Visitor`. + pauseRequested bool + + // Used to pass information from the central `Walker` structure to the + // distributed `NavigableNode`s (to have a centralized configuration + // structure to control the behavior of all of them), e.g., to tell + // the `NavigableIPLDNode` which context should be used to load node + // promises (but this could later be used in more elaborate ways). 
+ ctx context.Context +} + +// `Walker` implementation details: +// +// The `Iterate` and `Seek` walk operations are implemented through two +// basic move methods `up` and `down`, that change which node is the +// `ActiveNode` (modifying the `path` that leads to it). The `NextChild` +// method allows to change which child the `ActiveNode` is pointing to +// in order to change the direction of the descent. +// +// The `down` method is the analogous of a recursive call and the one in +// charge of visiting (possible new) nodes (through `Visitor`) and performing +// some user-defined logic. A `Pause` method is available to interrupt the +// current walk operation after visiting a node. +// +// Key terms and concepts: +// * Walk operation (e.g., `Iterate`). +// * Move methods: `up` and `down`. +// * Active node. +// * Path to the active node. + +// Function called each time a node is arrived upon in a walk operation +// through the `down` method (not when going back `up`). It is the main +// API to implement DAG functionality (e.g., read and seek a file DAG) +// on top of the `Walker` structure. +// +// Its argument is the current `node` being visited (the `ActiveNode`). +// Any error it returns (apart from the internal `errPauseWalkOperation`) +// will be forwarded to the caller of the walk operation (pausing it). +// +// Any of the exported methods of this API should be allowed to be called +// from within this method, e.g., `NextChild`. +// TODO: Check that. Can `ResetPosition` be called without breaking +// the `Walker` integrity? +type Visitor func(node NavigableNode) error + +// NavigableNode is the interface the nodes of a DAG need to implement in +// order to be traversed by the `Walker`. +type NavigableNode interface { + + // FetchChild returns the child of this node pointed to by `childIndex`. + // A `Context` stored in the `Walker` is passed (`ctx`) that may contain + // configuration attributes stored by the user before initiating the + // walk operation. 
+ FetchChild(ctx context.Context, childIndex uint) (NavigableNode, error) + + // ChildTotal returns the number of children of the `ActiveNode`. + ChildTotal() uint + + // TODO: Evaluate providing the `Cleanup` and `Reset` methods. + + // Cleanup is an optional method that is called by the `Walker` when + // this node leaves the active `path`, i.e., when this node is the + // `ActiveNode` and the `up` movement is called. + //Cleanup() + // Allow this method to return an error? That would imply + // modifying the `Walker` API, `up()` would now return an error + // different than `errUpOnRoot`. + + // Reset is an optional function that is called by the `Walker` when + // `ResetPosition` is called, it is only applied to the root node + // of the DAG. + //Reset() +} + +// NewWalker creates a new `Walker` structure from a `root` +// NavigableNode. +func NewWalker(ctx context.Context, root NavigableNode) *Walker { + return &Walker{ + ctx: ctx, + + path: []NavigableNode{root}, + childIndex: []uint{0}, + + currentDepth: -1, + // Starting position, "on top" of the root node, see `currentDepth`. + } +} + +// ActiveNode returns the `NavigableNode` that `Walker` is pointing +// to at the moment. It changes when `up` or `down` is called. +func (w *Walker) ActiveNode() NavigableNode { + return w.path[w.currentDepth] + // TODO: Add a check for the initial state of `currentDepth` -1? +} + +// ErrDownNoChild signals there is no child at `ActiveChildIndex` in the +// `ActiveNode` to go down to. +var ErrDownNoChild = errors.New("can't go down, the child does not exist") + +// errUpOnRoot signals the end of the DAG after returning to the root. +var errUpOnRoot = errors.New("can't go up, already on root") + +// EndOfDag wraps the `errUpOnRoot` and signals to the user that the +// entire DAG has been iterated. +var EndOfDag = errors.New("end of DAG") + +// ErrNextNoChild signals the end of this parent child nodes. 
+var ErrNextNoChild = errors.New("can't go to the next child, no more child nodes in this parent") + +// errPauseWalkOperation signals the pause of the walk operation. +var errPauseWalkOperation = errors.New("pause in the current walk operation") + +// ErrNilVisitor signals the lack of a `Visitor` function. +var ErrNilVisitor = errors.New("no Visitor function specified") + +// Iterate the DAG through the DFS pre-order walk algorithm, going down +// as much as possible, then `NextChild` to the other siblings, and then up +// (to go down again). The position is saved throughout iterations (and +// can be previously set in `Seek`) allowing `Iterate` to be called +// repeatedly (after a `Pause`) to continue the iteration. +// +// This function returns the errors received from `down` (generated either +// inside the `Visitor` call or any other errors while fetching the child +// nodes), the rest of the move errors are handled within the function and +// are not returned. +func (w *Walker) Iterate(visitor Visitor) error { + + // Iterate until either: the end of the DAG (`errUpOnRoot`), a `Pause` + // is requested (`errPauseWalkOperation`) or an error happens (while + // going down). + for { + + // First, go down as much as possible. + for { + err := w.down(visitor) + + if err == ErrDownNoChild { + break + // Can't keep going down from this node, try to move Next. + } + + if err == errPauseWalkOperation { + return nil + // Pause requested, `errPauseWalkOperation` is just an internal + // error to signal to pause, don't pass it along. + } + + if err != nil { + return err + // `down` is the only movement that can return *any* error. + } + } + + // Can't move down anymore, turn to the next child in the `ActiveNode` + // to go down a different path. If there are no more child nodes + // available, go back up. + for { + err := w.NextChild() + if err == nil { + break + // No error, it turned to the next child. Try to go down again. 
+			}
+
+			// It can't go Next (`ErrNextNoChild`), try to move up.
+			err = w.up()
+			if err != nil {
+				// Can't move up, on the root again (`errUpOnRoot`).
+				return EndOfDag
+			}
+
+			// Moved up, try `NextChild` again.
+		}
+
+		// Turned to the next child (after potentially many up moves),
+		// try going down again.
+	}
+}
+
+// Seek a specific node in a downwards manner. The `Visitor` should be
+// used to steer the seek selecting at each node which child will the
+// seek continue to (extending the `path` in that direction) or pause it
+// (if the desired node has been found). The seek always starts from
+// the root. It modifies the position so it shouldn't be used in-between
+// `Iterate` calls (it can be used to set the position *before* iterating).
+// If the visitor returns any non-`nil` errors the seek will stop.
+//
+// TODO: The seek could be extended to seek from the current position.
+// (Is there something in the logic that would prevent it at the moment?)
+func (w *Walker) Seek(visitor Visitor) error {
+
+	if visitor == nil {
+		return ErrNilVisitor
+		// Although valid, there is no point in calling `Seek` without
+		// any extra logic, it would just go down to the leftmost leaf,
+		// so this would probably be a user error.
+	}
+
+	// Go down until the desired node is found (that will be signaled
+	// pausing the seek with `errPauseWalkOperation`) or a leaf node is
+	// reached (end of the DAG).
+	for {
+		err := w.down(visitor)
+
+		if err == errPauseWalkOperation {
+			return nil
+			// Found the node, `errPauseWalkOperation` is just an internal
+			// error to signal to pause, don't pass it along.
+		}
+
+		if err == ErrDownNoChild {
+			return nil
+			// Can't keep going down from this node, either at a leaf node
+			// or the `Visitor` has moved the child index past the
+			// available index (probably because none indicated that the
+			// target node could be down from there).
+		}
+
+		if err != nil {
+			return err
+			// `down()` is the only movement that can return *any* error.
+		}
+	}
+	// TODO: Copied from the first part of `Iterate()` (although conceptually
+	// different from it). Could this be encapsulated in a function to avoid
+	// repeating code? The way the pause signal is handled it wouldn't seem
+	// very useful: the `errPauseWalkOperation` needs to be processed at this
+	// depth to return from the function (and pause the seek, returning
+	// from another function here wouldn't cause it to stop).
+}
+
+// Go down one level in the DAG to the child of the `ActiveNode`
+// pointed to by `ActiveChildIndex` and perform some logic on it
+// through the user-specified `visitor`.
+//
+// This should always be the first move in any walk operation
+// (to visit the root node and move the `currentDepth` away
+// from the negative value).
+func (w *Walker) down(visitor Visitor) error {
+	child, err := w.fetchChild()
+	if err != nil {
+		return err
+	}
+
+	w.extendPath(child)
+
+	return w.visitActiveNode(visitor)
+}
+
+// Fetch the child from the `ActiveNode` through the `FetchChild`
+// method of the `NavigableNode` interface.
+func (w *Walker) fetchChild() (NavigableNode, error) {
+	if w.currentDepth == -1 {
+		// First time `down()` is called, `currentDepth` is -1,
+		// return the root node. Don't check available child nodes
+		// (as the `Walker` is not actually on any node just yet
+		// and `ActiveChildIndex` is of no use yet).
+		return w.path[0], nil
+	}
+
+	// Check if the child to fetch exists.
+	if w.ActiveChildIndex() >= w.ActiveNode().ChildTotal() {
+		return nil, ErrDownNoChild
+	}
+
+	return w.ActiveNode().FetchChild(w.ctx, w.ActiveChildIndex())
+
+	// TODO: Maybe call `extendPath` here and hide it away
+	// from `down`.
+} + +// Increase the `currentDepth` and extend the `path` to the fetched +// `child` node (which now becomes the new `ActiveNode`) +func (w *Walker) extendPath(child NavigableNode) { + w.currentDepth++ + + // Extend the slices if needed (doubling its capacity). + if w.currentDepth >= len(w.path) { + w.path = append(w.path, make([]NavigableNode, len(w.path))...) + w.childIndex = append(w.childIndex, make([]uint, len(w.childIndex))...) + // TODO: Check the performance of this grow mechanism. + } + + // `child` now becomes the `ActiveNode()`. + w.path[w.currentDepth] = child + w.childIndex[w.currentDepth] = 0 +} + +// Call the `Visitor` on the `ActiveNode`. This function should only be +// called from `down`. This is a wrapper function to `Visitor` to process +// the `Pause` signal and do other minor checks (taking this logic away +// from `down`). +func (w *Walker) visitActiveNode(visitor Visitor) error { + if visitor == nil { + return nil + // No need to check `pauseRequested` as `Pause` should + // only be called from within the `Visitor`. + } + + err := visitor(w.ActiveNode()) + + if w.pauseRequested { + // If a pause was requested make sure an error is returned + // that will cause the current walk operation to return. If + // `Visitor` didn't return an error set an artificial one + // generated by the `Walker`. + if err == nil { + err = errPauseWalkOperation + } + + w.pauseRequested = false + } + + return err +} + +// Go up from the `ActiveNode`. The only possible error this method +// can return is to signal it's already at the root and can't go up. +func (w *Walker) up() error { + if w.currentDepth < 1 { + return errUpOnRoot + } + + w.currentDepth-- + + // w.ActiveNode().Cleanup() + // If `Cleanup` is supported this would be the place to call it. + + return nil +} + +// NextChild increases the child index of the `ActiveNode` to point +// to the next child (which may exist or may be the end of the available +// child nodes). 
+//
+// This method doesn't change the `ActiveNode`, it just changes where
+// it is pointing to next, it could be interpreted as "turn to the next
+// child".
+func (w *Walker) NextChild() error {
+	w.incrementActiveChildIndex()
+
+	if w.ActiveChildIndex() == w.ActiveNode().ChildTotal() {
+		return ErrNextNoChild
+		// At the end of the available children, signal it.
+	}
+
+	return nil
+}
+
+// incrementActiveChildIndex increments the child index of the `ActiveNode` to
+// point to the next child (if it exists) or to the position past all of
+// the child nodes (`ChildTotal`) to signal that all of its children have
+// been visited/skipped (if already at that last position, do nothing).
+func (w *Walker) incrementActiveChildIndex() {
+	if w.ActiveChildIndex()+1 <= w.ActiveNode().ChildTotal() {
+		w.childIndex[w.currentDepth]++
+	}
+}
+
+// ActiveChildIndex returns the index of the child the `ActiveNode()`
+// is pointing to.
+func (w *Walker) ActiveChildIndex() uint {
+	return w.childIndex[w.currentDepth]
+}
+
+// SetContext changes the internal `Walker` context (that is provided to the
+// `NavigableNode`s when calling `FetchChild`) with the one passed
+// as argument.
+func (w *Walker) SetContext(ctx context.Context) {
+	w.ctx = ctx
+}
+
+// Pause the current walk operation. This function must be called from
+// within the `Visitor` function.
+func (w *Walker) Pause() { + w.pauseRequested = true +} diff --git a/vendor/github.com/ipfs/go-log/.travis.yml b/vendor/github.com/ipfs/go-log/.travis.yml new file mode 100644 index 0000000000..923835bc58 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/.travis.yml @@ -0,0 +1,31 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/src/gx + - $GOPATH/pkg/mod + - $HOME/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/ipfs/go-log/LICENSE b/vendor/github.com/ipfs/go-log/LICENSE new file mode 100644 index 0000000000..c7386b3c94 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/ipfs/go-log/README.md b/vendor/github.com/ipfs/go-log/README.md new file mode 100644 index 0000000000..b0b5b9f6fa --- /dev/null +++ b/vendor/github.com/ipfs/go-log/README.md @@ -0,0 +1,79 @@ +# go-log + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) +[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) +[![GoDoc](https://godoc.org/github.com/ipfs/go-log?status.svg)](https://godoc.org/github.com/ipfs/go-log) +[![Build Status](https://travis-ci.org/ipfs/go-log.svg?branch=master)](https://travis-ci.org/ipfs/go-log) + + + + +> The logging library used by go-ipfs + +It currently uses a modified version of [go-logging](https://github.com/whyrusleeping/go-logging) to implement the standard printf-style log output. + +## Install + +```sh +go get github.com/ipfs/go-log +``` + +## Usage + +Once the package is imported under the name `logging`, an instance of `EventLogger` can be created like so: + +```go +var log = logging.Logger("subsystem name") +``` + +It can then be used to emit log messages, either plain printf-style messages at six standard levels or structured messages using `Start`, `StartFromParentState`, `Finish` and `FinishWithErr` methods. 
+
+## Example
+
+```go
+func (s *Session) GetBlock(ctx context.Context, c *cid.Cid) (blk blocks.Block, err error) {
+
+	// Starts Span called "Session.GetBlock", associates with `ctx`
+	ctx = log.Start(ctx, "Session.GetBlock")
+
+	// defer so `blk` and `err` can be evaluated after call
+	defer func() {
+		// tag span associated with `ctx`
+		log.SetTags(ctx, map[string]interface{}{
+			"cid": c,
+			"block": blk,
+		})
+		// if err is non-nil tag the span with an error
+		log.FinishWithErr(ctx, err)
+	}()
+
+	if shouldStartSomething() {
+		// log message on span associated with `ctx`
+		log.LogKV(ctx, "startSomething", true)
+	}
+	...
+}
+```
+## Tracing
+
+`go-log` wraps the [opentracing-go](https://github.com/opentracing/opentracing-go) methods - `StartSpan`, `Finish`, `LogKV`, and `SetTag`.
+
+`go-log` implements its own tracer - `loggabletracer` - based on the [basictracer-go](https://github.com/opentracing/basictracer-go) implementation. If there is an active [`WriterGroup`](https://github.com/ipfs/go-log/blob/master/writer/option.go) the `loggabletracer` will [record](https://github.com/ipfs/go-log/blob/master/tracer/recorder.go) span data to the `WriterGroup`. An example of this can be seen in the [`log tail`](https://github.com/ipfs/go-ipfs/blob/master/core/commands/log.go) command of `go-ipfs`.
+
+Third party tracers may be used by calling `opentracing.SetGlobalTracer()` with your desired tracing implementation. An example of this can be seen using the [`go-jaeger-plugin`](https://github.com/ipfs/go-jaeger-plugin) and the `go-ipfs` [tracer plugin](https://github.com/ipfs/go-ipfs/blob/master/plugin/tracer.go)
+
+## Contribute
+
+Feel free to join in. All welcome. Open an [issue](https://github.com/ipfs/go-log/issues)!
+
+This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md).
+
+### Want to hack on IPFS?
+
+[![](https://cdn.rawgit.com/jbenet/contribute-ipfs-gif/master/img/contribute.gif)](https://github.com/ipfs/community/blob/master/contributing.md)
+
+## License
+
+MIT
diff --git a/vendor/github.com/ipfs/go-log/context.go b/vendor/github.com/ipfs/go-log/context.go
new file mode 100644
index 0000000000..b8ef5bc836
--- /dev/null
+++ b/vendor/github.com/ipfs/go-log/context.go
@@ -0,0 +1,38 @@
+package log
+
+import (
+	"context"
+	"errors"
+)
+
+type key int
+
+const metadataKey key = 0
+
+// ContextWithLoggable returns a derived context which contains the provided
+// Loggable. Any Events logged with the derived context will include the
+// provided Loggable.
+func ContextWithLoggable(ctx context.Context, l Loggable) context.Context {
+	existing, err := MetadataFromContext(ctx)
+	if err != nil {
+		// context does not contain meta. just set the new metadata
+		child := context.WithValue(ctx, metadataKey, Metadata(l.Loggable()))
+		return child
+	}
+
+	merged := DeepMerge(existing, l.Loggable())
+	child := context.WithValue(ctx, metadataKey, merged)
+	return child
+}
+
+// MetadataFromContext extracts Metadata from a given context's value.
+func MetadataFromContext(ctx context.Context) (Metadata, error) { + value := ctx.Value(metadataKey) + if value != nil { + metadata, ok := value.(Metadata) + if ok { + return metadata, nil + } + } + return nil, errors.New("context contains no metadata") +} diff --git a/vendor/github.com/ipfs/go-log/entry.go b/vendor/github.com/ipfs/go-log/entry.go new file mode 100644 index 0000000000..63c02135c8 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/entry.go @@ -0,0 +1,7 @@ +package log + +type entry struct { + loggables []Loggable + system string + event string +} diff --git a/vendor/github.com/ipfs/go-log/go.mod b/vendor/github.com/ipfs/go-log/go.mod new file mode 100644 index 0000000000..7565c6d1d7 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/go.mod @@ -0,0 +1,10 @@ +module github.com/ipfs/go-log + +require ( + github.com/gogo/protobuf v1.3.1 + github.com/ipfs/go-log/v2 v2.0.5 + github.com/opentracing/opentracing-go v1.1.0 + go.uber.org/zap v1.14.1 +) + +go 1.12 diff --git a/vendor/github.com/ipfs/go-log/go.sum b/vendor/github.com/ipfs/go-log/go.sum new file mode 100644 index 0000000000..ab58ec8bd9 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/go.sum @@ -0,0 +1,65 @@ +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/ipfs/go-log/v2 
v2.0.5 h1:fL4YI+1g5V/b1Yxr1qAiXTMg1H8z9vx/VmJxBuQMHvU= +github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod 
h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/vendor/github.com/ipfs/go-log/levels.go b/vendor/github.com/ipfs/go-log/levels.go new file mode 100644 index 0000000000..22e6b8828d --- /dev/null +++ b/vendor/github.com/ipfs/go-log/levels.go @@ -0,0 +1,30 @@ +package log + +import ( + log2 "github.com/ipfs/go-log/v2" +) + +// LogLevel represents a log severity level. Use the package variables as an +// enum. 
+type LogLevel = log2.LogLevel + +var ( + LevelDebug = log2.LevelDebug + LevelInfo = log2.LevelInfo + LevelWarn = log2.LevelWarn + LevelError = log2.LevelError + LevelDPanic = log2.LevelDPanic + LevelPanic = log2.LevelPanic + LevelFatal = log2.LevelFatal +) + +// LevelFromString parses a string-based level and returns the corresponding +// LogLevel. +// +// Supported strings are: DEBUG, INFO, WARN, ERROR, DPANIC, PANIC, FATAL, and +// their lower-case forms. +// +// The returned LogLevel must be discarded if error is not nil. +func LevelFromString(level string) (LogLevel, error) { + return log2.LevelFromString(level) +} diff --git a/vendor/github.com/ipfs/go-log/log.go b/vendor/github.com/ipfs/go-log/log.go new file mode 100644 index 0000000000..5581d508bc --- /dev/null +++ b/vendor/github.com/ipfs/go-log/log.go @@ -0,0 +1,420 @@ +// Package log is the logging library used by IPFS +// (https://github.com/ipfs/go-ipfs). It uses a modified version of +// https://godoc.org/github.com/whyrusleeping/go-logging . +package log + +import ( + "bytes" + "context" + "encoding/json" + "path" + "runtime" + "time" + + log2 "github.com/ipfs/go-log/v2" + writer "github.com/ipfs/go-log/writer" + + opentrace "github.com/opentracing/opentracing-go" + otExt "github.com/opentracing/opentracing-go/ext" + "go.uber.org/zap" +) + +var log = Logger("eventlog") + +// StandardLogger provides API compatibility with standard printf loggers +// eg. go-logging +type StandardLogger interface { + log2.StandardLogger + // Deprecated use Warn + Warning(args ...interface{}) + // Deprecated use Warnf + Warningf(format string, args ...interface{}) +} + +// EventLogger extends the StandardLogger interface to allow for log items +// containing structured metadata +type EventLogger interface { + StandardLogger + + // Event merges structured data from the provided inputs into a single + // machine-readable log event. 
+ // + // If the context contains metadata, a copy of this is used as the base + // metadata accumulator. + // + // If one or more loggable objects are provided, these are deep-merged into base blob. + // + // Next, the event name is added to the blob under the key "event". If + // the key "event" already exists, it will be over-written. + // + // Finally the timestamp and package name are added to the accumulator and + // the metadata is logged. + // DEPRECATED + // Deprecated: Stop using go-log for event logging + Event(ctx context.Context, event string, m ...Loggable) + + // DEPRECATED + // Deprecated: Stop using go-log for event logging + EventBegin(ctx context.Context, event string, m ...Loggable) *EventInProgress + + // Start starts an opentracing span with `name`, using + // any Span found within `ctx` as a ChildOfRef. If no such parent could be + // found, Start creates a root (parentless) Span. + // + // The return value is a context.Context object built around the + // returned Span. + // + // Example usage: + // + // SomeFunction(ctx context.Context, ...) { + // ctx := log.Start(ctx, "SomeFunction") + // defer log.Finish(ctx) + // ... + // } + // Deprecated: Stop using go-log for event logging + Start(ctx context.Context, name string) context.Context + + // StartFromParentState starts an opentracing span with `name`, using + // any Span found within `ctx` as a ChildOfRef. If no such parent could be + // found, StartSpanFromParentState creates a root (parentless) Span. + // + // StartFromParentState will attempt to deserialize a SpanContext from `parent`, + // using any Span found within to continue the trace + // + // The return value is a context.Context object built around the + // returned Span. 
+	//
+	// An error is returned when `parent` cannot be deserialized to a SpanContext
+	//
+	// Example usage:
+	//
+	//   SomeFunction(ctx context.Context, bParent []byte) {
+	//       ctx := log.StartFromParentState(ctx, "SomeFunction", bParent)
+	//       defer log.Finish(ctx)
+	//       ...
+	//   }
+	// Deprecated: Stop using go-log for event logging
+	StartFromParentState(ctx context.Context, name string, parent []byte) (context.Context, error)
+
+	// Finish completes the span associated with `ctx`.
+	//
+	// Finish() must be the last call made to any span instance, and to do
+	// otherwise leads to undefined behavior.
+	// Finish will do its best to notify (log) when used incorrectly
+	// e.g. called twice, or called on a spanless `ctx`
+	// Deprecated: Stop using go-log for event logging
+	Finish(ctx context.Context)
+
+	// FinishWithErr completes the span associated with `ctx` and also calls
+	// SetErr if `err` is non-nil
+	//
+	// FinishWithErr() must be the last call made to any span instance, and to do
+	// otherwise leads to undefined behavior.
+	// FinishWithErr will do its best to notify (log) when used incorrectly
+	// e.g. called twice, or called on a spanless `ctx`
+	// Deprecated: Stop using go-log for event logging
+	FinishWithErr(ctx context.Context, err error)
+
+	// SetErr tags the span associated with `ctx` to reflect that an error occurred, and
+	// logs the value `err` under key `error`.
+	// Deprecated: Stop using go-log for event logging
+	SetErr(ctx context.Context, err error)
+
+	// LogKV records key:value logging data about an event stored in `ctx`
+	// Example:
+	//    log.LogKV(
+	//      "error", "resolve failure",
+	//      "type", "cache timeout",
+	//      "waited.millis", 1500)
+	// Deprecated: Stop using go-log for event logging
+	LogKV(ctx context.Context, alternatingKeyValues ...interface{})
+
+	// SetTag tags key `k` and value `v` on the span associated with `ctx`
+	// Deprecated: Stop using go-log for event logging
+	SetTag(ctx context.Context, key string, value interface{})
+
+	// SetTags tags keys from the `tags` maps on the span associated with `ctx`
+	// Example:
+	//    log.SetTags(ctx, map[string]{
+	//      "type": bizStruct,
+	//      "request": req,
+	//    })
+	// Deprecated: Stop using go-log for event logging
+	SetTags(ctx context.Context, tags map[string]interface{})
+
+	// SerializeContext takes the SpanContext instance stored in `ctx` and Serializes
+	// it to bytes. An error is returned if the `ctx` cannot be serialized to
+	// a bytes array
+	// Deprecated: Stop using go-log for event logging
+	SerializeContext(ctx context.Context) ([]byte, error)
+}
+
+var _ EventLogger = Logger("test-logger")
+
+// Logger retrieves an event logger by name
+func Logger(system string) *ZapEventLogger {
+	if len(system) == 0 {
+		setuplog := Logger("setup-logger")
+		setuplog.Error("Missing name parameter")
+		system = "undefined"
+	}
+	logger := log2.Logger(system)
+	return &ZapEventLogger{system: system, SugaredLogger: logger.SugaredLogger}
+}
+
+// ZapEventLogger implements the EventLogger and wraps a go-logging Logger
+type ZapEventLogger struct {
+	zap.SugaredLogger
+
+	system string
+	// TODO add log-level
+}
+
+// Deprecated: use Warn
+func (el *ZapEventLogger) Warning(args ...interface{}) {
+	el.Warn(args...)
+}
+
+// Deprecated: use Warnf
+func (el *ZapEventLogger) Warningf(format string, args ...interface{}) {
+	el.Warnf(format, args...)
+} + +// Deprecated: Stop using go-log for event logging +func (el *ZapEventLogger) Start(ctx context.Context, operationName string) context.Context { + span, ctx := opentrace.StartSpanFromContext(ctx, operationName) + span.SetTag("system", el.system) + return ctx +} + +// Deprecated: Stop using go-log for event logging +func (el *ZapEventLogger) StartFromParentState(ctx context.Context, operationName string, parent []byte) (context.Context, error) { + sc, err := deserializeContext(parent) + if err != nil { + return nil, err + } + + //TODO RPCServerOption is probably not the best tag, as this is likely from a peer + span, ctx := opentrace.StartSpanFromContext(ctx, operationName, otExt.RPCServerOption(sc)) + span.SetTag("system", el.system) + return ctx, nil +} + +// Deprecated: Stop using go-log for event logging +func (el *ZapEventLogger) SerializeContext(ctx context.Context) ([]byte, error) { + gTracer := opentrace.GlobalTracer() + b := make([]byte, 0) + carrier := bytes.NewBuffer(b) + span := opentrace.SpanFromContext(ctx) + if err := gTracer.Inject(span.Context(), opentrace.Binary, carrier); err != nil { + return nil, err + } + return carrier.Bytes(), nil +} + +// Deprecated: Stop using go-log for event logging +func (el *ZapEventLogger) LogKV(ctx context.Context, alternatingKeyValues ...interface{}) { + span := opentrace.SpanFromContext(ctx) + if span == nil { + _, file, line, _ := runtime.Caller(1) + log.Errorf("LogKV with no Span in context called on %s:%d", path.Base(file), line) + return + } + span.LogKV(alternatingKeyValues...) 
+} + +// Deprecated: Stop using go-log for event logging +func (el *ZapEventLogger) SetTag(ctx context.Context, k string, v interface{}) { + span := opentrace.SpanFromContext(ctx) + if span == nil { + _, file, line, _ := runtime.Caller(1) + log.Errorf("SetTag with no Span in context called on %s:%d", path.Base(file), line) + return + } + span.SetTag(k, v) +} + +// Deprecated: Stop using go-log for event logging +func (el *ZapEventLogger) SetTags(ctx context.Context, tags map[string]interface{}) { + span := opentrace.SpanFromContext(ctx) + if span == nil { + _, file, line, _ := runtime.Caller(1) + log.Errorf("SetTags with no Span in context called on %s:%d", path.Base(file), line) + return + } + for k, v := range tags { + span.SetTag(k, v) + } +} + +func (el *ZapEventLogger) setErr(ctx context.Context, err error, skip int) { + span := opentrace.SpanFromContext(ctx) + if span == nil { + _, file, line, _ := runtime.Caller(skip) + log.Errorf("SetErr with no Span in context called on %s:%d", path.Base(file), line) + return + } + if err == nil { + return + } + + otExt.Error.Set(span, true) + span.LogKV("error", err.Error()) +} + +// Deprecated: Stop using go-log for event logging +func (el *ZapEventLogger) SetErr(ctx context.Context, err error) { + el.setErr(ctx, err, 1) +} + +// Deprecated: Stop using go-log for event logging +func (el *ZapEventLogger) Finish(ctx context.Context) { + span := opentrace.SpanFromContext(ctx) + if span == nil { + _, file, line, _ := runtime.Caller(1) + log.Errorf("Finish with no Span in context called on %s:%d", path.Base(file), line) + return + } + span.Finish() +} + +// Deprecated: Stop using go-log for event logging +func (el *ZapEventLogger) FinishWithErr(ctx context.Context, err error) { + el.setErr(ctx, err, 2) + el.Finish(ctx) +} + +func deserializeContext(bCtx []byte) (opentrace.SpanContext, error) { + gTracer := opentrace.GlobalTracer() + carrier := bytes.NewReader(bCtx) + spanContext, err := gTracer.Extract(opentrace.Binary, 
carrier) + if err != nil { + log.Warning("Failed to deserialize context %s", err) + return nil, err + } + return spanContext, nil +} + +// Deprecated: Stop using go-log for event logging +func (el *ZapEventLogger) EventBegin(ctx context.Context, event string, metadata ...Loggable) *EventInProgress { + ctx = el.Start(ctx, event) + + for _, m := range metadata { + for l, v := range m.Loggable() { + el.LogKV(ctx, l, v) + } + } + + eip := &EventInProgress{} + eip.doneFunc = func(additional []Loggable) { + // anything added during the operation + // e.g. deprecated methods event.Append(...) or event.SetError(...) + for _, m := range eip.loggables { + for l, v := range m.Loggable() { + el.LogKV(ctx, l, v) + } + } + el.Finish(ctx) + } + return eip +} + +// Deprecated: Stop using go-log for event logging +func (el *ZapEventLogger) Event(ctx context.Context, event string, metadata ...Loggable) { + + // short circuit if theres nothing to write to + if !writer.WriterGroup.Active() { + return + } + + // Collect loggables for later logging + var loggables []Loggable + + // get any existing metadata from the context + existing, err := MetadataFromContext(ctx) + if err != nil { + existing = Metadata{} + } + loggables = append(loggables, existing) + loggables = append(loggables, metadata...) 
+ + e := entry{ + loggables: loggables, + system: el.system, + event: event, + } + + accum := Metadata{} + for _, loggable := range e.loggables { + accum = DeepMerge(accum, loggable.Loggable()) + } + + // apply final attributes to reserved keys + // TODO accum["level"] = level + accum["event"] = e.event + accum["system"] = e.system + accum["time"] = FormatRFC3339(time.Now()) + + var buf bytes.Buffer + encoder := json.NewEncoder(&buf) + encoder.SetEscapeHTML(false) + err = encoder.Encode(accum) + if err != nil { + el.Errorf("ERROR FORMATTING EVENT ENTRY: %s", err) + return + } + + _, _ = writer.WriterGroup.Write(buf.Bytes()) +} + +// DEPRECATED +// EventInProgress represent and event which is happening +// Deprecated: Stop using go-log for event logging +type EventInProgress struct { + loggables []Loggable + doneFunc func([]Loggable) +} + +// DEPRECATED use `LogKV` or `SetTag` +// Append adds loggables to be included in the call to Done +func (eip *EventInProgress) Append(l Loggable) { + eip.loggables = append(eip.loggables, l) +} + +// DEPRECATED use `SetError(ctx, error)` +// SetError includes the provided error +func (eip *EventInProgress) SetError(err error) { + eip.loggables = append(eip.loggables, LoggableMap{ + "error": err.Error(), + }) +} + +// Done creates a new Event entry that includes the duration and appended +// loggables. +// Deprecated: Stop using go-log for event logging +func (eip *EventInProgress) Done() { + eip.doneFunc(eip.loggables) // create final event with extra data +} + +// DEPRECATED use `FinishWithErr` +// DoneWithErr creates a new Event entry that includes the duration and appended +// loggables. DoneWithErr accepts an error, if err is non-nil, it is set on +// the EventInProgress. 
Otherwise the logic is the same as the `Done()` method +func (eip *EventInProgress) DoneWithErr(err error) { + if err != nil { + eip.SetError(err) + } + eip.doneFunc(eip.loggables) +} + +// Close is an alias for done +// Deprecated: Stop using go-log for event logging +func (eip *EventInProgress) Close() error { + eip.Done() + return nil +} + +// FormatRFC3339 returns the given time in UTC with RFC3999Nano format. +func FormatRFC3339(t time.Time) string { + return t.UTC().Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/ipfs/go-log/loggable.go b/vendor/github.com/ipfs/go-log/loggable.go new file mode 100644 index 0000000000..f4edb26845 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/loggable.go @@ -0,0 +1,42 @@ +package log + +// Loggable describes objects that can be marshalled into Metadata for logging +type Loggable interface { + Loggable() map[string]interface{} +} + +// LoggableMap is just a generic map keyed by string. It +// implements the Loggable interface. +type LoggableMap map[string]interface{} + +// Loggable implements the Loggable interface for LoggableMap +func (l LoggableMap) Loggable() map[string]interface{} { + return l +} + +// LoggableF converts a func into a Loggable +type LoggableF func() map[string]interface{} + +// Loggable implements the Loggable interface by running +// the LoggableF function. +func (l LoggableF) Loggable() map[string]interface{} { + return l() +} + +// Deferred returns a LoggableF where the execution of the +// provided function is deferred. +func Deferred(key string, f func() string) Loggable { + function := func() map[string]interface{} { + return map[string]interface{}{ + key: f(), + } + } + return LoggableF(function) +} + +// Pair returns a Loggable where key is paired to Loggable. 
+func Pair(key string, l Loggable) Loggable { + return LoggableMap{ + key: l, + } +} diff --git a/vendor/github.com/ipfs/go-log/metadata.go b/vendor/github.com/ipfs/go-log/metadata.go new file mode 100644 index 0000000000..07947b54a0 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/metadata.go @@ -0,0 +1,77 @@ +package log + +import ( + "encoding/json" + "errors" + "reflect" +) + +// Metadata is a convenience type for generic maps +type Metadata map[string]interface{} + +// DeepMerge merges the second Metadata parameter into the first. +// Nested Metadata are merged recursively. Primitives are over-written. +func DeepMerge(b, a Metadata) Metadata { + out := Metadata{} + for k, v := range b { + out[k] = v + } + for k, v := range a { + + maybe, err := Metadatify(v) + if err != nil { + // if the new value is not meta. just overwrite the dest vaue + if out[k] != nil { + log.Debugf("Overwriting key: %s, old: %s, new: %s", k, out[k], v) + } + out[k] = v + continue + } + + // it is meta. What about dest? + outv, exists := out[k] + if !exists { + // the new value is meta, but there's no dest value. just write it + out[k] = v + continue + } + + outMetadataValue, err := Metadatify(outv) + if err != nil { + // the new value is meta and there's a dest value, but the dest + // value isn't meta. just overwrite + out[k] = v + continue + } + + // both are meta. merge them. + out[k] = DeepMerge(outMetadataValue, maybe) + } + return out +} + +// Loggable implements the Loggable interface. +func (m Metadata) Loggable() map[string]interface{} { + // NB: method defined on value to avoid de-referencing nil Metadata + return m +} + +// JsonString returns the marshaled JSON string for the metadata. +func (m Metadata) JsonString() (string, error) { + // NB: method defined on value + b, err := json.Marshal(m) + return string(b), err +} + +// Metadatify converts maps into Metadata. 
+func Metadatify(i interface{}) (Metadata, error) { + value := reflect.ValueOf(i) + if value.Kind() == reflect.Map { + m := map[string]interface{}{} + for _, k := range value.MapKeys() { + m[k.String()] = value.MapIndex(k).Interface() + } + return Metadata(m), nil + } + return nil, errors.New("is not a map") +} diff --git a/vendor/github.com/ipfs/go-log/oldlog.go b/vendor/github.com/ipfs/go-log/oldlog.go new file mode 100644 index 0000000000..f0ad558d0a --- /dev/null +++ b/vendor/github.com/ipfs/go-log/oldlog.go @@ -0,0 +1,68 @@ +package log + +import ( + tracer "github.com/ipfs/go-log/tracer" + lwriter "github.com/ipfs/go-log/writer" + "os" + + opentrace "github.com/opentracing/opentracing-go" + + log2 "github.com/ipfs/go-log/v2" +) + +func init() { + SetupLogging() +} + +// Logging environment variables +const ( + envTracingFile = "GOLOG_TRACING_FILE" // /path/to/file +) + +func SetupLogging() { + // We're importing V2. Given that we setup logging on init, we should be + // fine skipping the rest of the initialization. 
+ + // TracerPlugins are instantiated after this, so use loggable tracer + // by default, if a TracerPlugin is added it will override this + lgblRecorder := tracer.NewLoggableRecorder() + lgblTracer := tracer.New(lgblRecorder) + opentrace.SetGlobalTracer(lgblTracer) + + if tracingfp := os.Getenv(envTracingFile); len(tracingfp) > 0 { + f, err := os.Create(tracingfp) + if err != nil { + log.Error("failed to create tracing file: %s", tracingfp) + } else { + lwriter.WriterGroup.AddWriter(f) + } + } +} + +// SetDebugLogging calls SetAllLoggers with logging.DEBUG +func SetDebugLogging() { + log2.SetDebugLogging() +} + +// SetAllLoggers changes the logging level of all loggers to lvl +func SetAllLoggers(lvl LogLevel) { + log2.SetAllLoggers(lvl) +} + +// SetLogLevel changes the log level of a specific subsystem +// name=="*" changes all subsystems +func SetLogLevel(name, level string) error { + return log2.SetLogLevel(name, level) +} + +// SetLogLevelRegex sets all loggers to level `l` that match expression `e`. +// An error is returned if `e` fails to compile. 
+func SetLogLevelRegex(e, l string) error { + return log2.SetLogLevelRegex(e, l) +} + +// GetSubsystems returns a slice containing the +// names of the current loggers +func GetSubsystems() []string { + return log2.GetSubsystems() +} diff --git a/vendor/github.com/ipfs/go-log/package.json b/vendor/github.com/ipfs/go-log/package.json new file mode 100644 index 0000000000..adcf8cd08e --- /dev/null +++ b/vendor/github.com/ipfs/go-log/package.json @@ -0,0 +1,41 @@ +{ + "bugs": { + "url": "https://github.com/ipfs/go-log" + }, + "gx": { + "dvcsimport": "github.com/ipfs/go-log" + }, + "gxDependencies": [ + { + "author": "whyrusleeping", + "hash": "QmcaSwFc5RBg8yCq54QURwEU4nwjfCpjbpmaAm4VbdGLKv", + "name": "go-logging", + "version": "0.0.0" + }, + { + "author": "frist", + "hash": "QmWLWmRVSiagqP15jczsGME1qpob6HDbtbHAY2he9W5iUo", + "name": "opentracing-go", + "version": "0.0.3" + }, + { + "author": "mattn", + "hash": "QmTsHcKgTQ4VeYZd8eKYpTXeLW7KNwkRD9wjnrwsV2sToq", + "name": "go-colorable", + "version": "0.2.0" + }, + { + "author": "whyrusleeping", + "hash": "QmddjPSGZb3ieihSseFeCfVRpZzcqczPNsD2DvarSwnjJB", + "name": "gogo-protobuf", + "version": "1.2.1" + } + ], + "gxVersion": "0.12.1", + "language": "go", + "license": "", + "name": "go-log", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "1.5.9" +} + diff --git a/vendor/github.com/ipfs/go-log/tracer/LICENSE b/vendor/github.com/ipfs/go-log/tracer/LICENSE new file mode 100644 index 0000000000..148509a403 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/tracer/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 The OpenTracing Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit 
persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/ipfs/go-log/tracer/context.go b/vendor/github.com/ipfs/go-log/tracer/context.go new file mode 100644 index 0000000000..f1ebf61f67 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/tracer/context.go @@ -0,0 +1,42 @@ +package loggabletracer + +// SpanContext holds the basic Span metadata. +type SpanContext struct { + // A probabilistically unique identifier for a [multi-span] trace. + TraceID uint64 + + // A probabilistically unique identifier for a span. + SpanID uint64 + + // Whether the trace is sampled. + Sampled bool + + // The span's associated baggage. + Baggage map[string]string // initialized on first use +} + +// ForeachBaggageItem belongs to the opentracing.SpanContext interface +func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) { + for k, v := range c.Baggage { + if !handler(k, v) { + break + } + } +} + +// WithBaggageItem returns an entirely new loggabletracer SpanContext with the +// given key:value baggage pair set. 
+func (c SpanContext) WithBaggageItem(key, val string) SpanContext { + var newBaggage map[string]string + if c.Baggage == nil { + newBaggage = map[string]string{key: val} + } else { + newBaggage = make(map[string]string, len(c.Baggage)+1) + for k, v := range c.Baggage { + newBaggage[k] = v + } + newBaggage[key] = val + } + // Use positional parameters so the compiler will help catch new fields. + return SpanContext{c.TraceID, c.SpanID, c.Sampled, newBaggage} +} diff --git a/vendor/github.com/ipfs/go-log/tracer/debug.go b/vendor/github.com/ipfs/go-log/tracer/debug.go new file mode 100644 index 0000000000..8c302b3703 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/tracer/debug.go @@ -0,0 +1,78 @@ +package loggabletracer + +import ( + "bytes" + "fmt" + "runtime" + "strconv" + "sync" +) + +const debugGoroutineIDTag = "_initial_goroutine" + +type errAssertionFailed struct { + span *spanImpl + msg string +} + +// Error implements the error interface. +func (err *errAssertionFailed) Error() string { + return fmt.Sprintf("%s:\n%+v", err.msg, err.span) +} + +func (s *spanImpl) Lock() { + s.Mutex.Lock() + s.maybeAssertSanityLocked() +} + +func (s *spanImpl) maybeAssertSanityLocked() { + if s.tracer == nil { + s.Mutex.Unlock() + panic(&errAssertionFailed{span: s, msg: "span used after call to Finish()"}) + } + if s.tracer.options.DebugAssertSingleGoroutine { + startID := curGoroutineID() + curID, ok := s.raw.Tags[debugGoroutineIDTag].(uint64) + if !ok { + // This is likely invoked in the context of the SetTag which sets + // debugGoroutineTag. 
+ return + } + if startID != curID { + s.Mutex.Unlock() + panic(&errAssertionFailed{ + span: s, + msg: fmt.Sprintf("span started on goroutine %d, but now running on %d", startID, curID), + }) + } + } +} + +var goroutineSpace = []byte("goroutine ") +var littleBuf = sync.Pool{ + New: func() interface{} { + buf := make([]byte, 64) + return &buf + }, +} + +// Credit to @bradfitz: +// https://github.com/golang/net/blob/master/http2/gotrack.go#L51 +func curGoroutineID() uint64 { + bp := littleBuf.Get().(*[]byte) + defer littleBuf.Put(bp) + b := *bp + b = b[:runtime.Stack(b, false)] + // Parse the 4707 out of "goroutine 4707 [" + b = bytes.TrimPrefix(b, goroutineSpace) + i := bytes.IndexByte(b, ' ') + if i < 0 { + panic(fmt.Sprintf("No space found in %q", b)) + } + b = b[:i] + n, err := strconv.ParseUint(string(b), 10, 64) + if err != nil { + panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err)) + } + return n +} diff --git a/vendor/github.com/ipfs/go-log/tracer/event.go b/vendor/github.com/ipfs/go-log/tracer/event.go new file mode 100644 index 0000000000..9dbcb76a69 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/tracer/event.go @@ -0,0 +1,62 @@ +package loggabletracer + +import "github.com/opentracing/opentracing-go" + +// A SpanEvent is emitted when a mutating command is called on a Span. +type SpanEvent interface{} + +// EventCreate is emitted when a Span is created. +type EventCreate struct{ OperationName string } + +// EventTag is received when SetTag is called. +type EventTag struct { + Key string + Value interface{} +} + +// EventBaggage is received when SetBaggageItem is called. +type EventBaggage struct { + Key, Value string +} + +// EventLogFields is received when LogFields or LogKV is called. +type EventLogFields opentracing.LogRecord + +// EventLog is received when Log (or one of its derivatives) is called. +// +// DEPRECATED +type EventLog opentracing.LogData + +// EventFinish is received when Finish is called. 
+type EventFinish RawSpan + +func (s *spanImpl) onCreate(opName string) { + if s.event != nil { + s.event(EventCreate{OperationName: opName}) + } +} +func (s *spanImpl) onTag(key string, value interface{}) { + if s.event != nil { + s.event(EventTag{Key: key, Value: value}) + } +} +func (s *spanImpl) onLog(ld opentracing.LogData) { + if s.event != nil { + s.event(EventLog(ld)) + } +} +func (s *spanImpl) onLogFields(lr opentracing.LogRecord) { + if s.event != nil { + s.event(EventLogFields(lr)) + } +} +func (s *spanImpl) onBaggage(key, value string) { + if s.event != nil { + s.event(EventBaggage{Key: key, Value: value}) + } +} +func (s *spanImpl) onFinish(sp RawSpan) { + if s.event != nil { + s.event(EventFinish(sp)) + } +} diff --git a/vendor/github.com/ipfs/go-log/tracer/propagation.go b/vendor/github.com/ipfs/go-log/tracer/propagation.go new file mode 100644 index 0000000000..bb2106597a --- /dev/null +++ b/vendor/github.com/ipfs/go-log/tracer/propagation.go @@ -0,0 +1,61 @@ +package loggabletracer + +import opentracing "github.com/opentracing/opentracing-go" + +type accessorPropagator struct { + tracer *LoggableTracer +} + +// DelegatingCarrier is a flexible carrier interface which can be implemented +// by types which have a means of storing the trace metadata and already know +// how to serialize themselves (for example, protocol buffers). 
+type DelegatingCarrier interface { + SetState(traceID, spanID uint64, sampled bool) + State() (traceID, spanID uint64, sampled bool) + SetBaggageItem(key, value string) + GetBaggage(func(key, value string)) +} + +func (p *accessorPropagator) Inject( + spanContext opentracing.SpanContext, + carrier interface{}, +) error { + dc, ok := carrier.(DelegatingCarrier) + if !ok || dc == nil { + return opentracing.ErrInvalidCarrier + } + sc, ok := spanContext.(SpanContext) + if !ok { + return opentracing.ErrInvalidSpanContext + } + dc.SetState(sc.TraceID, sc.SpanID, sc.Sampled) + for k, v := range sc.Baggage { + dc.SetBaggageItem(k, v) + } + return nil +} + +func (p *accessorPropagator) Extract( + carrier interface{}, +) (opentracing.SpanContext, error) { + dc, ok := carrier.(DelegatingCarrier) + if !ok || dc == nil { + return nil, opentracing.ErrInvalidCarrier + } + + traceID, spanID, sampled := dc.State() + sc := SpanContext{ + TraceID: traceID, + SpanID: spanID, + Sampled: sampled, + Baggage: nil, + } + dc.GetBaggage(func(k, v string) { + if sc.Baggage == nil { + sc.Baggage = map[string]string{} + } + sc.Baggage[k] = v + }) + + return sc, nil +} diff --git a/vendor/github.com/ipfs/go-log/tracer/propagation_ot.go b/vendor/github.com/ipfs/go-log/tracer/propagation_ot.go new file mode 100644 index 0000000000..28cf526cf8 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/tracer/propagation_ot.go @@ -0,0 +1,178 @@ +package loggabletracer + +import ( + "encoding/binary" + "io" + "strconv" + "strings" + + "github.com/gogo/protobuf/proto" + "github.com/ipfs/go-log/tracer/wire" + opentracing "github.com/opentracing/opentracing-go" +) + +type textMapPropagator struct { +} +type binaryPropagator struct { +} + +const ( + prefixTracerState = "ot-tracer-" + prefixBaggage = "ot-baggage-" + + tracerStateFieldCount = 3 + fieldNameTraceID = prefixTracerState + "traceid" + fieldNameSpanID = prefixTracerState + "spanid" + fieldNameSampled = prefixTracerState + "sampled" +) + +func (p 
*textMapPropagator) Inject( + spanContext opentracing.SpanContext, + opaqueCarrier interface{}, +) error { + sc, ok := spanContext.(SpanContext) + if !ok { + return opentracing.ErrInvalidSpanContext + } + carrier, ok := opaqueCarrier.(opentracing.TextMapWriter) + if !ok { + return opentracing.ErrInvalidCarrier + } + carrier.Set(fieldNameTraceID, strconv.FormatUint(sc.TraceID, 16)) + carrier.Set(fieldNameSpanID, strconv.FormatUint(sc.SpanID, 16)) + carrier.Set(fieldNameSampled, strconv.FormatBool(sc.Sampled)) + + for k, v := range sc.Baggage { + carrier.Set(prefixBaggage+k, v) + } + return nil +} + +func (p *textMapPropagator) Extract( + opaqueCarrier interface{}, +) (opentracing.SpanContext, error) { + carrier, ok := opaqueCarrier.(opentracing.TextMapReader) + if !ok { + return nil, opentracing.ErrInvalidCarrier + } + requiredFieldCount := 0 + var traceID, spanID uint64 + var sampled bool + var err error + decodedBaggage := make(map[string]string) + err = carrier.ForeachKey(func(k, v string) error { + switch strings.ToLower(k) { + case fieldNameTraceID: + traceID, err = strconv.ParseUint(v, 16, 64) + if err != nil { + return opentracing.ErrSpanContextCorrupted + } + case fieldNameSpanID: + spanID, err = strconv.ParseUint(v, 16, 64) + if err != nil { + return opentracing.ErrSpanContextCorrupted + } + case fieldNameSampled: + sampled, err = strconv.ParseBool(v) + if err != nil { + return opentracing.ErrSpanContextCorrupted + } + default: + lowercaseK := strings.ToLower(k) + if strings.HasPrefix(lowercaseK, prefixBaggage) { + decodedBaggage[strings.TrimPrefix(lowercaseK, prefixBaggage)] = v + } + // Balance off the requiredFieldCount++ just below... 
+ requiredFieldCount-- + } + requiredFieldCount++ + return nil + }) + if err != nil { + return nil, err + } + if requiredFieldCount < tracerStateFieldCount { + if requiredFieldCount == 0 { + return nil, opentracing.ErrSpanContextNotFound + } + return nil, opentracing.ErrSpanContextCorrupted + } + + return SpanContext{ + TraceID: traceID, + SpanID: spanID, + Sampled: sampled, + Baggage: decodedBaggage, + }, nil +} + +func (p *binaryPropagator) Inject( + spanContext opentracing.SpanContext, + opaqueCarrier interface{}, +) error { + sc, ok := spanContext.(SpanContext) + if !ok { + return opentracing.ErrInvalidSpanContext + } + carrier, ok := opaqueCarrier.(io.Writer) + if !ok { + return opentracing.ErrInvalidCarrier + } + + state := wire.TracerState{} + state.TraceId = sc.TraceID + state.SpanId = sc.SpanID + state.Sampled = sc.Sampled + state.BaggageItems = sc.Baggage + + b, err := proto.Marshal(&state) + if err != nil { + return err + } + + // Write the length of the marshalled binary to the writer. + length := uint32(len(b)) + if err := binary.Write(carrier, binary.BigEndian, &length); err != nil { + return err + } + + _, err = carrier.Write(b) + return err +} + +func (p *binaryPropagator) Extract( + opaqueCarrier interface{}, +) (opentracing.SpanContext, error) { + carrier, ok := opaqueCarrier.(io.Reader) + if !ok { + return nil, opentracing.ErrInvalidCarrier + } + + // Read the length of marshalled binary. io.ReadAll isn't that performant + // since it keeps resizing the underlying buffer as it encounters more bytes + // to read. By reading the length, we can allocate a fixed sized buf and read + // the exact amount of bytes into it. 
+ var length uint32 + if err := binary.Read(carrier, binary.BigEndian, &length); err != nil { + return nil, opentracing.ErrSpanContextCorrupted + } + buf := make([]byte, length) + if n, err := carrier.Read(buf); err != nil { + if n > 0 { + return nil, opentracing.ErrSpanContextCorrupted + } + return nil, opentracing.ErrSpanContextNotFound + } + + ctx := wire.TracerState{} + if err := proto.Unmarshal(buf, &ctx); err != nil { + return nil, opentracing.ErrSpanContextCorrupted + } + + return SpanContext{ + TraceID: ctx.TraceId, + SpanID: ctx.SpanId, + Sampled: ctx.Sampled, + Baggage: ctx.BaggageItems, + }, nil +} diff --git a/vendor/github.com/ipfs/go-log/tracer/raw.go b/vendor/github.com/ipfs/go-log/tracer/raw.go new file mode 100644 index 0000000000..759454274a --- /dev/null +++ b/vendor/github.com/ipfs/go-log/tracer/raw.go @@ -0,0 +1,34 @@ +package loggabletracer + +import ( + "time" + + opentracing "github.com/opentracing/opentracing-go" +) + +// RawSpan encapsulates all state associated with a (finished) Span. +type RawSpan struct { + // Those recording the RawSpan should also record the contents of its + // SpanContext. + Context SpanContext + + // The SpanID of this SpanContext's first intra-trace reference (i.e., + // "parent"), or 0 if there is no parent. + ParentSpanID uint64 + + // The name of the "operation" this span is an instance of. (Called a "span + // name" in some implementations) + Operation string + + // We store rather than so that only + // one of the timestamps has global clock uncertainty issues. + Start time.Time + Duration time.Duration + + // Essentially an extension mechanism. Can be used for many purposes, + // not to be enumerated here. + Tags opentracing.Tags + + // The span's "microlog". 
+ Logs []opentracing.LogRecord +} diff --git a/vendor/github.com/ipfs/go-log/tracer/recorder.go b/vendor/github.com/ipfs/go-log/tracer/recorder.go new file mode 100644 index 0000000000..dbe055a18a --- /dev/null +++ b/vendor/github.com/ipfs/go-log/tracer/recorder.go @@ -0,0 +1,103 @@ +package loggabletracer + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "os" + "time" + + writer "github.com/ipfs/go-log/writer" + opentrace "github.com/opentracing/opentracing-go" +) + +// A SpanRecorder handles all of the `RawSpan` data generated via an +// associated `Tracer` (see `NewStandardTracer`) instance. It also names +// the containing process and provides access to a straightforward tag map. +type SpanRecorder interface { + // Implementations must determine whether and where to store `span`. + RecordSpan(span RawSpan) +} + +type LoggableSpanRecorder struct{} + +// NewLoggableRecorder creates new LoggableSpanRecorder +func NewLoggableRecorder() *LoggableSpanRecorder { + return new(LoggableSpanRecorder) +} + +// Loggable Representation of a span, treated as an event log +type LoggableSpan struct { + TraceID uint64 `json:"TraceID"` + SpanID uint64 `json:"SpanID"` + ParentSpanID uint64 `json:"ParentSpanID"` + Operation string `json:"Operation"` + Start time.Time `json:"Start"` + Duration time.Duration `json:"Duration"` + Tags opentrace.Tags `json:"Tags"` + Logs []SpanLog `json:"Logs"` +} + +type SpanLog struct { + Timestamp time.Time `json:"Timestamp"` + Field []SpanField `json:"Fields"` +} + +type SpanField struct { + Key string `json:"Key"` + Value string `json:"Value"` +} + +// RecordSpan implements the respective method of SpanRecorder. 
+func (r *LoggableSpanRecorder) RecordSpan(span RawSpan) { + // short circuit if theres nothing to write to + if !writer.WriterGroup.Active() { + return + } + + sl := make([]SpanLog, len(span.Logs)) + for i := range span.Logs { + sl[i].Timestamp = span.Logs[i].Timestamp + sf := make([]SpanField, len(span.Logs[i].Fields)) + sl[i].Field = sf + for j := range span.Logs[i].Fields { + sf[j].Key = span.Logs[i].Fields[j].Key() + sf[j].Value = fmt.Sprint(span.Logs[i].Fields[j].Value()) + } + } + + tags := make(map[string]interface{}, len(span.Tags)) + for k, v := range span.Tags { + switch vt := v.(type) { + case bool, string, int, int8, int16, int32, int64, uint, uint8, uint16, uint64: + tags[k] = v + case []byte: + base64.StdEncoding.EncodeToString(vt) + default: + tags[k] = fmt.Sprint(v) + } + } + + spanlog := &LoggableSpan{ + TraceID: span.Context.TraceID, + SpanID: span.Context.SpanID, + ParentSpanID: span.ParentSpanID, + Operation: span.Operation, + Start: span.Start, + Duration: span.Duration, + Tags: tags, + Logs: sl, + } + + var buf bytes.Buffer + encoder := json.NewEncoder(&buf) + encoder.SetEscapeHTML(false) + err := encoder.Encode(spanlog) + if err != nil { + fmt.Fprintf(os.Stderr, "ERROR FORMATTING SPAN ENTRY: %s\n", err) + return + } + + _, _ = writer.WriterGroup.Write(buf.Bytes()) +} diff --git a/vendor/github.com/ipfs/go-log/tracer/span.go b/vendor/github.com/ipfs/go-log/tracer/span.go new file mode 100644 index 0000000000..a23a57c32b --- /dev/null +++ b/vendor/github.com/ipfs/go-log/tracer/span.go @@ -0,0 +1,274 @@ +package loggabletracer + +import ( + "sync" + "time" + + opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" +) + +// Span provides access to the essential details of the span, for use +// by loggabletracer consumers. These methods may only be called prior +// to (*opentracing.Span).Finish(). 
+type Span interface { + opentracing.Span + + // Operation names the work done by this span instance + Operation() string + + // Start indicates when the span began + Start() time.Time +} + +// Implements the `Span` interface. Created via LoggableTracer (see +// `loggabletracer.New()`). +type spanImpl struct { + tracer *LoggableTracer + event func(SpanEvent) + sync.Mutex // protects the fields below + raw RawSpan + // The number of logs dropped because of MaxLogsPerSpan. + numDroppedLogs int +} + +var spanPool = &sync.Pool{New: func() interface{} { + return &spanImpl{} +}} + +func (s *spanImpl) reset() { + s.tracer, s.event = nil, nil + // Note: Would like to do the following, but then the consumer of RawSpan + // (the recorder) needs to make sure that they're not holding on to the + // baggage or logs when they return (i.e. they need to copy if they care): + // + // logs, baggage := s.raw.Logs[:0], s.raw.Baggage + // for k := range baggage { + // delete(baggage, k) + // } + // s.raw.Logs, s.raw.Baggage = logs, baggage + // + // That's likely too much to ask for. But there is some magic we should + // be able to do with `runtime.SetFinalizer` to reclaim that memory into + // a buffer pool when GC considers them unreachable, which should ease + // some of the load. Hard to say how quickly that would be in practice + // though. 
+ s.raw = RawSpan{ + Context: SpanContext{}, + } +} + +func (s *spanImpl) SetOperationName(operationName string) opentracing.Span { + s.Lock() + defer s.Unlock() + s.raw.Operation = operationName + return s +} + +func (s *spanImpl) trim() bool { + return !s.raw.Context.Sampled && s.tracer.options.TrimUnsampledSpans +} + +func (s *spanImpl) SetTag(key string, value interface{}) opentracing.Span { + defer s.onTag(key, value) + s.Lock() + defer s.Unlock() + if key == string(ext.SamplingPriority) { + if v, ok := value.(uint16); ok { + s.raw.Context.Sampled = v != 0 + return s + } + } + if s.trim() { + return s + } + + if s.raw.Tags == nil { + s.raw.Tags = opentracing.Tags{} + } + s.raw.Tags[key] = value + return s +} + +func (s *spanImpl) LogKV(keyValues ...interface{}) { + fields, err := log.InterleavedKVToFields(keyValues...) + if err != nil { + s.LogFields(log.Error(err), log.String("function", "LogKV")) + return + } + s.LogFields(fields...) +} + +func (s *spanImpl) appendLog(lr opentracing.LogRecord) { + maxLogs := s.tracer.options.MaxLogsPerSpan + if maxLogs == 0 || len(s.raw.Logs) < maxLogs { + s.raw.Logs = append(s.raw.Logs, lr) + return + } + + // We have too many logs. We don't touch the first numOld logs; we treat the + // rest as a circular buffer and overwrite the oldest log among those. 
+ numOld := (maxLogs - 1) / 2 + numNew := maxLogs - numOld + s.raw.Logs[numOld+s.numDroppedLogs%numNew] = lr + s.numDroppedLogs++ +} + +func (s *spanImpl) LogFields(fields ...log.Field) { + lr := opentracing.LogRecord{ + Fields: fields, + } + defer s.onLogFields(lr) + s.Lock() + defer s.Unlock() + if s.trim() || s.tracer.options.DropAllLogs { + return + } + if lr.Timestamp.IsZero() { + lr.Timestamp = time.Now() + } + s.appendLog(lr) +} + +func (s *spanImpl) LogEvent(event string) { + s.Log(opentracing.LogData{ + Event: event, + }) +} + +func (s *spanImpl) LogEventWithPayload(event string, payload interface{}) { + s.Log(opentracing.LogData{ + Event: event, + Payload: payload, + }) +} + +func (s *spanImpl) Log(ld opentracing.LogData) { + defer s.onLog(ld) + s.Lock() + defer s.Unlock() + if s.trim() || s.tracer.options.DropAllLogs { + return + } + + if ld.Timestamp.IsZero() { + ld.Timestamp = time.Now() + } + + s.appendLog(ld.ToLogRecord()) +} + +func (s *spanImpl) Finish() { + s.FinishWithOptions(opentracing.FinishOptions{}) +} + +// rotateLogBuffer rotates the records in the buffer: records 0 to pos-1 move at +// the end (i.e. pos circular left shifts). 
+func rotateLogBuffer(buf []opentracing.LogRecord, pos int) { + // This algorithm is described in: + // http://www.cplusplus.com/reference/algorithm/rotate + for first, middle, next := 0, pos, pos; first != middle; { + buf[first], buf[next] = buf[next], buf[first] + first++ + next++ + if next == len(buf) { + next = middle + } else if first == middle { + middle = next + } + } +} + +func (s *spanImpl) FinishWithOptions(opts opentracing.FinishOptions) { + finishTime := opts.FinishTime + if finishTime.IsZero() { + finishTime = time.Now() + } + duration := finishTime.Sub(s.raw.Start) + + s.Lock() + defer s.Unlock() + + for _, lr := range opts.LogRecords { + s.appendLog(lr) + } + for _, ld := range opts.BulkLogData { + s.appendLog(ld.ToLogRecord()) + } + + if s.numDroppedLogs > 0 { + // We dropped some log events, which means that we used part of Logs as a + // circular buffer (see appendLog). De-circularize it. + numOld := (len(s.raw.Logs) - 1) / 2 + numNew := len(s.raw.Logs) - numOld + rotateLogBuffer(s.raw.Logs[numOld:], s.numDroppedLogs%numNew) + + // Replace the log in the middle (the oldest "new" log) with information + // about the dropped logs. This means that we are effectively dropping one + // more "new" log. + numDropped := s.numDroppedLogs + 1 + s.raw.Logs[numOld] = opentracing.LogRecord{ + // Keep the timestamp of the last dropped event. + Timestamp: s.raw.Logs[numOld].Timestamp, + Fields: []log.Field{ + log.String("event", "dropped Span logs"), + log.Int("dropped_log_count", numDropped), + log.String("component", "loggabletracer"), + }, + } + } + + s.raw.Duration = duration + + s.onFinish(s.raw) + s.tracer.options.Recorder.RecordSpan(s.raw) + + // Last chance to get options before the span is possibly reset. + poolEnabled := s.tracer.options.EnableSpanPool + if s.tracer.options.DebugAssertUseAfterFinish { + // This makes it much more likely to catch a panic on any subsequent + // operation since s.tracer is accessed on every call to `Lock`. 
+ // We don't call `reset()` here to preserve the logs in the Span + // which are printed when the assertion triggers. + s.tracer = nil + } + + if poolEnabled { + spanPool.Put(s) + } +} + +func (s *spanImpl) Tracer() opentracing.Tracer { + return s.tracer +} + +func (s *spanImpl) Context() opentracing.SpanContext { + return s.raw.Context +} + +func (s *spanImpl) SetBaggageItem(key, val string) opentracing.Span { + s.onBaggage(key, val) + if s.trim() { + return s + } + + s.Lock() + defer s.Unlock() + s.raw.Context = s.raw.Context.WithBaggageItem(key, val) + return s +} + +func (s *spanImpl) BaggageItem(key string) string { + s.Lock() + defer s.Unlock() + return s.raw.Context.Baggage[key] +} + +func (s *spanImpl) Operation() string { + return s.raw.Operation +} + +func (s *spanImpl) Start() time.Time { + return s.raw.Start +} diff --git a/vendor/github.com/ipfs/go-log/tracer/tracer.go b/vendor/github.com/ipfs/go-log/tracer/tracer.go new file mode 100644 index 0000000000..a6ea3a22ed --- /dev/null +++ b/vendor/github.com/ipfs/go-log/tracer/tracer.go @@ -0,0 +1,280 @@ +package loggabletracer + +import ( + "time" + + writer "github.com/ipfs/go-log/writer" + opentracing "github.com/opentracing/opentracing-go" +) + +// Tracer extends the opentracing.Tracer interface with methods to +// probe implementation state, for use by loggabletracer consumers. +type Tracer interface { + opentracing.Tracer + + // Options gets the Options used in New() or NewWithOptions(). + Options() Options +} + +// Options allows creating a customized Tracer via NewWithOptions. The object +// must not be updated when there is an active tracer using it. +type Options struct { + // ShouldSample is a function which is called when creating a new Span and + // determines whether that Span is sampled. The randomized TraceID is supplied + // to allow deterministic sampling decisions to be made across different nodes. 
+ // For example, + // + // func(traceID uint64) { return traceID % 64 == 0 } + // + // samples every 64th trace on average. + ShouldSample func(traceID uint64) bool + // TrimUnsampledSpans turns potentially expensive operations on unsampled + // Spans into no-ops. More precisely, tags and log events are silently + // discarded. If NewSpanEventListener is set, the callbacks will still fire. + TrimUnsampledSpans bool + // Recorder receives Spans which have been finished. + Recorder SpanRecorder + // NewSpanEventListener can be used to enhance the tracer by effectively + // attaching external code to trace events. See NetTraceIntegrator for a + // practical example, and event.go for the list of possible events. + NewSpanEventListener func() func(SpanEvent) + // DropAllLogs turns log events on all Spans into no-ops. + // If NewSpanEventListener is set, the callbacks will still fire. + DropAllLogs bool + // MaxLogsPerSpan limits the number of Logs in a span (if set to a nonzero + // value). If a span has more logs than this value, logs are dropped as + // necessary (and replaced with a log describing how many were dropped). + // + // About half of the MaxLogPerSpan logs kept are the oldest logs, and about + // half are the newest logs. + // + // If NewSpanEventListener is set, the callbacks will still fire for all log + // events. This value is ignored if DropAllLogs is true. + MaxLogsPerSpan int + // DebugAssertSingleGoroutine internally records the ID of the goroutine + // creating each Span and verifies that no operation is carried out on + // it on a different goroutine. + // Provided strictly for development purposes. + // Passing Spans between goroutine without proper synchronization often + // results in use-after-Finish() errors. 
For a simple example, consider the + // following pseudocode: + // + // func (s *Server) Handle(req http.Request) error { + // sp := s.StartSpan("server") + // defer sp.Finish() + // wait := s.queueProcessing(opentracing.ContextWithSpan(context.Background(), sp), req) + // select { + // case resp := <-wait: + // return resp.Error + // case <-time.After(10*time.Second): + // sp.LogEvent("timed out waiting for processing") + // return ErrTimedOut + // } + // } + // + // This looks reasonable at first, but a request which spends more than ten + // seconds in the queue is abandoned by the main goroutine and its trace + // finished, leading to use-after-finish when the request is finally + // processed. Note also that even joining on to a finished Span via + // StartSpanWithOptions constitutes an illegal operation. + // + // Code bases which do not require (or decide they do not want) Spans to + // be passed across goroutine boundaries can run with this flag enabled in + // tests to increase their chances of spotting wrong-doers. + DebugAssertSingleGoroutine bool + // DebugAssertUseAfterFinish is provided strictly for development purposes. + // When set, it attempts to exacerbate issues emanating from use of Spans + // after calling Finish by running additional assertions. + DebugAssertUseAfterFinish bool + // EnableSpanPool enables the use of a pool, so that the tracer reuses spans + // after Finish has been called on it. Adds a slight performance gain as it + // reduces allocations. However, if you have any use-after-finish race + // conditions the code may panic. + EnableSpanPool bool +} + +// DefaultOptions returns an Options object with a 1 in 64 sampling rate and +// all options disabled. A Recorder needs to be set manually before using the +// returned object with a Tracer. 
+func DefaultOptions() Options { + return Options{ + ShouldSample: func(traceID uint64) bool { return traceID%64 == 0 }, + MaxLogsPerSpan: 100, + } +} + +// NewWithOptions creates a customized Tracer. +func NewWithOptions(opts Options) opentracing.Tracer { + rval := &LoggableTracer{options: opts} + rval.accessorPropagator = &accessorPropagator{rval} + return rval +} + +// New creates and returns a standard Tracer which defers completed Spans to +// `recorder`. +// Spans created by this Tracer support the ext.SamplingPriority tag: Setting +// ext.SamplingPriority causes the Span to be Sampled from that point on. +func New(recorder SpanRecorder) opentracing.Tracer { + opts := DefaultOptions() + opts.Recorder = recorder + return NewWithOptions(opts) +} + +// Implements the `Tracer` interface. +type LoggableTracer struct { + options Options + textPropagator *textMapPropagator + binaryPropagator *binaryPropagator + accessorPropagator *accessorPropagator +} + +func (t *LoggableTracer) StartSpan( + operationName string, + opts ...opentracing.StartSpanOption, +) opentracing.Span { + + if !writer.WriterGroup.Active() { + return opentracing.NoopTracer.StartSpan(opentracing.NoopTracer{}, operationName) + } + + sso := opentracing.StartSpanOptions{} + for _, o := range opts { + o.Apply(&sso) + } + return t.StartSpanWithOptions(operationName, sso) +} + +func (t *LoggableTracer) getSpan() *spanImpl { + if t.options.EnableSpanPool { + sp := spanPool.Get().(*spanImpl) + sp.reset() + return sp + } + return &spanImpl{} +} + +func (t *LoggableTracer) StartSpanWithOptions( + operationName string, + opts opentracing.StartSpanOptions, +) opentracing.Span { + if !writer.WriterGroup.Active() { + return opentracing.NoopTracer.StartSpan(opentracing.NoopTracer{}, operationName) + } + // Start time. + startTime := opts.StartTime + if startTime.IsZero() { + startTime = time.Now() + } + + // Tags. + tags := opts.Tags + + // Build the new span. 
This is the only allocation: We'll return this as + // an opentracing.Span. + sp := t.getSpan() + // Look for a parent in the list of References. + // + // TODO: would be nice if loggabletracer did something with all + // References, not just the first one. +ReferencesLoop: + for _, ref := range opts.References { + switch ref.Type { + case opentracing.ChildOfRef, + opentracing.FollowsFromRef: + + refCtx, ok := ref.ReferencedContext.(SpanContext) + if !ok { + // Could be a noopSpanContext + // Ignore that parent. + continue + } + sp.raw.Context.TraceID = refCtx.TraceID + sp.raw.Context.SpanID = randomID() + sp.raw.Context.Sampled = refCtx.Sampled + sp.raw.ParentSpanID = refCtx.SpanID + + if l := len(refCtx.Baggage); l > 0 { + sp.raw.Context.Baggage = make(map[string]string, l) + for k, v := range refCtx.Baggage { + sp.raw.Context.Baggage[k] = v + } + } + break ReferencesLoop + } + } + if sp.raw.Context.TraceID == 0 { + // No parent Span found; allocate new trace and span ids and determine + // the Sampled status. + sp.raw.Context.TraceID, sp.raw.Context.SpanID = randomID2() + sp.raw.Context.Sampled = t.options.ShouldSample(sp.raw.Context.TraceID) + } + + return t.startSpanInternal( + sp, + operationName, + startTime, + tags, + ) +} + +func (t *LoggableTracer) startSpanInternal( + sp *spanImpl, + operationName string, + startTime time.Time, + tags opentracing.Tags, +) opentracing.Span { + sp.tracer = t + if t.options.NewSpanEventListener != nil { + sp.event = t.options.NewSpanEventListener() + } + sp.raw.Operation = operationName + sp.raw.Start = startTime + sp.raw.Duration = -1 + sp.raw.Tags = tags + if t.options.DebugAssertSingleGoroutine { + sp.SetTag(debugGoroutineIDTag, curGoroutineID()) + } + defer sp.onCreate(operationName) + return sp +} + +type delegatorType struct{} + +// Delegator is the format to use for DelegatingCarrier. 
+var Delegator delegatorType + +func (t *LoggableTracer) Inject(sc opentracing.SpanContext, format interface{}, carrier interface{}) error { + if !writer.WriterGroup.Active() { + return opentracing.NoopTracer.Inject(opentracing.NoopTracer{}, sc, format, carrier) + } + switch format { + case opentracing.TextMap, opentracing.HTTPHeaders: + return t.textPropagator.Inject(sc, carrier) + case opentracing.Binary: + return t.binaryPropagator.Inject(sc, carrier) + } + if _, ok := format.(delegatorType); ok { + return t.accessorPropagator.Inject(sc, carrier) + } + return opentracing.ErrUnsupportedFormat +} + +func (t *LoggableTracer) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) { + if !writer.WriterGroup.Active() { + return opentracing.NoopTracer.Extract(opentracing.NoopTracer{}, format, carrier) + } + switch format { + case opentracing.TextMap, opentracing.HTTPHeaders: + return t.textPropagator.Extract(carrier) + case opentracing.Binary: + return t.binaryPropagator.Extract(carrier) + } + if _, ok := format.(delegatorType); ok { + return t.accessorPropagator.Extract(carrier) + } + return nil, opentracing.ErrUnsupportedFormat +} + +func (t *LoggableTracer) Options() Options { + return t.options +} diff --git a/vendor/github.com/ipfs/go-log/tracer/util.go b/vendor/github.com/ipfs/go-log/tracer/util.go new file mode 100644 index 0000000000..279e2acaad --- /dev/null +++ b/vendor/github.com/ipfs/go-log/tracer/util.go @@ -0,0 +1,25 @@ +package loggabletracer + +import ( + "math/rand" + "sync" + "time" +) + +var ( + seededIDGen = rand.New(rand.NewSource(time.Now().UnixNano())) + // The golang rand generators are *not* intrinsically thread-safe. 
+ seededIDLock sync.Mutex +) + +func randomID() uint64 { + seededIDLock.Lock() + defer seededIDLock.Unlock() + return uint64(seededIDGen.Int63()) +} + +func randomID2() (uint64, uint64) { + seededIDLock.Lock() + defer seededIDLock.Unlock() + return uint64(seededIDGen.Int63()), uint64(seededIDGen.Int63()) +} diff --git a/vendor/github.com/ipfs/go-log/tracer/wire/Makefile b/vendor/github.com/ipfs/go-log/tracer/wire/Makefile new file mode 100644 index 0000000000..8677a37114 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/tracer/wire/Makefile @@ -0,0 +1,6 @@ +pbgos := $(patsubst %.proto,%.pb.go,$(wildcard *.proto)) + +all: $(pbgos) + +%.pb.go: %.proto + protoc --gogofaster_out=. --proto_path=$(GOPATH)/src:. $< diff --git a/vendor/github.com/ipfs/go-log/tracer/wire/carrier.go b/vendor/github.com/ipfs/go-log/tracer/wire/carrier.go new file mode 100644 index 0000000000..12ec98e906 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/tracer/wire/carrier.go @@ -0,0 +1,40 @@ +package wire + +// ProtobufCarrier is a DelegatingCarrier that uses protocol buffers as the +// the underlying datastructure. The reason for implementing DelagatingCarrier +// is to allow for end users to serialize the underlying protocol buffers using +// jsonpb or any other serialization forms they want. +type ProtobufCarrier TracerState + +// SetState set's the tracer state. +func (p *ProtobufCarrier) SetState(traceID, spanID uint64, sampled bool) { + p.TraceId = traceID + p.SpanId = spanID + p.Sampled = sampled +} + +// State returns the tracer state. +func (p *ProtobufCarrier) State() (traceID, spanID uint64, sampled bool) { + traceID = p.TraceId + spanID = p.SpanId + sampled = p.Sampled + return traceID, spanID, sampled +} + +// SetBaggageItem sets a baggage item. 
+func (p *ProtobufCarrier) SetBaggageItem(key, value string) { + if p.BaggageItems == nil { + p.BaggageItems = map[string]string{key: value} + return + } + + p.BaggageItems[key] = value +} + +// GetBaggage iterates over each baggage item and executes the callback with +// the key:value pair. +func (p *ProtobufCarrier) GetBaggage(f func(k, v string)) { + for k, v := range p.BaggageItems { + f(k, v) + } +} diff --git a/vendor/github.com/ipfs/go-log/tracer/wire/gen.go b/vendor/github.com/ipfs/go-log/tracer/wire/gen.go new file mode 100644 index 0000000000..7d951fa43f --- /dev/null +++ b/vendor/github.com/ipfs/go-log/tracer/wire/gen.go @@ -0,0 +1,6 @@ +package wire + +//go:generate protoc --gogofaster_out=$GOPATH/src/github.com/ipfs/go-log/tracer/wire wire.proto + +// Run `go get github.com/gogo/protobuf/protoc-gen-gogofaster` to install the +// gogofaster generator binary. diff --git a/vendor/github.com/ipfs/go-log/tracer/wire/wire.pb.go b/vendor/github.com/ipfs/go-log/tracer/wire/wire.pb.go new file mode 100644 index 0000000000..0bbf3f1b60 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/tracer/wire/wire.pb.go @@ -0,0 +1,531 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: wire.proto + +package wire + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type TracerState struct { + TraceId uint64 `protobuf:"fixed64,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + SpanId uint64 `protobuf:"fixed64,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + Sampled bool `protobuf:"varint,3,opt,name=sampled,proto3" json:"sampled,omitempty"` + BaggageItems map[string]string `protobuf:"bytes,4,rep,name=baggage_items,json=baggageItems,proto3" json:"baggage_items,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *TracerState) Reset() { *m = TracerState{} } +func (m *TracerState) String() string { return proto.CompactTextString(m) } +func (*TracerState) ProtoMessage() {} +func (*TracerState) Descriptor() ([]byte, []int) { + return fileDescriptor_f2dcdddcdf68d8e0, []int{0} +} +func (m *TracerState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TracerState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TracerState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TracerState) XXX_Merge(src proto.Message) { + xxx_messageInfo_TracerState.Merge(m, src) +} +func (m *TracerState) XXX_Size() int { + return m.Size() +} +func (m *TracerState) XXX_DiscardUnknown() { + xxx_messageInfo_TracerState.DiscardUnknown(m) +} + +var xxx_messageInfo_TracerState proto.InternalMessageInfo + +func (m *TracerState) GetTraceId() uint64 { + if m != nil { + return m.TraceId + } + return 0 +} + +func (m *TracerState) GetSpanId() uint64 { + if m != nil { + return m.SpanId + } + return 0 +} + +func (m *TracerState) GetSampled() bool { + if m != nil { + return m.Sampled + } + return false +} + +func (m *TracerState) GetBaggageItems() map[string]string { + if m != nil { + return 
m.BaggageItems + } + return nil +} + +func init() { + proto.RegisterType((*TracerState)(nil), "loggabletracer.wire.TracerState") + proto.RegisterMapType((map[string]string)(nil), "loggabletracer.wire.TracerState.BaggageItemsEntry") +} + +func init() { proto.RegisterFile("wire.proto", fileDescriptor_f2dcdddcdf68d8e0) } + +var fileDescriptor_f2dcdddcdf68d8e0 = []byte{ + // 250 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xcf, 0x2c, 0x4a, + 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xce, 0xc9, 0x4f, 0x4f, 0x4f, 0x4c, 0xca, 0x49, + 0x2d, 0x29, 0x4a, 0x4c, 0x4e, 0x2d, 0xd2, 0x03, 0x49, 0x29, 0x7d, 0x65, 0xe4, 0xe2, 0x0e, 0x01, + 0xf3, 0x83, 0x4b, 0x12, 0x4b, 0x52, 0x85, 0x24, 0xb9, 0x38, 0xc0, 0xd2, 0xf1, 0x99, 0x29, 0x12, + 0x8c, 0x0a, 0x8c, 0x1a, 0x6c, 0x41, 0xec, 0x60, 0xbe, 0x67, 0x8a, 0x90, 0x38, 0x17, 0x7b, 0x71, + 0x41, 0x62, 0x1e, 0x48, 0x86, 0x09, 0x2c, 0xc3, 0x06, 0xe2, 0x7a, 0xa6, 0x08, 0x49, 0x70, 0xb1, + 0x17, 0x27, 0xe6, 0x16, 0xe4, 0xa4, 0xa6, 0x48, 0x30, 0x2b, 0x30, 0x6a, 0x70, 0x04, 0xc1, 0xb8, + 0x42, 0xe1, 0x5c, 0xbc, 0x49, 0x89, 0xe9, 0xe9, 0x89, 0xe9, 0xa9, 0xf1, 0x99, 0x25, 0xa9, 0xb9, + 0xc5, 0x12, 0x2c, 0x0a, 0xcc, 0x1a, 0xdc, 0x46, 0x46, 0x7a, 0x58, 0x9c, 0xa2, 0x87, 0xe4, 0x0c, + 0x3d, 0x27, 0x88, 0x2e, 0x4f, 0x90, 0x26, 0xd7, 0xbc, 0x92, 0xa2, 0xca, 0x20, 0x9e, 0x24, 0x24, + 0x21, 0x29, 0x7b, 0x2e, 0x41, 0x0c, 0x25, 0x42, 0x02, 0x5c, 0xcc, 0xd9, 0xa9, 0x95, 0x60, 0x67, + 0x73, 0x06, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x60, 0x07, 0x73, + 0x06, 0x41, 0x38, 0x56, 0x4c, 0x16, 0x8c, 0x4e, 0x72, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, + 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, + 0x2c, 0xc7, 0x10, 0xc5, 0x02, 0x72, 0x4c, 0x12, 0x1b, 0x38, 0xcc, 0x8c, 0x01, 0x01, 0x00, 0x00, + 0xff, 0xff, 0xe4, 0x48, 0xf4, 0xf8, 0x41, 0x01, 0x00, 0x00, +} + +func (m *TracerState) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TracerState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TracerState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.BaggageItems) > 0 { + for k := range m.BaggageItems { + v := m.BaggageItems[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintWire(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintWire(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintWire(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if m.Sampled { + i-- + if m.Sampled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.SpanId != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.SpanId)) + i-- + dAtA[i] = 0x11 + } + if m.TraceId != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TraceId)) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func encodeVarintWire(dAtA []byte, offset int, v uint64) int { + offset -= sovWire(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *TracerState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TraceId != 0 { + n += 9 + } + if m.SpanId != 0 { + n += 9 + } + if m.Sampled { + n += 2 + } + if len(m.BaggageItems) > 0 { + for k, v := range m.BaggageItems { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovWire(uint64(len(k))) + 1 + len(v) + sovWire(uint64(len(v))) + n += mapEntrySize + 1 + sovWire(uint64(mapEntrySize)) + } + } + return n +} + +func sovWire(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozWire(x uint64) (n 
int) { + return sovWire(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *TracerState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TracerState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TracerState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + m.TraceId = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TraceId = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) + } + m.SpanId = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.SpanId = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sampled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Sampled = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BaggageItems", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWire + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWire + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BaggageItems == nil { + m.BaggageItems = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthWire + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthWire + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthWire + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthWire + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = 
string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipWire(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWire + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.BaggageItems[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWire(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWire + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWire + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWire(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWire + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWire + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWire + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthWire + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupWire + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, 
ErrInvalidLengthWire + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthWire = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWire = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupWire = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/ipfs/go-log/tracer/wire/wire.proto b/vendor/github.com/ipfs/go-log/tracer/wire/wire.proto new file mode 100644 index 0000000000..496fa19817 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/tracer/wire/wire.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; +package loggabletracer.wire; +option go_package = "wire"; + +message TracerState { + fixed64 trace_id = 1; + fixed64 span_id = 2; + bool sampled = 3; + map baggage_items = 4; +} diff --git a/vendor/github.com/ipfs/go-log/v2/LICENSE b/vendor/github.com/ipfs/go-log/v2/LICENSE new file mode 100644 index 0000000000..c7386b3c94 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/v2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/ipfs/go-log/v2/README.md b/vendor/github.com/ipfs/go-log/v2/README.md new file mode 100644 index 0000000000..eb98f57770 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/v2/README.md @@ -0,0 +1,66 @@ +# go-log + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) +[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) +[![GoDoc](https://godoc.org/github.com/ipfs/go-log?status.svg)](https://godoc.org/github.com/ipfs/go-log) +[![Build Status](https://travis-ci.org/ipfs/go-log.svg?branch=master)](https://travis-ci.org/ipfs/go-log) + + + + +> The logging library used by go-ipfs + +go-log wraps [zap](https://github.com/uber-go/zap) to provide a logging facade. go-log manages logging +instances and allows for their levels to be controlled individually. 
+ +## Install + +```sh +go get github.com/ipfs/go-log +``` + +## Usage + +Once the package is imported under the name `logging`, an instance of `EventLogger` can be created like so: + +```go +var log = logging.Logger("subsystem name") +``` + +It can then be used to emit log messages in plain printf-style messages at seven standard levels: + +Levels may be set for all loggers: + +```go +lvl, err := logging.LevelFromString("error") + if err != nil { + panic(err) + } +logging.SetAllLoggers(lvl) +``` + +or individually: + +```go +lvl, err := logging.LevelFromString("error") + if err != nil { + panic(err) + } +logging.SetLogLevel("foo", "info") +``` + +## Contribute + +Feel free to join in. All welcome. Open an [issue](https://github.com/ipfs/go-log/issues)! + +This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). + +### Want to hack on IPFS? + +[![](https://cdn.rawgit.com/jbenet/contribute-ipfs-gif/master/img/contribute.gif)](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md) + +## License + +MIT diff --git a/vendor/github.com/ipfs/go-log/v2/core.go b/vendor/github.com/ipfs/go-log/v2/core.go new file mode 100644 index 0000000000..87e7d9c262 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/v2/core.go @@ -0,0 +1,120 @@ +package log + +import ( + "sync" + + "go.uber.org/multierr" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var _ zapcore.Core = (*lockedMultiCore)(nil) + +type lockedMultiCore struct { + mu sync.RWMutex // guards mutations to cores slice + cores []zapcore.Core +} + +func (l *lockedMultiCore) With(fields []zapcore.Field) zapcore.Core { + l.mu.RLock() + defer l.mu.RUnlock() + sub := &lockedMultiCore{ + cores: make([]zapcore.Core, len(l.cores)), + } + for i := range l.cores { + sub.cores[i] = l.cores[i].With(fields) + } + return sub +} + +func (l *lockedMultiCore) Enabled(lvl zapcore.Level) bool { + l.mu.RLock() + defer l.mu.RUnlock() + for i := range l.cores { + if 
l.cores[i].Enabled(lvl) { + return true + } + } + return false +} + +func (l *lockedMultiCore) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { + l.mu.RLock() + defer l.mu.RUnlock() + for i := range l.cores { + ce = l.cores[i].Check(ent, ce) + } + return ce +} + +func (l *lockedMultiCore) Write(ent zapcore.Entry, fields []zapcore.Field) error { + l.mu.RLock() + defer l.mu.RUnlock() + var err error + for i := range l.cores { + err = multierr.Append(err, l.cores[i].Write(ent, fields)) + } + return err +} + +func (l *lockedMultiCore) Sync() error { + l.mu.RLock() + defer l.mu.RUnlock() + var err error + for i := range l.cores { + err = multierr.Append(err, l.cores[i].Sync()) + } + return err +} + +func (l *lockedMultiCore) AddCore(core zapcore.Core) { + l.mu.Lock() + defer l.mu.Unlock() + + l.cores = append(l.cores, core) +} + +func (l *lockedMultiCore) DeleteCore(core zapcore.Core) { + l.mu.Lock() + defer l.mu.Unlock() + + w := 0 + for i := 0; i < len(l.cores); i++ { + if l.cores[i] == core { + continue + } + l.cores[w] = l.cores[i] + w++ + } + l.cores = l.cores[:w] +} + +func (l *lockedMultiCore) ReplaceCore(original, replacement zapcore.Core) { + l.mu.Lock() + defer l.mu.Unlock() + + for i := 0; i < len(l.cores); i++ { + if l.cores[i] == original { + l.cores[i] = replacement + } + } +} + +func newCore(format LogFormat, ws zapcore.WriteSyncer, level LogLevel) zapcore.Core { + encCfg := zap.NewProductionEncoderConfig() + encCfg.EncodeTime = zapcore.ISO8601TimeEncoder + + var encoder zapcore.Encoder + switch format { + case PlaintextOutput: + encCfg.EncodeLevel = zapcore.CapitalLevelEncoder + encoder = zapcore.NewConsoleEncoder(encCfg) + case JSONOutput: + encoder = zapcore.NewJSONEncoder(encCfg) + default: + encCfg.EncodeLevel = zapcore.CapitalColorLevelEncoder + encoder = zapcore.NewConsoleEncoder(encCfg) + } + + return zapcore.NewCore(encoder, ws, zap.NewAtomicLevelAt(zapcore.Level(level))) +} diff --git 
a/vendor/github.com/ipfs/go-log/v2/go.mod b/vendor/github.com/ipfs/go-log/v2/go.mod new file mode 100644 index 0000000000..9688c5b574 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/v2/go.mod @@ -0,0 +1,8 @@ +module github.com/ipfs/go-log/v2 + +require ( + go.uber.org/multierr v1.5.0 + go.uber.org/zap v1.14.1 +) + +go 1.12 diff --git a/vendor/github.com/ipfs/go-log/v2/go.sum b/vendor/github.com/ipfs/go-log/v2/go.sum new file mode 100644 index 0000000000..f07a2f686b --- /dev/null +++ b/vendor/github.com/ipfs/go-log/v2/go.sum @@ -0,0 +1,60 @@ +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools 
v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/vendor/github.com/ipfs/go-log/v2/levels.go b/vendor/github.com/ipfs/go-log/v2/levels.go new file mode 100644 index 0000000000..9d43a597a1 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/v2/levels.go @@ -0,0 +1,30 @@ +package log + +import "go.uber.org/zap/zapcore" + +// LogLevel represents a log severity level. Use the package variables as an +// enum. +type LogLevel zapcore.Level + +var ( + LevelDebug = LogLevel(zapcore.DebugLevel) + LevelInfo = LogLevel(zapcore.InfoLevel) + LevelWarn = LogLevel(zapcore.WarnLevel) + LevelError = LogLevel(zapcore.ErrorLevel) + LevelDPanic = LogLevel(zapcore.DPanicLevel) + LevelPanic = LogLevel(zapcore.PanicLevel) + LevelFatal = LogLevel(zapcore.FatalLevel) +) + +// LevelFromString parses a string-based level and returns the corresponding +// LogLevel. +// +// Supported strings are: DEBUG, INFO, WARN, ERROR, DPANIC, PANIC, FATAL, and +// their lower-case forms. +// +// The returned LogLevel must be discarded if error is not nil. +func LevelFromString(level string) (LogLevel, error) { + lvl := zapcore.InfoLevel // zero value + err := lvl.Set(level) + return LogLevel(lvl), err +} diff --git a/vendor/github.com/ipfs/go-log/v2/log.go b/vendor/github.com/ipfs/go-log/v2/log.go new file mode 100644 index 0000000000..784e9e5e4d --- /dev/null +++ b/vendor/github.com/ipfs/go-log/v2/log.go @@ -0,0 +1,75 @@ +// Package log is the logging library used by IPFS & libp2p +// (https://github.com/ipfs/go-ipfs). +package log + +import ( + "time" + + "go.uber.org/zap" +) + +// StandardLogger provides API compatibility with standard printf loggers +// eg. 
go-logging +type StandardLogger interface { + Debug(args ...interface{}) + Debugf(format string, args ...interface{}) + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Info(args ...interface{}) + Infof(format string, args ...interface{}) + Panic(args ...interface{}) + Panicf(format string, args ...interface{}) + Warn(args ...interface{}) + Warnf(format string, args ...interface{}) +} + +// EventLogger extends the StandardLogger interface to allow for log items +// containing structured metadata +type EventLogger interface { + StandardLogger +} + +// Logger retrieves an event logger by name +func Logger(system string) *ZapEventLogger { + if len(system) == 0 { + setuplog := getLogger("setup-logger") + setuplog.Error("Missing name parameter") + system = "undefined" + } + + logger := getLogger(system) + skipLogger := logger.Desugar().WithOptions(zap.AddCallerSkip(1)).Sugar() + + return &ZapEventLogger{ + system: system, + SugaredLogger: *logger, + skipLogger: *skipLogger, + } +} + +// ZapEventLogger implements the EventLogger and wraps a go-logging Logger +type ZapEventLogger struct { + zap.SugaredLogger + // used to fix the caller location when calling Warning and Warningf. + skipLogger zap.SugaredLogger + system string +} + +// Warning is for compatibility +// Deprecated: use Warn(args ...interface{}) instead +func (logger *ZapEventLogger) Warning(args ...interface{}) { + logger.skipLogger.Warn(args...) +} + +// Warningf is for compatibility +// Deprecated: use Warnf(format string, args ...interface{}) instead +func (logger *ZapEventLogger) Warningf(format string, args ...interface{}) { + logger.skipLogger.Warnf(format, args...) +} + +// FormatRFC3339 returns the given time in UTC with RFC3999Nano format. 
+func FormatRFC3339(t time.Time) string { + return t.UTC().Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/ipfs/go-log/v2/package.json b/vendor/github.com/ipfs/go-log/v2/package.json new file mode 100644 index 0000000000..adcf8cd08e --- /dev/null +++ b/vendor/github.com/ipfs/go-log/v2/package.json @@ -0,0 +1,41 @@ +{ + "bugs": { + "url": "https://github.com/ipfs/go-log" + }, + "gx": { + "dvcsimport": "github.com/ipfs/go-log" + }, + "gxDependencies": [ + { + "author": "whyrusleeping", + "hash": "QmcaSwFc5RBg8yCq54QURwEU4nwjfCpjbpmaAm4VbdGLKv", + "name": "go-logging", + "version": "0.0.0" + }, + { + "author": "frist", + "hash": "QmWLWmRVSiagqP15jczsGME1qpob6HDbtbHAY2he9W5iUo", + "name": "opentracing-go", + "version": "0.0.3" + }, + { + "author": "mattn", + "hash": "QmTsHcKgTQ4VeYZd8eKYpTXeLW7KNwkRD9wjnrwsV2sToq", + "name": "go-colorable", + "version": "0.2.0" + }, + { + "author": "whyrusleeping", + "hash": "QmddjPSGZb3ieihSseFeCfVRpZzcqczPNsD2DvarSwnjJB", + "name": "gogo-protobuf", + "version": "1.2.1" + } + ], + "gxVersion": "0.12.1", + "language": "go", + "license": "", + "name": "go-log", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "1.5.9" +} + diff --git a/vendor/github.com/ipfs/go-log/v2/path_other.go b/vendor/github.com/ipfs/go-log/v2/path_other.go new file mode 100644 index 0000000000..94d339878e --- /dev/null +++ b/vendor/github.com/ipfs/go-log/v2/path_other.go @@ -0,0 +1,11 @@ +//+build !windows + +package log + +import ( + "path/filepath" +) + +func normalizePath(p string) (string, error) { + return filepath.Abs(p) +} diff --git a/vendor/github.com/ipfs/go-log/v2/path_windows.go b/vendor/github.com/ipfs/go-log/v2/path_windows.go new file mode 100644 index 0000000000..08bb1f2886 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/v2/path_windows.go @@ -0,0 +1,35 @@ +//+build windows + +package log + +import ( + "fmt" + "path/filepath" + "strings" +) + +func normalizePath(p string) (string, error) { + if p == "" { + 
return "", fmt.Errorf("path empty") + } + p, err := filepath.Abs(p) + if err != nil { + return "", err + } + // Is this _really_ an absolute path? + if !strings.HasPrefix(p, "\\\\") { + // It's a drive: path! + // Return a UNC path. + p = "\\\\%3F\\" + p + } + + // This will return file:////?/c:/foobar + // + // Why? Because: + // 1. Go will choke on file://c:/ because the "domain" includes a :. + // 2. Windows will choke on file:///c:/ because the path will be + // /c:/... which is _relative_ to the current drive. + // + // This path (a) has no "domain" and (b) starts with a slash. Yay! + return "file://" + filepath.ToSlash(p), nil +} diff --git a/vendor/github.com/ipfs/go-log/v2/pipe.go b/vendor/github.com/ipfs/go-log/v2/pipe.go new file mode 100644 index 0000000000..7435b9dc72 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/v2/pipe.go @@ -0,0 +1,90 @@ +package log + +import ( + "io" + + "go.uber.org/multierr" + "go.uber.org/zap/zapcore" +) + +// A PipeReader is a reader that reads from the logger. It is synchronous +// so blocking on read will affect logging performance. +type PipeReader struct { + r *io.PipeReader + closer io.Closer + core zapcore.Core +} + +// Read implements the standard Read interface +func (p *PipeReader) Read(data []byte) (int, error) { + return p.r.Read(data) +} + +// Close unregisters the reader from the logger. +func (p *PipeReader) Close() error { + if p.core != nil { + loggerCore.DeleteCore(p.core) + } + return multierr.Append(p.core.Sync(), p.closer.Close()) +} + +// NewPipeReader creates a new in-memory reader that reads from all loggers +// The caller must call Close on the returned reader when done. +// +// By default, it: +// +// 1. Logs JSON. This can be changed by passing the PipeFormat option. +// 2. Logs everything that would otherwise be logged to the "primary" log +// output. That is, everything enabled by SetLogLevel. The minimum log level +// can be increased by passing the PipeLevel option. 
+func NewPipeReader(opts ...PipeReaderOption) *PipeReader { + opt := pipeReaderOptions{ + format: JSONOutput, + level: LevelDebug, + } + + for _, o := range opts { + o.setOption(&opt) + } + + r, w := io.Pipe() + + p := &PipeReader{ + r: r, + closer: w, + core: newCore(opt.format, zapcore.AddSync(w), opt.level), + } + + loggerCore.AddCore(p.core) + + return p +} + +type pipeReaderOptions struct { + format LogFormat + level LogLevel +} + +type PipeReaderOption interface { + setOption(*pipeReaderOptions) +} + +type pipeReaderOptionFunc func(*pipeReaderOptions) + +func (p pipeReaderOptionFunc) setOption(o *pipeReaderOptions) { + p(o) +} + +// PipeFormat sets the output format of the pipe reader +func PipeFormat(format LogFormat) PipeReaderOption { + return pipeReaderOptionFunc(func(o *pipeReaderOptions) { + o.format = format + }) +} + +// PipeLevel sets the log level of logs sent to the pipe reader. +func PipeLevel(level LogLevel) PipeReaderOption { + return pipeReaderOptionFunc(func(o *pipeReaderOptions) { + o.level = level + }) +} diff --git a/vendor/github.com/ipfs/go-log/v2/setup.go b/vendor/github.com/ipfs/go-log/v2/setup.go new file mode 100644 index 0000000000..801e5b3f2b --- /dev/null +++ b/vendor/github.com/ipfs/go-log/v2/setup.go @@ -0,0 +1,284 @@ +package log + +import ( + "errors" + "fmt" + "os" + "regexp" + "strings" + "sync" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func init() { + SetupLogging(configFromEnv()) +} + +// Logging environment variables +const ( + // IPFS_* prefixed env vars kept for backwards compatibility + // for this release. They will not be available in the next + // release. + // + // GOLOG_* env vars take precedences over IPFS_* env vars. 
+ envIPFSLogging = "IPFS_LOGGING" + envIPFSLoggingFmt = "IPFS_LOGGING_FMT" + + envLogging = "GOLOG_LOG_LEVEL" + envLoggingFmt = "GOLOG_LOG_FMT" + + envLoggingFile = "GOLOG_FILE" // /path/to/file + envLoggingOutput = "GOLOG_OUTPUT" // possible values: stdout|stderr|file combine multiple values with '+' +) + +type LogFormat int + +const ( + ColorizedOutput LogFormat = iota + PlaintextOutput + JSONOutput +) + +type Config struct { + // Format overrides the format of the log output. Defaults to ColorizedOutput + Format LogFormat + + // Level is the minimum enabled logging level. + Level LogLevel + + // Stderr indicates whether logs should be written to stderr. + Stderr bool + + // Stdout indicates whether logs should be written to stdout. + Stdout bool + + // File is a path to a file that logs will be written to. + File string +} + +// ErrNoSuchLogger is returned when the util pkg is asked for a non existant logger +var ErrNoSuchLogger = errors.New("Error: No such logger") + +var loggerMutex sync.RWMutex // guards access to global logger state + +// loggers is the set of loggers in the system +var loggers = make(map[string]*zap.SugaredLogger) +var levels = make(map[string]zap.AtomicLevel) + +// primaryFormat is the format of the primary core used for logging +var primaryFormat LogFormat = ColorizedOutput + +// defaultLevel is the default log level +var defaultLevel LogLevel = LevelError + +// primaryCore is the primary logging core +var primaryCore zapcore.Core + +// loggerCore is the base for all loggers created by this package +var loggerCore = &lockedMultiCore{} + +// SetupLogging will initialize the logger backend and set the flags. +// TODO calling this in `init` pushes all configuration to env variables +// - move it out of `init`? then we need to change all the code (js-ipfs, go-ipfs) to call this explicitly +// - have it look for a config file? 
need to define what that is +func SetupLogging(cfg Config) { + loggerMutex.Lock() + defer loggerMutex.Unlock() + + primaryFormat = cfg.Format + defaultLevel = cfg.Level + + outputPaths := []string{} + + if cfg.Stderr { + outputPaths = append(outputPaths, "stderr") + } + if cfg.Stdout { + outputPaths = append(outputPaths, "stdout") + } + + // check if we log to a file + if len(cfg.File) > 0 { + if path, err := normalizePath(cfg.File); err != nil { + fmt.Fprintf(os.Stderr, "failed to resolve log path '%q', logging to %s\n", cfg.File, outputPaths) + } else { + outputPaths = append(outputPaths, path) + } + } + + ws, _, err := zap.Open(outputPaths...) + if err != nil { + panic(fmt.Sprintf("unable to open logging output: %v", err)) + } + + newPrimaryCore := newCore(primaryFormat, ws, LevelDebug) // the main core needs to log everything. + if primaryCore != nil { + loggerCore.ReplaceCore(primaryCore, newPrimaryCore) + } else { + loggerCore.AddCore(newPrimaryCore) + } + primaryCore = newPrimaryCore + + setAllLoggers(defaultLevel) +} + +// SetDebugLogging calls SetAllLoggers with logging.DEBUG +func SetDebugLogging() { + SetAllLoggers(LevelDebug) +} + +// SetAllLoggers changes the logging level of all loggers to lvl +func SetAllLoggers(lvl LogLevel) { + loggerMutex.RLock() + defer loggerMutex.RUnlock() + + setAllLoggers(lvl) +} + +func setAllLoggers(lvl LogLevel) { + for _, l := range levels { + l.SetLevel(zapcore.Level(lvl)) + } +} + +// SetLogLevel changes the log level of a specific subsystem +// name=="*" changes all subsystems +func SetLogLevel(name, level string) error { + lvl, err := LevelFromString(level) + if err != nil { + return err + } + + // wildcard, change all + if name == "*" { + SetAllLoggers(lvl) + return nil + } + + loggerMutex.RLock() + defer loggerMutex.RUnlock() + + // Check if we have a logger by that name + if _, ok := levels[name]; !ok { + return ErrNoSuchLogger + } + + levels[name].SetLevel(zapcore.Level(lvl)) + + return nil +} + +// 
SetLogLevelRegex sets all loggers to level `l` that match expression `e`. +// An error is returned if `e` fails to compile. +func SetLogLevelRegex(e, l string) error { + lvl, err := LevelFromString(l) + if err != nil { + return err + } + + rem, err := regexp.Compile(e) + if err != nil { + return err + } + + loggerMutex.Lock() + defer loggerMutex.Unlock() + for name := range loggers { + if rem.MatchString(name) { + levels[name].SetLevel(zapcore.Level(lvl)) + } + } + return nil +} + +// GetSubsystems returns a slice containing the +// names of the current loggers +func GetSubsystems() []string { + loggerMutex.RLock() + defer loggerMutex.RUnlock() + subs := make([]string, 0, len(loggers)) + + for k := range loggers { + subs = append(subs, k) + } + return subs +} + +func getLogger(name string) *zap.SugaredLogger { + loggerMutex.Lock() + defer loggerMutex.Unlock() + log, ok := loggers[name] + if !ok { + levels[name] = zap.NewAtomicLevelAt(zapcore.Level(defaultLevel)) + log = zap.New(loggerCore). + WithOptions( + zap.IncreaseLevel(levels[name]), + zap.AddCaller(), + ). + Named(name). + Sugar() + + loggers[name] = log + } + + return log +} + +// configFromEnv returns a Config with defaults populated using environment variables. 
+func configFromEnv() Config { + cfg := Config{ + Format: ColorizedOutput, + Stderr: true, + Level: LevelError, + } + + format := os.Getenv(envLoggingFmt) + if format == "" { + format = os.Getenv(envIPFSLoggingFmt) + } + + switch format { + case "nocolor": + cfg.Format = PlaintextOutput + case "json": + cfg.Format = JSONOutput + } + + lvl := os.Getenv(envLogging) + if lvl == "" { + lvl = os.Getenv(envIPFSLogging) + } + if lvl != "" { + var err error + cfg.Level, err = LevelFromString(lvl) + if err != nil { + fmt.Fprintf(os.Stderr, "error setting log levels: %s\n", err) + } + } + + cfg.File = os.Getenv(envLoggingFile) + // Disable stderr logging when a file is specified + // https://github.com/ipfs/go-log/issues/83 + if cfg.File != "" { + cfg.Stderr = false + } + + output := os.Getenv(envLoggingOutput) + outputOptions := strings.Split(output, "+") + for _, opt := range outputOptions { + switch opt { + case "stdout": + cfg.Stdout = true + case "stderr": + cfg.Stderr = true + case "file": + if cfg.File == "" { + fmt.Fprint(os.Stderr, "please specify a GOLOG_FILE value to write to") + } + } + } + + return cfg +} diff --git a/vendor/github.com/ipfs/go-log/writer/option.go b/vendor/github.com/ipfs/go-log/writer/option.go new file mode 100644 index 0000000000..b65d3a0baa --- /dev/null +++ b/vendor/github.com/ipfs/go-log/writer/option.go @@ -0,0 +1,4 @@ +package log + +// WriterGroup is the global writer group for logs to output to +var WriterGroup = NewMirrorWriter() diff --git a/vendor/github.com/ipfs/go-log/writer/writer.go b/vendor/github.com/ipfs/go-log/writer/writer.go new file mode 100644 index 0000000000..c2e4f452d7 --- /dev/null +++ b/vendor/github.com/ipfs/go-log/writer/writer.go @@ -0,0 +1,251 @@ +package log + +import ( + "fmt" + "io" + "sync" + "sync/atomic" +) + +// MaxWriterBuffer specifies how big the writer buffer can get before +// killing the writer. 
+var MaxWriterBuffer = 512 * 1024 + +// MirrorWriter implements a WriteCloser which syncs incoming bytes to multiple +// [buffered] WriteClosers. They can be added with AddWriter(). +type MirrorWriter struct { + active uint32 + + // channel for incoming writers + writerAdd chan *writerAdd + + // slices of writer/sync-channel pairs + writers []*bufWriter + + // synchronization channel for incoming writes + msgSync chan []byte +} + +// NewMirrorWriter initializes and returns a MirrorWriter. +func NewMirrorWriter() *MirrorWriter { + mw := &MirrorWriter{ + msgSync: make(chan []byte, 64), // sufficiently large buffer to avoid callers waiting + writerAdd: make(chan *writerAdd), + } + + go mw.logRoutine() + + return mw +} + +// Write broadcasts the written bytes to all Writers. +func (mw *MirrorWriter) Write(b []byte) (int, error) { + mycopy := make([]byte, len(b)) + copy(mycopy, b) + mw.msgSync <- mycopy + return len(b), nil +} + +// Close closes the MirrorWriter +func (mw *MirrorWriter) Close() error { + // it is up to the caller to ensure that write is not called during or + // after close is called. 
+ close(mw.msgSync) + return nil +} + +func (mw *MirrorWriter) doClose() { + for _, w := range mw.writers { + w.writer.Close() + } +} + +func (mw *MirrorWriter) logRoutine() { + // rebind to avoid races on nilling out struct fields + msgSync := mw.msgSync + writerAdd := mw.writerAdd + + defer mw.doClose() + + for { + select { + case b, ok := <-msgSync: + if !ok { + return + } + + // write to all writers + dropped := mw.broadcastMessage(b) + + // consolidate the slice + if dropped { + mw.clearDeadWriters() + } + case wa := <-writerAdd: + mw.writers = append(mw.writers, newBufWriter(wa.w)) + + atomic.StoreUint32(&mw.active, 1) + close(wa.done) + } + } +} + +// broadcastMessage sends the given message to every writer +// if any writer is killed during the send, 'true' is returned +func (mw *MirrorWriter) broadcastMessage(b []byte) bool { + var dropped bool + for i, w := range mw.writers { + _, err := w.Write(b) + if err != nil { + mw.writers[i] = nil + dropped = true + } + } + return dropped +} + +func (mw *MirrorWriter) clearDeadWriters() { + writers := mw.writers + mw.writers = nil + for _, w := range writers { + if w != nil { + mw.writers = append(mw.writers, w) + } + } + if len(mw.writers) == 0 { + atomic.StoreUint32(&mw.active, 0) + } +} + +type writerAdd struct { + w io.WriteCloser + done chan struct{} +} + +// AddWriter attaches a new WriteCloser to this MirrorWriter. +// The new writer will start getting any bytes written to the mirror. 
+func (mw *MirrorWriter) AddWriter(w io.WriteCloser) { + wa := &writerAdd{ + w: w, + done: make(chan struct{}), + } + mw.writerAdd <- wa + <-wa.done +} + +// Active returns if there is at least one Writer +// attached to this MirrorWriter +func (mw *MirrorWriter) Active() (active bool) { + return atomic.LoadUint32(&mw.active) == 1 +} + +func newBufWriter(w io.WriteCloser) *bufWriter { + bw := &bufWriter{ + writer: w, + incoming: make(chan []byte, 1), + } + + go bw.loop() + return bw +} + +// writes incoming messages to a buffer and when it fills +// up, writes them to the writer +type bufWriter struct { + writer io.WriteCloser + + incoming chan []byte + + deathLock sync.Mutex + dead bool +} + +var errDeadWriter = fmt.Errorf("writer is dead") + +func (bw *bufWriter) Write(b []byte) (int, error) { + bw.deathLock.Lock() + dead := bw.dead + bw.deathLock.Unlock() + if dead { + if bw.incoming != nil { + close(bw.incoming) + bw.incoming = nil + } + return 0, errDeadWriter + } + + bw.incoming <- b + return len(b), nil +} + +func (bw *bufWriter) die() { + bw.deathLock.Lock() + bw.dead = true + bw.writer.Close() + bw.deathLock.Unlock() +} + +func (bw *bufWriter) loop() { + bufsize := 0 + bufBase := make([][]byte, 0, 16) // some initial memory + buffered := bufBase + nextCh := make(chan []byte) + + var nextMsg []byte + + go func() { + for b := range nextCh { + _, err := bw.writer.Write(b) + if err != nil { + // TODO: need a way to notify there was an error here + // wouldn't want to log here as it could casue an infinite loop + bw.die() + return + } + } + }() + + // collect and buffer messages + incoming := bw.incoming + for { + if nextMsg == nil || nextCh == nil { + // nextCh == nil implies we are 'dead' and draining the incoming channel + // until the caller notices and closes it for us + b, ok := <-incoming + if !ok { + return + } + nextMsg = b + } + + select { + case b, ok := <-incoming: + if !ok { + return + } + bufsize += len(b) + buffered = append(buffered, b) + if 
bufsize > MaxWriterBuffer { + // if we have too many messages buffered, kill the writer + bw.die() + if nextCh != nil { + close(nextCh) + } + nextCh = nil + // explicity keep going here to drain incoming + } + case nextCh <- nextMsg: + nextMsg = nil + if len(buffered) > 0 { + nextMsg = buffered[0] + buffered = buffered[1:] + bufsize -= len(nextMsg) + } + + if len(buffered) == 0 { + // reset slice position + buffered = bufBase[:0] + } + } + } +} diff --git a/vendor/github.com/ipfs/go-metrics-interface/.gitignore b/vendor/github.com/ipfs/go-metrics-interface/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/ipfs/go-metrics-interface/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/ipfs/go-metrics-interface/.travis.yml b/vendor/github.com/ipfs/go-metrics-interface/.travis.yml new file mode 100644 index 0000000000..4cfe98c242 --- /dev/null +++ b/vendor/github.com/ipfs/go-metrics-interface/.travis.yml @@ -0,0 +1,32 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gx + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/src/gx + - $GOPATH/pkg/mod + - $HOME/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/ipfs/go-metrics-interface/LICENSE b/vendor/github.com/ipfs/go-metrics-interface/LICENSE new file mode 100644 index 0000000000..ff68748396 --- /dev/null +++ b/vendor/github.com/ipfs/go-metrics-interface/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + 
+Copyright (c) 2016 IPFS + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/ipfs/go-metrics-interface/context.go b/vendor/github.com/ipfs/go-metrics-interface/context.go new file mode 100644 index 0000000000..8796b8b988 --- /dev/null +++ b/vendor/github.com/ipfs/go-metrics-interface/context.go @@ -0,0 +1,26 @@ +package metrics + +import "context" + +const CtxScopeKey = "ipfs.metrics.scope" + +func CtxGetScope(ctx context.Context) string { + s := ctx.Value(CtxScopeKey) + if s == nil { + return "" + } + str, ok := s.(string) + if !ok { + return "" + } + return str +} + +func CtxScope(ctx context.Context, scope string) context.Context { + return context.WithValue(ctx, CtxScopeKey, scope) +} + +func CtxSubScope(ctx context.Context, subscope string) context.Context { + curscope := CtxGetScope(ctx) + return CtxScope(ctx, curscope+"."+subscope) +} diff --git a/vendor/github.com/ipfs/go-metrics-interface/ctor.go b/vendor/github.com/ipfs/go-metrics-interface/ctor.go new file mode 100644 index 0000000000..aac694e5f8 --- /dev/null +++ b/vendor/github.com/ipfs/go-metrics-interface/ctor.go @@ -0,0 +1,43 @@ +package metrics + +import ( + "context" + "errors" +) + +var ErrImplemented = errors.New("there is implemenation already injected") + +var ctorImpl InternalNew = nil + +// name is dot spearated path +// must be uniqe, use system naming, and unit postfix, examples: +// ipfs.blockstore.bloomcache.bloom.miss.total +// ipfs.routing.dht.notresuingstream.total +// +// both arguemnts are obligatory +func New(name, helptext string) Creator { + if ctorImpl == nil { + return &noop{} + } else { + return ctorImpl(name, helptext) + } +} + +func NewCtx(ctx context.Context, name, helptext string) Creator { + return New(CtxGetScope(ctx)+"."+name, helptext) +} + +type InternalNew func(string, string) Creator + +func InjectImpl(newimpl InternalNew) error { + if ctorImpl != nil { + return ErrImplemented + } else { + ctorImpl = newimpl + return nil + } +} + +func Active() bool { + return ctorImpl != nil +} diff --git 
a/vendor/github.com/ipfs/go-metrics-interface/go.mod b/vendor/github.com/ipfs/go-metrics-interface/go.mod new file mode 100644 index 0000000000..57654087aa --- /dev/null +++ b/vendor/github.com/ipfs/go-metrics-interface/go.mod @@ -0,0 +1 @@ +module github.com/ipfs/go-metrics-interface diff --git a/vendor/github.com/ipfs/go-metrics-interface/interface.go b/vendor/github.com/ipfs/go-metrics-interface/interface.go new file mode 100644 index 0000000000..f1237593c5 --- /dev/null +++ b/vendor/github.com/ipfs/go-metrics-interface/interface.go @@ -0,0 +1,45 @@ +package metrics + +import ( + "time" +) + +// Increment only metric +type Counter interface { + Inc() + Add(float64) // Only positive +} + +// Increse and decrese metric +type Gauge interface { + Set(float64) // Introduced discontinuity + Inc() + Dec() + Add(float64) + Sub(float64) +} + +type Histogram interface { + Observe(float64) // Adds observation to Histogram +} + +type Summary interface { + Observe(float64) // Adds observation to Summary +} + +// Consult http://godoc.org/github.com/prometheus/client_golang/prometheus#SummaryOpts +type SummaryOpts struct { + Objectives map[float64]float64 + MaxAge time.Duration + AgeBuckets uint32 + BufCap uint32 +} + +type Creator interface { + Counter() Counter + Gauge() Gauge + Histogram(buckets []float64) Histogram + + // opts cannot be nil, use empty summary instance + Summary(opts SummaryOpts) Summary +} diff --git a/vendor/github.com/ipfs/go-metrics-interface/noop.go b/vendor/github.com/ipfs/go-metrics-interface/noop.go new file mode 100644 index 0000000000..5b59aa83c3 --- /dev/null +++ b/vendor/github.com/ipfs/go-metrics-interface/noop.go @@ -0,0 +1,46 @@ +package metrics + +// Also implements the Counter interface +type noop struct{} + +func (g *noop) Set(v float64) { + // Noop +} + +func (g *noop) Inc() { + // Noop +} + +func (g *noop) Dec() { + // Noop +} + +func (g *noop) Add(v float64) { + // Noop +} + +func (g *noop) Sub(v float64) { + // Noop +} + +func (g 
*noop) Observe(v float64) { + // Noop +} + +// Creator functions + +func (g *noop) Counter() Counter { + return g +} + +func (g *noop) Gauge() Gauge { + return g +} + +func (g *noop) Histogram(buckets []float64) Histogram { + return g +} + +func (g *noop) Summary(opts SummaryOpts) Summary { + return g +} diff --git a/vendor/github.com/ipfs/go-metrics-interface/package.json b/vendor/github.com/ipfs/go-metrics-interface/package.json new file mode 100644 index 0000000000..6ba42d7610 --- /dev/null +++ b/vendor/github.com/ipfs/go-metrics-interface/package.json @@ -0,0 +1,16 @@ +{ + "author": "ipfs", + "bugs": { + "URL": "https://github.com/ipfs/go-metrics-interface/issues", + "url": "https://github.com/ipfs/go-metrics-interface/issues" + }, + "gx": { + "dvcsimport": "github.com/ipfs/go-metrics-interface" + }, + "gxVersion": "0.9.0", + "language": "go", + "license": "MIT", + "name": "go-metrics-interface", + "version": "0.2.0" +} + diff --git a/vendor/github.com/ipfs/go-verifcid/.travis.yml b/vendor/github.com/ipfs/go-verifcid/.travis.yml new file mode 100644 index 0000000000..4cfe98c242 --- /dev/null +++ b/vendor/github.com/ipfs/go-verifcid/.travis.yml @@ -0,0 +1,32 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gx + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/src/gx + - $GOPATH/pkg/mod + - $HOME/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/ipfs/go-verifcid/go.mod b/vendor/github.com/ipfs/go-verifcid/go.mod new file mode 100644 index 0000000000..5db3e877f3 --- /dev/null +++ b/vendor/github.com/ipfs/go-verifcid/go.mod @@ -0,0 +1,6 @@ +module github.com/ipfs/go-verifcid + +require ( + github.com/ipfs/go-cid v0.0.1 + github.com/multiformats/go-multihash v0.0.1 +) diff --git 
a/vendor/github.com/ipfs/go-verifcid/go.sum b/vendor/github.com/ipfs/go-verifcid/go.sum new file mode 100644 index 0000000000..4525c37ee0 --- /dev/null +++ b/vendor/github.com/ipfs/go-verifcid/go.sum @@ -0,0 +1,22 @@ +github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/ipfs/go-cid v0.0.1 h1:GBjWPktLnNyX0JiQCNFpUuUSoMw5KMyqrsejHYlILBE= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +golang.org/x/crypto 
v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d h1:Z0Ahzd7HltpJtjAHHxX8QFP3j1yYgiuvjbjRzDj/KH0= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/ipfs/go-verifcid/package.json b/vendor/github.com/ipfs/go-verifcid/package.json new file mode 100644 index 0000000000..a066b1d8a0 --- /dev/null +++ b/vendor/github.com/ipfs/go-verifcid/package.json @@ -0,0 +1,28 @@ +{ + "author": "why", + "bugs": {}, + "gx": { + "dvcsimport": "github.com/ipfs/go-verifcid" + }, + "gxDependencies": [ + { + "author": "multiformats", + "hash": "QmerPMzPk1mJVowm8KgmoknWa4yCYvvugMPsgWmDNUvDLW", + "name": "go-multihash", + "version": "1.0.9" + }, + { + "author": "whyrusleeping", + "hash": "QmTbxNB1NwDesLmKTscr4udL2tVP7MaxvXnD1D9yX7g3PN", + "name": "go-cid", + "version": "0.9.3" + } + ], + "gxVersion": "0.12.1", + "language": "go", + "license": "", + "name": "go-verifcid", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "0.1.2" +} + diff --git a/vendor/github.com/ipfs/go-verifcid/validate.go b/vendor/github.com/ipfs/go-verifcid/validate.go new file mode 100644 index 0000000000..8a76e4933e --- /dev/null +++ b/vendor/github.com/ipfs/go-verifcid/validate.go @@ -0,0 +1,62 @@ +package verifcid + +import ( + "fmt" + + cid "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" +) + +var ErrPossiblyInsecureHashFunction = fmt.Errorf("potentially insecure hash functions not allowed") +var ErrBelowMinimumHashLength = fmt.Errorf("hashes must be at %d least bytes long", minimumHashLength) + +const minimumHashLength = 20 + +var goodset = map[uint64]bool{ + mh.SHA2_256: true, + mh.SHA2_512: true, + mh.SHA3_224: true, + mh.SHA3_256: true, + mh.SHA3_384: true, + mh.SHA3_512: true, + 
mh.SHAKE_256: true, + mh.DBL_SHA2_256: true, + mh.KECCAK_224: true, + mh.KECCAK_256: true, + mh.KECCAK_384: true, + mh.KECCAK_512: true, + mh.ID: true, + + mh.SHA1: true, // not really secure but still useful +} + +func IsGoodHash(code uint64) bool { + good, found := goodset[code] + if good { + return true + } + + if !found { + if code >= mh.BLAKE2B_MIN+19 && code <= mh.BLAKE2B_MAX { + return true + } + if code >= mh.BLAKE2S_MIN+19 && code <= mh.BLAKE2S_MAX { + return true + } + } + + return false +} + +func ValidateCid(c cid.Cid) error { + pref := c.Prefix() + if !IsGoodHash(pref.MhType) { + return ErrPossiblyInsecureHashFunction + } + + if pref.MhType != mh.ID && pref.MhLength < minimumHashLength { + return ErrBelowMinimumHashLength + } + + return nil +} diff --git a/vendor/github.com/ipld/go-ipld-prime/.gitmodules b/vendor/github.com/ipld/go-ipld-prime/.gitmodules new file mode 100644 index 0000000000..9326cf9f2d --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/.gitmodules @@ -0,0 +1,39 @@ +[submodule ".gopath/src/github.com/polydawn/refmt"] + path = .gopath/src/github.com/polydawn/refmt + url = https://github.com/polydawn/refmt +[submodule ".gopath/src/golang.org/x/sys"] + path = .gopath/src/golang.org/x/sys + url = https://go.googlesource.com/sys +[submodule ".gopath/src/golang.org/x/crypto"] + path = .gopath/src/golang.org/x/crypto + url = https://go.googlesource.com/crypto +[submodule ".gopath/src/github.com/ipfs/go-cid"] + path = .gopath/src/github.com/ipfs/go-cid + url = https://github.com/ipfs/go-cid +[submodule ".gopath/src/github.com/multiformats/go-multibase"] + path = .gopath/src/github.com/multiformats/go-multibase + url = https://github.com/multiformats/go-multibase +[submodule ".gopath/src/github.com/multiformats/go-multihash"] + path = .gopath/src/github.com/multiformats/go-multihash + url = https://github.com/multiformats/go-multihash +[submodule ".gopath/src/github.com/minio/sha256-simd"] + path = .gopath/src/github.com/minio/sha256-simd 
+ url = https://github.com/minio/sha256-simd +[submodule ".gopath/src/github.com/minio/blake2b-simd"] + path = .gopath/src/github.com/minio/blake2b-simd + url = https://github.com/minio/blake2b-simd +[submodule ".gopath/src/github.com/gxed/hashland"] + path = .gopath/src/github.com/gxed/hashland + url = https://github.com/gxed/hashland +[submodule ".gopath/src/github.com/mr-tron/base58"] + path = .gopath/src/github.com/mr-tron/base58 + url = https://github.com/mr-tron/base58 +[submodule ".gopath/src/github.com/spaolacci/murmur3"] + path = .gopath/src/github.com/spaolacci/murmur3 + url = https://github.com/spaolacci/murmur3 +[submodule ".gopath/src/github.com/multiformats/go-base32"] + path = .gopath/src/github.com/multiformats/go-base32 + url = https://github.com/multiformats/go-base32 +[submodule ".gopath/src/github.com/warpfork/go-wish"] + path = .gopath/src/github.com/warpfork/go-wish + url = https://github.com/warpfork/go-wish diff --git a/vendor/github.com/ipld/go-ipld-prime/.travis.yml b/vendor/github.com/ipld/go-ipld-prime/.travis.yml new file mode 100644 index 0000000000..44f532ae25 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/.travis.yml @@ -0,0 +1,11 @@ +language: go + +go: +- 1.14.x +- 1.15.x + +notifications: + email: false + +script: +- go test ./... diff --git a/vendor/github.com/ipld/go-ipld-prime/HACKME.md b/vendor/github.com/ipld/go-ipld-prime/HACKME.md new file mode 100644 index 0000000000..971a37672b --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/HACKME.md @@ -0,0 +1,120 @@ +hackme +====== + +Design rational are documented here. + +This doc is not necessary reading for users of this package, +but if you're considering submitting patches -- or just trying to understand +why it was written this way, and check for reasoning that might be dated -- +then it might be useful reading. + +It may also be an incomplete doc. It's been written opportunistically. 
+If you don't understand the rationale for some things, try checking git history +(many of the commit messages are downright bookish), or get in touch via +a github issue, irc, matrix, etc and ask! + + +about NodeAssembler and NodeBuilder +----------------------------------- + +See the godoc on these types. + +In short, a `NodeBuilder` is for creating a new piece of memory; +a `NodeAssembler` is for instantiating some memory which you already have. + +Generally, you'll start any function using a `NodeBuilder`, but then continue +and recurse by passing on the `NodeAssembler`. + +See the `./HACKME_builderBehaviors.md` doc for more details on +high level rules and implementation patterns to look out for. + + + +about NodePrototype +--------------- + +### NodePrototype promises information without allocations + +You'll notice nearly every `ipld.NodePrototype` implementation is +a golang struct type with _zero fields_. + +This is important. +Getting a NodePrototype is generally expected to be "free" (i.e., zero allocations), +while `NewBuilder` is allowed to be costly (usually causes at least one allocation). +Zero-member structs can be referred to by an interface without requiring an allocation, +which is how it's possible ensure `NodePrototype` are always "free" to refer to. + +(Note that a `NodePrototype` that bundles some information like ADL configuration +will subvert this pattern -- but these are an exception, not the rule.) + +### NodePrototype reported by a Node + +`ipld.NodePrototype` is a type that opaquely represents some information about how +a node was constructed and is implemented. The general contract for what +should happen when asking a node about its prototype +(via the `ipld.Node.Prototype() NodePrototype` interface) is that prototype should contain +effective instructions for how one could build a copy of that node, using +the same implementation details. 
+ +By example, if some node `n` was made as a `basicnode.plainString`, +then `n.Prototype()` will be `basicnode.Prototype__String{}`, +and `n.Prototype().NewBuilder().AssignString("xyz")` can be presumed to work. + +Note there are also limits to this: if a node was built in a flexible way, +the prototype it reports later may only report what it is now, and not return +that same flexibility again. +By example, if something was made as an "any" -- i.e., +via `basicnode.Prototype__Any{}.NewBuilder()`, and then *happened* to be assigned a string value -- +the resulting node will still carry a `Prototype()` property that returns +`Prototype__String` -- **not** `Prototype__Any`. + +#### NodePrototype meets generic transformation + +One of the core purposes of the `NodePrototype` interface (and all the different +ways you can get it from existing data) is to enable the `traversal` package +(or other user-written packages like it) to do transformations on data. + +// work-in-progress warning: generic transformations are not fully implemented. + +When implementating a transformation that works over unknown data, +the signiture of function a user provides is roughly: +`func(oldValue Node, acceptableValues NodePrototype) (Node, error)`. +(This signiture may vary by the strategy taken by the transformation -- this +signiture is useful because it's capable of no-op'ing; an alternative signiture +might give the user a `NodeAssembler` instead of the `NodePrototype`.) + +In this situation, the transformation system determines the `NodePrototype` +(or `NodeAssembler`) to use by asking the parent value of the one we're visiting. +This is because we want to give the update function the ability to create +any kind of value that would be accepted in this position -- not just create a +value of the same prototype as the one currently there! It is for this reason +the `oldValue.Prototype()` property can't be used directly. 
+ +At the root of such a transformation, we use the `node.Prototype()` property to +determine how to get started building a new value. + +#### NodePrototype meets recursive assemblers + +Asking for a NodePrototype in a recursive assembly process tells you about what +kind of node would be accepted in an `AssignNode(Node)` call. +It does *not* make any remark on the fact it's a key assembler or value assembler +and might be wrapped with additional rules (such as map key uniqueness, field +name expectations, etc). + +(Note that it's also not an exclusive statement about what `AssignNode(Node)` will +accept; e.g. in many situations, while a `Prototype__MyStringType` might be the prototype +returned, any string kinded node can be used in `AssignNode(Node)` and will be +appropriately converted.) + +Any of these paths counts as "recursive assembly process": + +- `MapAssembler.KeyPrototype()` +- `MapAssembler.ValuePrototype(string)` +- `MapAssembler.AssembleKey().Prototype()` +- `MapAssembler.AssembleValue().Prototype()` +- `ListAssembler.ValuePrototype()` +- `ListAssembler.AssembleValue().Prototype()` + +### NodePrototype for carrying ADL configuration + +// work-in-progress warning: this is an intention of the design, but not implemented. diff --git a/vendor/github.com/ipld/go-ipld-prime/HACKME_builderBehaviors.md b/vendor/github.com/ipld/go-ipld-prime/HACKME_builderBehaviors.md new file mode 100644 index 0000000000..c4c977b7db --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/HACKME_builderBehaviors.md @@ -0,0 +1,61 @@ +hackme: NodeBuilder and NodeAssembler behaviors +=============================================== + +high level rules of builders and assemblers +------------------------------------------- + +- Errors should be returned as soon as possible. + - That means an error like "repeated key in map" should be returned by the key assembler! + - Either 'NodeAssembler.AssignString' should return this (for simple keys on untyped maps, or on structs, etc)... 
+ - ... or 'MapAssembler.Finish' (in the case of complex keys in a typed map). + +- Logical integrity checks must be done locally -- recursive types rely on their contained types to report errors, and the recursive type wraps the assemblers of their contained type in order to check and correctly invalidate/rollback the recursive construction. + +- Recursive types tend to have a value assembler that wraps the child type's assembler in order to intercept relevant "finish" methods. + - This is generally where that logic integrity check mentioned above is tracked; we need explicit confirmation that it *passes* before the parent's assembly should proceed. + - Implementations may also need this moment to complete any assignment of the child value into position in the parent value. But not all implementations need this -- some will have had all the child assembler effects applying directly to the final memory positions. + +- Assemblers should invalidate themselves as soon as they become "finished". + - For maps and lists, that means the "Finish" methods. + - For all the other scalars, the "Assign*" method itself means finished. + - Or in other words: whatever method returns an `error`, that's what makes that assembler "finished". + - The purpose of this is to prevent accidental mutation after any validations have been performed during the "finish" processing. + +- Many methods must be called in the right order, and the user must not hold onto references after calling "finish" methods on them. + - The reason this is important is to enable assembler systems to agressively reuse memory, thus increasing performance. + - Thus, if you hold onto NodeAssembler reference after being finished with it... you can't assume it'll explicitly error if you call further methods on it, because it might now be operating again... _on a different target_. 
+ - In recursive structures, calling AssembleKey or AssembleValue might return pointer-identical assemblers (per warning in previous bullet), but the memory their assembly is targetted to should always advance -- it should never target already-assembled memory. + - (If you're thinking "the Rust memory model would be able to greatly enhance safety here!"... yes. Yes it would.) + - When misuses of order are detected, these may cause panics (rather than error returns) (not all methods that can be so misused have error returns). + + +detailed rules and expectations for implementers +------------------------------------------------ + +The expectations in the "happy path" are often clear. +Here are also collected some details of exactly what should happen when an error has been reached, +but the caller tries to continue anyway. + +- while building maps: + - assigning a key with 'AssembleKey': + - in case of success: clearly 'AssembleValue' should be ready to use next. + - in case of failure from repeated key: + - the error must be returned immediately from either the 'NodeAssembler.AssignString' or the 'MapAssembler.Finish' method. + - 'AssignString' for any simple keys; 'MapAssembler.Finish' may be relevant in the case of complex keys in a typed map. + - implementers take note: this implies the `NodeAssembler` returned by `AssembleKey` has some way to refer to the map assembler that spawned it. + - no side effect should be visible if 'AssembleKey' is called again next. + - (typically this doesn't require extra code for the string case, but it may require some active zeroing in the complex key case.) + - (remember to reset any internal flag for expecting 'AssembleValue' to be used next, and decrement any length pointers that were optimistically incremented!) + - n.b. the "no side effect" rule here is for keys, not for values. + - TODO/REVIEW: do we want the no-side-effect rule for values? 
it might require nontrivial volumes of zeroing, and often in practice, this might be wasteful. + +- invalidation of assemblers: + - is typically implemented by nil'ing the wip node they point to. + - this means you get nil pointer dereference panics when attempting to use an assembler after it's finished... which is not the greatest error message. + - but it does save us a lot of check code for a situation that the user certainly shouldn't get into in the first place. + - (worth review: can we add that check code without extra runtime cost? possibly, because the compiler might then skip its own implicit check branches. might still increase SLOC noticably in codegen output, though.) + - worth noting there's a limit to how good this can be anyway: it's "best effort" error reporting: see the remarks on reuse of assembler memory in "overall rules" above. + - it's systemically critical to not yield an assembler _ever again in the future_ that refers to some memory already considered finished. + - even though we no longer return intermediate nodes, there's still many ways this could produce problems. For example, complicating (if not outright breaking) COW sharing of segments of data. + - in most situations, we get this for free, because the child assembler methods only go "forward" -- there's no backing up, lists have no random index insertion or update support, and maps actively reject dupe keys. + - if you *do* make a system which exposes any of those features... be very careful; you will probably need to start tracking "freeze" flags on the data in order to retain systemic sanity. 
diff --git a/vendor/github.com/ipld/go-ipld-prime/LICENSE b/vendor/github.com/ipld/go-ipld-prime/LICENSE new file mode 100644 index 0000000000..80e1f8f672 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2018 Eric Myhre + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/ipld/go-ipld-prime/README.md b/vendor/github.com/ipld/go-ipld-prime/README.md new file mode 100644 index 0000000000..29beb91a46 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/README.md @@ -0,0 +1,111 @@ +go-ipld-prime +============= + +`go-ipld-prime` is an implementation of the IPLD spec interfaces, +a batteries-included codec implementations of IPLD for CBOR and JSON, +and tooling for basic operations on IPLD objects (traversals, etc). + + + +API +--- + +The API is split into several packages based on responsibly of the code. 
+The most central interfaces are the base package, +but you'll certainly need to import additional packages to get concrete implementations into action. + +Roughly speaking, the core package interfaces are all about the IPLD Data Model; +the 'codec/*' packages contain functions for parsing serial data into the IPLD Data Model, +and converting Data Model content back into serial formats; +the 'traversal' package is an example of higher-order functions on the Data Model; +concrete 'Node' implementations ready to use can be found in packages in the 'node/*' directory; +and several additional packages contain advanced features such as IPLD Schemas. + +(Because the codecs, as well as higher-order features like traversals, are +implemented in a separate package from the core interfaces or any of the Node implementations, +you can be sure they're not doing any funky "magic" -- all this stuff will work the same +if you want to write your own extensions, whether for new Node implementations +or new codecs, or new higher-order order functions!) + +- `github.com/ipld/go-ipld-prime` -- imported as just `ipld` -- contains the core interfaces for IPLD. The most important interfaces are `Node`, `NodeBuilder`, `Path`, and `Link`. +- `github.com/ipld/go-ipld-prime/node/basic` -- imported as `basicnode` -- provides concrete implementations of `Node` and `NodeBuilder` which work for any kind of data. +- `github.com/ipld/go-ipld-prime/traversal` -- contains higher-order functions for traversing graphs of data easily. +- `github.com/ipld/go-ipld-prime/traversal/selector` -- contains selectors, which are sort of like regexps, but for trees and graphs of IPLD data! +- `github.com/ipld/go-ipld-prime/codec -- parent package of all the codec implementations! +- `github.com/ipld/go-ipld-prime/codec/dagcbor` -- implementations of marshalling and unmarshalling as CBOR (a fast, binary serialization format). 
+- `github.com/ipld/go-ipld-prime/codec/dagjson` -- implementations of marshalling and unmarshalling as JSON (a popular human readable format). +- `github.com/ipld/go-ipld-prime/linking/cid` -- imported as `cidlink` -- provides concrete implementations of `Link` as a CID. Also, the multicodec registry. +- `github.com/ipld/go-ipld-prime/schema` -- contains the `schema.Type` and `schema.TypedNode` interface declarations, which represent IPLD Schema type information. +- `github.com/ipld/go-ipld-prime/node/typed` -- provides concrete implementations of `schema.TypedNode` which decorate a basic `Node` at runtime to have additional features described by IPLD Schemas. + + + +Other IPLD Libraries +-------------------- + +The IPLD specifications are designed to be language-agnostic. +Many implementations exist in a variety of languages. + +For overall behaviors and specifications, refer to the specs repo: + https://github.com/ipld/specs/ + + +### distinctions from go-ipld-interface&go-ipld-cbor + +This library ("go ipld prime") is the current head of development for golang IPLD, +but several other libraries exist which are widely deployed. + +This library is a clean take on the IPLD interfaces and addresses several design decisions very differently than existing libraries: + +- The Node interfaces are minimal (and match cleanly to the IPLD Data Model); +- Many features known to be legacy are dropped; +- The Link implementations are purely CIDs; +- The Path implementations are provided in the same box; +- The JSON and CBOR implementations are provided in the same box; +- And several odd dependencies on blockstore and other interfaces from the rest of the IPFS ecosystem are removed. + +Many of these changes had been discussed for the other IPLD codebases as well, +but we chose clean break v2 as a more viable project-management path. +Both the existing IPLD libraries and go-ipld-prime can co-exist on the same import path, and refer to the same kinds of serial data. 
+Projects wishing to migrate can do so smoothly and at their leisure. + +There is no explicit deprecation timeline for the earlier golang IPLD libraries, +but you should expect new features *here*, rather than in those libraries. + +Be advised that faculties for dealing with unixfsv1 data are still limited. +You can find some tools in the [go-ipld-prime-proto](https://github.com/ipld/go-ipld-prime-proto/) repo, +but be sure to read the caveats and limitations in that project's readme. +We're happy to accept major PRs on this topic, though, if you who is reading this wants to fix this faster than wait for us :) + + + +Change Policy +------------- + +The go-ipld-prime library is already usable. We are also still in development, and may still change things. + +A changelog can be found at [CHANGELOG.md](CHANGELOG.md). + +Using a commit hash when depending on this library is advisable (as it is with any other). + +We may sometimes tag releases, but it's just as acceptable to track commits on master without the indirection. + +The following are all norms you can expect of changes to this codebase: + +- The `master` branch will not be force-pushed. + - (exceptional circumstances may exist, but such exceptions will only be considered valid for about as long after push as the "$N-second-rule" about dropped food). + - Therefore, commit hashes on master are gold to link against. +- All other branches *will* be force-pushed. + - Therefore, commit hashes not reachable from the master branch are inadvisable to link against. +- If it's on master, it's understood to be good, in as much as we can tell. +- Development proceeds -- both starting from and ending on -- the `master` branch. + - There are no other long-running supported-but-not-master branches. + - The existence of tags at any particular commit do not indicate that we will consider starting a long running and supported diverged branch from that point, nor start doing backports, etc. 
+- All changes are presumed breaking until proven otherwise; and we don't have the time and attention budget at this point for doing the "proven otherwise". + - All consumers updating their libraries should run their own compiler, linking, and test suites before assuming the update applies cleanly -- as is good practice regardless. + - Any idea of semver indicating more or less breakage should be treated as a street vendor selling potions of levitation -- it's likely best disregarded. + +None of this is to say we'll go breaking things willy-nilly for fun; but it *is* to say: + +- Staying close to master is always better than not staying close to master; +- and trust your compiler and your tests rather than tea-leaf patterns in a tag string. diff --git a/vendor/github.com/ipld/go-ipld-prime/codec/dagcbor/common.go b/vendor/github.com/ipld/go-ipld-prime/codec/dagcbor/common.go new file mode 100644 index 0000000000..2542d9ce9e --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/codec/dagcbor/common.go @@ -0,0 +1,3 @@ +package dagcbor + +const linkTag = 42 diff --git a/vendor/github.com/ipld/go-ipld-prime/codec/dagcbor/marshal.go b/vendor/github.com/ipld/go-ipld-prime/codec/dagcbor/marshal.go new file mode 100644 index 0000000000..9b768119a9 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/codec/dagcbor/marshal.go @@ -0,0 +1,145 @@ +package dagcbor + +import ( + "fmt" + + "github.com/polydawn/refmt/shared" + "github.com/polydawn/refmt/tok" + + ipld "github.com/ipld/go-ipld-prime" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" +) + +// This should be identical to the general feature in the parent package, +// except for the `case ipld.ReprKind_Link` block, +// which is dag-cbor's special sauce for schemafree links. 
+func Marshal(n ipld.Node, sink shared.TokenSink) error { + var tk tok.Token + return marshal(n, &tk, sink) +} + +func marshal(n ipld.Node, tk *tok.Token, sink shared.TokenSink) error { + switch n.ReprKind() { + case ipld.ReprKind_Invalid: + return fmt.Errorf("cannot traverse a node that is absent") + case ipld.ReprKind_Null: + tk.Type = tok.TNull + _, err := sink.Step(tk) + return err + case ipld.ReprKind_Map: + // Emit start of map. + tk.Type = tok.TMapOpen + tk.Length = n.Length() + if _, err := sink.Step(tk); err != nil { + return err + } + // Emit map contents (and recurse). + for itr := n.MapIterator(); !itr.Done(); { + k, v, err := itr.Next() + if err != nil { + return err + } + tk.Type = tok.TString + tk.Str, err = k.AsString() + if err != nil { + return err + } + if _, err := sink.Step(tk); err != nil { + return err + } + if err := marshal(v, tk, sink); err != nil { + return err + } + } + // Emit map close. + tk.Type = tok.TMapClose + _, err := sink.Step(tk) + return err + case ipld.ReprKind_List: + // Emit start of list. + tk.Type = tok.TArrOpen + l := n.Length() + tk.Length = l + if _, err := sink.Step(tk); err != nil { + return err + } + // Emit list contents (and recurse). + for i := 0; i < l; i++ { + v, err := n.LookupByIndex(i) + if err != nil { + return err + } + if err := marshal(v, tk, sink); err != nil { + return err + } + } + // Emit list close. 
+ tk.Type = tok.TArrClose + _, err := sink.Step(tk) + return err + case ipld.ReprKind_Bool: + v, err := n.AsBool() + if err != nil { + return err + } + tk.Type = tok.TBool + tk.Bool = v + _, err = sink.Step(tk) + return err + case ipld.ReprKind_Int: + v, err := n.AsInt() + if err != nil { + return err + } + tk.Type = tok.TInt + tk.Int = int64(v) + _, err = sink.Step(tk) + return err + case ipld.ReprKind_Float: + v, err := n.AsFloat() + if err != nil { + return err + } + tk.Type = tok.TFloat64 + tk.Float64 = v + _, err = sink.Step(tk) + return err + case ipld.ReprKind_String: + v, err := n.AsString() + if err != nil { + return err + } + tk.Type = tok.TString + tk.Str = v + _, err = sink.Step(tk) + return err + case ipld.ReprKind_Bytes: + v, err := n.AsBytes() + if err != nil { + return err + } + tk.Type = tok.TBytes + tk.Bytes = v + _, err = sink.Step(tk) + return err + case ipld.ReprKind_Link: + v, err := n.AsLink() + if err != nil { + return err + } + switch lnk := v.(type) { + case cidlink.Link: + tk.Type = tok.TBytes + tk.Bytes = append([]byte{0}, lnk.Bytes()...) 
+ tk.Tagged = true + tk.Tag = linkTag + _, err = sink.Step(tk) + tk.Tagged = false + return err + default: + return fmt.Errorf("schemafree link emission only supported by this codec for CID type links") + } + default: + panic("unreachable") + } +} diff --git a/vendor/github.com/ipld/go-ipld-prime/codec/dagcbor/multicodec.go b/vendor/github.com/ipld/go-ipld-prime/codec/dagcbor/multicodec.go new file mode 100644 index 0000000000..34b822ac3a --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/codec/dagcbor/multicodec.go @@ -0,0 +1,46 @@ +package dagcbor + +import ( + "io" + + "github.com/polydawn/refmt/cbor" + + ipld "github.com/ipld/go-ipld-prime" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" +) + +var ( + _ cidlink.MulticodecDecoder = Decoder + _ cidlink.MulticodecEncoder = Encoder +) + +func init() { + cidlink.RegisterMulticodecDecoder(0x71, Decoder) + cidlink.RegisterMulticodecEncoder(0x71, Encoder) +} + +func Decoder(na ipld.NodeAssembler, r io.Reader) error { + // Probe for a builtin fast path. Shortcut to that if possible. + // (ipldcbor.NodeBuilder supports this, for example.) + type detectFastPath interface { + DecodeDagCbor(io.Reader) error + } + if na2, ok := na.(detectFastPath); ok { + return na2.DecodeDagCbor(r) + } + // Okay, generic builder path. + return Unmarshal(na, cbor.NewDecoder(cbor.DecodeOptions{}, r)) +} + +func Encoder(n ipld.Node, w io.Writer) error { + // Probe for a builtin fast path. Shortcut to that if possible. + // (ipldcbor.Node supports this, for example.) + type detectFastPath interface { + EncodeDagCbor(io.Writer) error + } + if n2, ok := n.(detectFastPath); ok { + return n2.EncodeDagCbor(w) + } + // Okay, generic inspection path. 
+ return Marshal(n, cbor.NewEncoder(w)) +} diff --git a/vendor/github.com/ipld/go-ipld-prime/codec/dagcbor/unmarshal.go b/vendor/github.com/ipld/go-ipld-prime/codec/dagcbor/unmarshal.go new file mode 100644 index 0000000000..d22d40f993 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/codec/dagcbor/unmarshal.go @@ -0,0 +1,152 @@ +package dagcbor + +import ( + "errors" + "fmt" + "math" + + cid "github.com/ipfs/go-cid" + "github.com/polydawn/refmt/shared" + "github.com/polydawn/refmt/tok" + + ipld "github.com/ipld/go-ipld-prime" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" +) + +var ( + ErrInvalidMultibase = errors.New("invalid multibase on IPLD link") +) + +// This should be identical to the general feature in the parent package, +// except for the `case tok.TBytes` block, +// which has dag-cbor's special sauce for detecting schemafree links. + +func Unmarshal(na ipld.NodeAssembler, tokSrc shared.TokenSource) error { + var tk tok.Token + done, err := tokSrc.Step(&tk) + if err != nil { + return err + } + if done && !tk.Type.IsValue() { + return fmt.Errorf("unexpected eof") + } + return unmarshal(na, tokSrc, &tk) +} + +// starts with the first token already primed. Necessary to get recursion +// to flow right without a peek+unpeek system. +func unmarshal(na ipld.NodeAssembler, tokSrc shared.TokenSource, tk *tok.Token) error { + // FUTURE: check for schema.TypedNodeBuilder that's going to parse a Link (they can slurp any token kind they want). 
+ switch tk.Type { + case tok.TMapOpen: + expectLen := tk.Length + allocLen := tk.Length + if tk.Length == -1 { + expectLen = math.MaxInt32 + allocLen = 0 + } + ma, err := na.BeginMap(allocLen) + if err != nil { + return err + } + observedLen := 0 + for { + _, err := tokSrc.Step(tk) + if err != nil { + return err + } + switch tk.Type { + case tok.TMapClose: + if expectLen != math.MaxInt32 && observedLen != expectLen { + return fmt.Errorf("unexpected mapClose before declared length") + } + return ma.Finish() + case tok.TString: + // continue + default: + return fmt.Errorf("unexpected %s token while expecting map key", tk.Type) + } + observedLen++ + if observedLen > expectLen { + return fmt.Errorf("unexpected continuation of map elements beyond declared length") + } + mva, err := ma.AssembleEntry(tk.Str) + if err != nil { // return in error if the key was rejected + return err + } + err = Unmarshal(mva, tokSrc) + if err != nil { // return in error if some part of the recursion errored + return err + } + } + case tok.TMapClose: + return fmt.Errorf("unexpected mapClose token") + case tok.TArrOpen: + expectLen := tk.Length + allocLen := tk.Length + if tk.Length == -1 { + expectLen = math.MaxInt32 + allocLen = 0 + } + la, err := na.BeginList(allocLen) + if err != nil { + return err + } + observedLen := 0 + for { + _, err := tokSrc.Step(tk) + if err != nil { + return err + } + switch tk.Type { + case tok.TArrClose: + if expectLen != math.MaxInt32 && observedLen != expectLen { + return fmt.Errorf("unexpected arrClose before declared length") + } + return la.Finish() + default: + observedLen++ + if observedLen > expectLen { + return fmt.Errorf("unexpected continuation of array elements beyond declared length") + } + err := unmarshal(la.AssembleValue(), tokSrc, tk) + if err != nil { // return in error if some part of the recursion errored + return err + } + } + } + case tok.TArrClose: + return fmt.Errorf("unexpected arrClose token") + case tok.TNull: + return na.AssignNull() 
+ case tok.TString: + return na.AssignString(tk.Str) + case tok.TBytes: + if !tk.Tagged { + return na.AssignBytes(tk.Bytes) + } + switch tk.Tag { + case linkTag: + if tk.Bytes[0] != 0 { + return ErrInvalidMultibase + } + elCid, err := cid.Cast(tk.Bytes[1:]) + if err != nil { + return err + } + return na.AssignLink(cidlink.Link{elCid}) + default: + return fmt.Errorf("unhandled cbor tag %d", tk.Tag) + } + case tok.TBool: + return na.AssignBool(tk.Bool) + case tok.TInt: + return na.AssignInt(int(tk.Int)) // FIXME overflow check + case tok.TUint: + return na.AssignInt(int(tk.Uint)) // FIXME overflow check + case tok.TFloat64: + return na.AssignFloat(tk.Float64) + default: + panic("unreachable") + } +} diff --git a/vendor/github.com/ipld/go-ipld-prime/doc.go b/vendor/github.com/ipld/go-ipld-prime/doc.go new file mode 100644 index 0000000000..f304558a19 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/doc.go @@ -0,0 +1,50 @@ +// go-ipld-prime is a series of go interfaces for manipulating IPLD data. +// +// See https://github.com/ipld/specs for more information about the basics +// of "What is IPLD?". +// +// See https://github.com/ipld/go-ipld-prime/tree/master/doc/README.md +// for more documentation about go-ipld-prime's architecture and usage. +// +// Here in the godoc, the first couple of types to look at should be: +// +// - Node +// - NodeBuilder (and NodeAssembler) +// +// These types provide a generic description of the data model. +// +// If working with linked data (data which is split into multiple +// trees of Nodes, loaded separately, and connected by some kind of +// "link" reference), the next types you should look at are: +// +// - Link +// - LinkBuilder +// - Loader +// - Storer +// +// All of these types are interfaces. There are several implementations you +// can choose; we've provided some in subpackages, or you can bring your own. 
+// +// Particularly interesting subpackages include: +// +// - node/* -- various Node + NodeBuilder implementations +// - node/basic -- the first Node implementation you should try +// - codec/* -- functions for serializing and deserializing Nodes +// - linking/* -- various Link + LinkBuilder implementations +// - traversal -- functions for walking Node graphs (including +// automatic link loading) and visiting +// - must -- helpful functions for streamlining error handling +// - fluent -- alternative Node interfaces that flip errors to panics +// - schema -- interfaces for working with IPLD Schemas and Nodes +// which use Schema types and constraints +// +// Note that since interfaces in this package are the core of the library, +// choices made here maximize correctness and performance -- these choices +// are *not* always the choices that would maximize ergonomics. +// (Ergonomics can come on top; performance generally can't.) +// You can check out the 'must' or 'fluent' packages for more ergonomics; +// 'traversal' provides some ergnomics features for certain uses; +// any use of schemas with codegen tooling will provide more ergnomic options; +// or you can make your own function decorators that do what *you* need. +// +package ipld diff --git a/vendor/github.com/ipld/go-ipld-prime/errors.go b/vendor/github.com/ipld/go-ipld-prime/errors.go new file mode 100644 index 0000000000..fec6246a4f --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/errors.go @@ -0,0 +1,157 @@ +package ipld + +import ( + "fmt" +) + +// ErrWrongKind may be returned from functions on the Node interface when +// a method is invoked which doesn't make sense for the Kind and/or ReprKind +// that node concretely contains. +// +// For example, calling AsString on a map will return ErrWrongKind. +// Calling Lookup on an int will similarly return ErrWrongKind. 
+type ErrWrongKind struct { + // TypeName may optionally indicate the named type of a node the function + // was called on (if the node was typed!), or, may be the empty string. + TypeName string + + // MethodName is literally the string for the operation attempted, e.g. + // "AsString". + // + // For methods on nodebuilders, we say e.g. "NodeBuilder.CreateMap". + MethodName string + + // ApprorpriateKind describes which ReprKinds the erroring method would + // make sense for. + AppropriateKind ReprKindSet + + // ActualKind describes the ReprKind of the node the method was called on. + // + // In the case of typed nodes, this will typically refer to the 'natural' + // data-model kind for such a type (e.g., structs will say 'map' here). + ActualKind ReprKind +} + +func (e ErrWrongKind) Error() string { + if e.TypeName == "" { + return fmt.Sprintf("func called on wrong kind: %s called on a %s node, but only makes sense on %s", e.MethodName, e.ActualKind, e.AppropriateKind) + } else { + return fmt.Sprintf("func called on wrong kind: %s called on a %s node (kind: %s), but only makes sense on %s", e.MethodName, e.TypeName, e.ActualKind, e.AppropriateKind) + } +} + +// ErrNotExists may be returned from the lookup functions of the Node interface +// to indicate a missing value. +// +// Note that schema.ErrNoSuchField is another type of error which sometimes +// occurs in similar places as ErrNotExists. ErrNoSuchField is preferred +// when handling data with constraints provided by a schema that mean that +// a field can *never* exist (as differentiated from a map key which is +// simply absent in some data). +type ErrNotExists struct { + Segment PathSegment +} + +func (e ErrNotExists) Error() string { + return fmt.Sprintf("key not found: %q", e.Segment) +} + +// ErrRepeatedMapKey is an error indicating that a key was inserted +// into a map that already contains that key. 
+// +// This error may be returned by any methods that add data to a map -- +// any of the methods on a NodeAssembler that was yielded by MapAssembler.AssignKey(), +// or from the MapAssembler.AssignDirectly() method. +type ErrRepeatedMapKey struct { + Key Node +} + +func (e ErrRepeatedMapKey) Error() string { + return fmt.Sprintf("cannot repeat map key (\"%s\")", e.Key) +} + +// ErrInvalidKey indicates a key is invalid for some reason. +// +// This is only possible for typed nodes; specifically, it may show up when +// handling struct types, or maps with interesting key types. +// (Other kinds of key invalidity that happen for untyped maps +// fall under ErrRepeatedMapKey or ErrWrongKind.) +// (Union types use ErrInvalidUnionDiscriminant instead of ErrInvalidKey, +// even when their representation strategy is maplike.) +type ErrInvalidKey struct { + // TypeName will indicate the named type of a node the function was called on. + TypeName string + + // Key is the key that was rejected. + Key Node + + // Reason, if set, may provide details (for example, the reason a key couldn't be converted to a type). + // If absent, it'll be presumed "no such field". + // ErrUnmatchable may show up as a reason for typed maps with complex keys. + Reason error +} + +func (e ErrInvalidKey) Error() string { + if e.Reason == nil { + return fmt.Sprintf("invalid key for map %s: \"%s\": no such field", e.TypeName, e.Key) + } else { + return fmt.Sprintf("invalid key for map %s: \"%s\": %s", e.TypeName, e.Key, e.Reason) + } +} + +// ErrInvalidSegmentForList is returned when using Node.LookupBySegment and the +// given PathSegment can't be applied to a list because it's unparsable as a number. +type ErrInvalidSegmentForList struct { + // TypeName may indicate the named type of a node the function was called on, + // or be empty string if working on untyped data. + TypeName string + + // TroubleSegment is the segment we couldn't use. 
+ TroubleSegment PathSegment + + // Reason may explain more about why the PathSegment couldn't be used; + // in practice, it's probably a 'strconv.NumError'. + Reason error +} + +func (e ErrInvalidSegmentForList) Error() string { + v := "invalid segment for lookup on a list" + if e.TypeName != "" { + v += " of type " + e.TypeName + } + return v + fmt.Sprintf(": %q: %s", e.TroubleSegment.s, e.Reason) +} + +// ErrUnmatchable is the catch-all type for parse errors in schema representation work. +// +// REVIEW: are builders at type level ever going to return this? i don't think so. +// REVIEW: can this ever be triggered during the marshalling direction? perhaps not. +// REVIEW: do things like ErrWrongKind end up being wrapped by this? that doesn't seem pretty. +// REVIEW: do natural representations ever trigger this? i don't think so. maybe that's a hint towards a better name. +// REVIEW: are user validation functions encouraged to return this? or something else? +// +type ErrUnmatchable struct { + // TypeName will indicate the named type of a node the function was called on. + TypeName string + + // Reason must always be present. ErrUnmatchable doesn't say much otherwise. + Reason error +} + +func (e ErrUnmatchable) Error() string { + return fmt.Sprintf("parsing of %s rejected: %s", e.TypeName, e.Reason) +} + +// ErrIteratorOverread is returned when calling 'Next' on a MapIterator or +// ListIterator when it is already done. +type ErrIteratorOverread struct{} + +func (e ErrIteratorOverread) Error() string { + return "iterator overread" +} + +type ErrCannotBeNull struct{} // Review: arguably either ErrInvalidKindForNodePrototype. + +type ErrMissingRequiredField struct{} // only possible for typed nodes -- specifically, struct types. +type ErrListOverrun struct{} // only possible for typed nodes -- specifically, struct types with list (aka tuple) representations. +type ErrInvalidUnionDiscriminant struct{} // only possible for typed nodes -- specifically, union types. 
diff --git a/vendor/github.com/ipld/go-ipld-prime/fluent/doc.go b/vendor/github.com/ipld/go-ipld-prime/fluent/doc.go new file mode 100644 index 0000000000..86c9768ccb --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/fluent/doc.go @@ -0,0 +1,15 @@ +/* + The fluent package offers helper utilities for using NodeAssembler + more tersely by providing an interface that handles all errors for you, + and allows use of closures for any recursive assembly + so that creating trees of data results in indentation for legibility. + + Note that the fluent package creates wrapper objects in order to provide + the API conveniences that it does, and this comes at some cost to performance. + If you're optimizing for performance, using the fluent interfaces may be inadvisable. + However, as with any performance questions, benchmark before making decisions; + its entirely possible that your performance bottlenecks will be elsewhere + and there's no reason to deny yourself syntactic sugar if the costs don't + detectably affect the bottom line. 
+*/ +package fluent diff --git a/vendor/github.com/ipld/go-ipld-prime/fluent/fluentBuilder.go b/vendor/github.com/ipld/go-ipld-prime/fluent/fluentBuilder.go new file mode 100644 index 0000000000..6ca269c4d8 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/fluent/fluentBuilder.go @@ -0,0 +1,179 @@ +package fluent + +import ( + ipld "github.com/ipld/go-ipld-prime" +) + +func Build(np ipld.NodePrototype, fn func(NodeAssembler)) (ipld.Node, error) { + nb := np.NewBuilder() + fna := WrapAssembler(nb) + err := Recover(func() { + fn(fna) + }) + return nb.Build(), err +} + +func MustBuild(np ipld.NodePrototype, fn func(NodeAssembler)) ipld.Node { + nb := np.NewBuilder() + fn(WrapAssembler(nb)) + return nb.Build() +} +func MustBuildMap(np ipld.NodePrototype, sizeHint int, fn func(MapAssembler)) ipld.Node { + return MustBuild(np, func(fna NodeAssembler) { fna.CreateMap(sizeHint, fn) }) +} +func MustBuildList(np ipld.NodePrototype, sizeHint int, fn func(ListAssembler)) ipld.Node { + return MustBuild(np, func(fna NodeAssembler) { fna.CreateList(sizeHint, fn) }) +} + +func WrapAssembler(na ipld.NodeAssembler) NodeAssembler { + return &nodeAssembler{na} +} + +// NodeAssembler is the same as the interface in the core package, except: +// instead of returning errors, any error will cause panic +// (and you can collect these with `fluent.Recover`); +// and all recursive operations take a function as a parameter, +// within which you will receive another {Map,List,}NodeAssembler. 
+type NodeAssembler interface { + CreateMap(sizeHint int, fn func(MapAssembler)) + CreateList(sizeHint int, fn func(ListAssembler)) + AssignNull() + AssignBool(bool) + AssignInt(int) + AssignFloat(float64) + AssignString(string) + AssignBytes([]byte) + AssignLink(ipld.Link) + AssignNode(ipld.Node) + + Prototype() ipld.NodePrototype +} + +// MapAssembler is the same as the interface in the core package, except: +// instead of returning errors, any error will cause panic +// (and you can collect these with `fluent.Recover`); +// and all recursive operations take a function as a parameter, +// within which you will receive another {Map,List,}NodeAssembler. +type MapAssembler interface { + AssembleKey() NodeAssembler + AssembleValue() NodeAssembler + + AssembleEntry(k string) NodeAssembler + + KeyPrototype() ipld.NodePrototype + ValuePrototype(k string) ipld.NodePrototype +} + +// ListAssembler is the same as the interface in the core package, except: +// instead of returning errors, any error will cause panic +// (and you can collect these with `fluent.Recover`); +// and all recursive operations take a function as a parameter, +// within which you will receive another {Map,List,}NodeAssembler. 
+type ListAssembler interface { + AssembleValue() NodeAssembler + + ValuePrototype(idx int) ipld.NodePrototype +} + +type nodeAssembler struct { + na ipld.NodeAssembler +} + +func (fna *nodeAssembler) CreateMap(sizeHint int, fn func(MapAssembler)) { + if ma, err := fna.na.BeginMap(sizeHint); err != nil { + panic(Error{err}) + } else { + fn(&mapNodeAssembler{ma}) + if err := ma.Finish(); err != nil { + panic(Error{err}) + } + } +} +func (fna *nodeAssembler) CreateList(sizeHint int, fn func(ListAssembler)) { + if la, err := fna.na.BeginList(sizeHint); err != nil { + panic(Error{err}) + } else { + fn(&listNodeAssembler{la}) + if err := la.Finish(); err != nil { + panic(Error{err}) + } + } +} +func (fna *nodeAssembler) AssignNull() { + if err := fna.na.AssignNull(); err != nil { + panic(Error{err}) + } +} +func (fna *nodeAssembler) AssignBool(v bool) { + if err := fna.na.AssignBool(v); err != nil { + panic(Error{err}) + } +} +func (fna *nodeAssembler) AssignInt(v int) { + if err := fna.na.AssignInt(v); err != nil { + panic(Error{err}) + } +} +func (fna *nodeAssembler) AssignFloat(v float64) { + if err := fna.na.AssignFloat(v); err != nil { + panic(Error{err}) + } +} +func (fna *nodeAssembler) AssignString(v string) { + if err := fna.na.AssignString(v); err != nil { + panic(Error{err}) + } +} +func (fna *nodeAssembler) AssignBytes(v []byte) { + if err := fna.na.AssignBytes(v); err != nil { + panic(Error{err}) + } +} +func (fna *nodeAssembler) AssignLink(v ipld.Link) { + if err := fna.na.AssignLink(v); err != nil { + panic(Error{err}) + } +} +func (fna *nodeAssembler) AssignNode(v ipld.Node) { + if err := fna.na.AssignNode(v); err != nil { + panic(Error{err}) + } +} +func (fna *nodeAssembler) Prototype() ipld.NodePrototype { + return fna.na.Prototype() +} + +type mapNodeAssembler struct { + ma ipld.MapAssembler +} + +func (fma *mapNodeAssembler) AssembleKey() NodeAssembler { + return &nodeAssembler{fma.ma.AssembleKey()} +} +func (fma *mapNodeAssembler) AssembleValue() 
NodeAssembler { + return &nodeAssembler{fma.ma.AssembleValue()} +} +func (fma *mapNodeAssembler) AssembleEntry(k string) NodeAssembler { + va, err := fma.ma.AssembleEntry(k) + if err != nil { + panic(Error{err}) + } + return &nodeAssembler{va} +} +func (fma *mapNodeAssembler) KeyPrototype() ipld.NodePrototype { + return fma.ma.KeyPrototype() +} +func (fma *mapNodeAssembler) ValuePrototype(k string) ipld.NodePrototype { + return fma.ma.ValuePrototype(k) +} + +type listNodeAssembler struct { + la ipld.ListAssembler +} + +func (fla *listNodeAssembler) AssembleValue() NodeAssembler { + return &nodeAssembler{fla.la.AssembleValue()} +} +func (fla *listNodeAssembler) ValuePrototype(idx int) ipld.NodePrototype { + return fla.la.ValuePrototype(idx) +} diff --git a/vendor/github.com/ipld/go-ipld-prime/fluent/fluentRecover.go b/vendor/github.com/ipld/go-ipld-prime/fluent/fluentRecover.go new file mode 100644 index 0000000000..dec9693a86 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/fluent/fluentRecover.go @@ -0,0 +1,30 @@ +package fluent + +type Error struct { + Err error +} + +func (e Error) Error() string { + return e.Err.Error() +} + +// Recover invokes a function within a panic-recovering context, and returns +// any raised fluent.Error values; any other values are re-panicked. +// +// This can be useful for writing large blocks of code using fluent nodes, +// and handling any errors at once at the end. 
+func Recover(fn func()) (err error) { + defer func() { + ei := recover() + switch e2 := ei.(type) { + case nil: + return + case Error: + err = e2 + default: + panic(ei) + } + }() + fn() + return +} diff --git a/vendor/github.com/ipld/go-ipld-prime/go.mod b/vendor/github.com/ipld/go-ipld-prime/go.mod new file mode 100644 index 0000000000..8d6428852d --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/go.mod @@ -0,0 +1,14 @@ +module github.com/ipld/go-ipld-prime + +go 1.14 + +require ( + github.com/ipfs/go-cid v0.0.4 + github.com/minio/sha256-simd v0.1.1 // indirect + github.com/mr-tron/base58 v1.1.3 // indirect + github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 + github.com/smartystreets/goconvey v1.6.4 // indirect + github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a + golang.org/x/crypto v0.0.0-20200117160349-530e935923ad // indirect + golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 // indirect +) diff --git a/vendor/github.com/ipld/go-ipld-prime/go.sum b/vendor/github.com/ipld/go-ipld-prime/go.sum new file mode 100644 index 0000000000..bc17d60f18 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/go.sum @@ -0,0 +1,43 @@ +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/ipfs/go-cid v0.0.4 h1:UlfXKrZx1DjZoBhQHmNHLC1fK1dUJDN20Y28A7s+gJ8= +github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod 
h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= +github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multihash v0.0.10 h1:lMoNbh2Ssd9PUF74Nz008KGzGPlfeV6wH3rit5IIGCM= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 h1:CskT+S6Ay54OwxBGB0R3Rsx4Muto6UnEYTyKJbyRIAI= +github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spaolacci/murmur3 v1.1.0 
h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= +github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200117160349-530e935923ad h1:Jh8cai0fqIK+f6nG0UgPW5wFk8wmiMhM3AyciDBdtQg= +golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= diff --git a/vendor/github.com/ipld/go-ipld-prime/kind.go b/vendor/github.com/ipld/go-ipld-prime/kind.go new file mode 100644 index 0000000000..9f53ae4784 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/kind.go @@ -0,0 +1,87 @@ +package ipld + +// ReprKind represents the primitive kind in the IPLD data model. 
+// All of these kinds map directly onto serializable data. +// +// Note that ReprKind contains the concept of "map", but not "struct" +// or "object" -- those are a concepts that could be introduced in a +// type system layers, but are *not* present in the data model layer, +// and therefore they aren't included in the ReprKind enum. +type ReprKind uint8 + +const ( + ReprKind_Invalid ReprKind = 0 + ReprKind_Map ReprKind = '{' + ReprKind_List ReprKind = '[' + ReprKind_Null ReprKind = '0' + ReprKind_Bool ReprKind = 'b' + ReprKind_Int ReprKind = 'i' + ReprKind_Float ReprKind = 'f' + ReprKind_String ReprKind = 's' + ReprKind_Bytes ReprKind = 'x' + ReprKind_Link ReprKind = '/' +) + +func (k ReprKind) String() string { + switch k { + case ReprKind_Invalid: + return "INVALID" + case ReprKind_Map: + return "map" + case ReprKind_List: + return "list" + case ReprKind_Null: + return "null" + case ReprKind_Bool: + return "bool" + case ReprKind_Int: + return "int" + case ReprKind_Float: + return "float" + case ReprKind_String: + return "string" + case ReprKind_Bytes: + return "bytes" + case ReprKind_Link: + return "link" + default: + panic("invalid enumeration value!") + } +} + +// ReprKindSet is a type with a few enumerated consts that are commonly used +// (mostly, in error messages). 
+type ReprKindSet []ReprKind + +var ( + ReprKindSet_Recursive = ReprKindSet{ReprKind_Map, ReprKind_List} + ReprKindSet_Scalar = ReprKindSet{ReprKind_Null, ReprKind_Bool, ReprKind_Int, ReprKind_Float, ReprKind_String, ReprKind_Bytes, ReprKind_Link} + + ReprKindSet_JustMap = ReprKindSet{ReprKind_Map} + ReprKindSet_JustList = ReprKindSet{ReprKind_List} + ReprKindSet_JustNull = ReprKindSet{ReprKind_Null} + ReprKindSet_JustBool = ReprKindSet{ReprKind_Bool} + ReprKindSet_JustInt = ReprKindSet{ReprKind_Int} + ReprKindSet_JustFloat = ReprKindSet{ReprKind_Float} + ReprKindSet_JustString = ReprKindSet{ReprKind_String} + ReprKindSet_JustBytes = ReprKindSet{ReprKind_Bytes} + ReprKindSet_JustLink = ReprKindSet{ReprKind_Link} +) + +func (x ReprKindSet) String() string { + s := "" + for i := 0; i < len(x)-1; i++ { + s += x[i].String() + " or " + } + s += x[len(x)-1].String() + return s +} + +func (x ReprKindSet) Contains(e ReprKind) bool { + for _, v := range x { + if v == e { + return true + } + } + return false +} diff --git a/vendor/github.com/ipld/go-ipld-prime/linking.go b/vendor/github.com/ipld/go-ipld-prime/linking.go new file mode 100644 index 0000000000..8ccc40e93c --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/linking.go @@ -0,0 +1,144 @@ +package ipld + +import ( + "context" + "io" +) + +// Link is a special kind of value in IPLD which can be "loaded" to access +// more nodes. +// +// Nodes can return a Link; this can be loaded manually, or, +// the traversal package contains powerful features for automatically +// traversing links through large trees of nodes. 
+// +// Links straddle somewhat awkwardly across the IPLD Layer Model: +// clearly not at the Schema layer (though schemas can define their parameters), +// partially at the Data Model layer (as they're recognizably in the Node interface), +// and also involved at some serial layer that we don't often talk about: +// linking -- since we're a content-addressed system at heart -- necessarily +// involves understanding of concrete serialization details: +// which encoding mechanisms to use, what string escaping, what hashing, etc, +// and indeed what concrete serial link representation itself to use. +// +// Link is an abstract interface so that we can describe Nodes without +// getting stuck on specific details of any link representation. +// In practice, you'll almost certainly use CIDs for linking. +// However, it's possible to bring your own Link implementations +// (though this'll almost certainly involve also bringing your own encoding +// systems; it's a lot of work). +// It's even possible to use IPLD *entirely without* any linking implementation, +// using it purely for json/cbor via the encoding packages and +// foregoing the advanced traversal features around transparent link loading. +type Link interface { + // Load consumes serial data from a Loader and funnels the parsed + // data into a NodeAssembler. + // + // The provided Loader function is used to get a reader for the raw + // serialized content; the Link contains an understanding of how to + // select a decoder (and hasher for verification, etc); and the + // NodeAssembler accumulates the final results (which you can + // presumably access from elsewhere; Load is designed not to know + // about this). + Load(context.Context, LinkContext, NodeAssembler, Loader) error + + // LinkBuilder returns a handle to any parameters of the Link which + // are needed to create a new Link of the same style but with new content. + // (It's much like the relationship of Node/NodeBuilder.) 
+ // + // (If you're familiar with CIDs, you can think of this method as + // corresponding closely to `cid.Prefix()`, just more abstractly.) + LinkBuilder() LinkBuilder + + // String should return a reasonably human-readable debug-friendly + // representation of a Link. It should only be used for debug and + // log message purposes; there is no contract that requires that the + // string be able to be parsed back into a reified Link. + String() string +} + +// LinkBuilder encapsulates any implementation details and parameters +// necessary for taking a Node and converting it to a serial representation +// and returning a Link to that data. +// +// The serialized bytes will be routed through the provided Storer system, +// which is expected to store them in some way such that a related Loader +// system can later use the Link and an associated Loader to load nodes +// of identical content. +// +// LinkBuilder, like Link, is an abstract interface. +// If using CIDs as an implementation, LinkBuilder will encapsulate things +// like multihashType, multicodecType, and cidVersion, for example. +type LinkBuilder interface { + Build(context.Context, LinkContext, Node, Storer) (Link, error) +} + +// Loader functions are used to get a reader for raw serialized content +// based on the lookup information in a Link. +// A loader function is used by providing it to a Link.Load() call. +// +// Loaders typically have some filesystem or database handle contained +// within their closure which is used to satisfy read operations. +// +// LinkContext objects can be provided to give additional information +// to the loader, and will be automatically filled out when a Loader +// is used by systems in the traversal package; most Loader implementations +// should also work fine when given the zero value of LinkContext. +// +// Loaders are implicitly coupled to a Link implementation and have some +// "extra" knowledge of the concrete Link type. 
This is necessary since there is
+// no mandated standard for how to serially represent Link itself, and such
+// a representation is typically needed by a Storer implementation.
+type Loader func(lnk Link, lnkCtx LinkContext) (io.Reader, error)
+
+// Storer functions are used to get a writer for raw serialized content,
+// which will be committed to storage indexed by Link.
+// A storer function is used by providing it to a LinkBuilder.Build() call.
+
+// The storer system comes in two parts: the Storer itself *starts* a storage
+// operation (presumably to some e.g. tempfile) and returns a writer; the
+// StoreCommitter returned with the writer is used to *commit* the final storage
+// (much like a 'Close' operation for the writer).
+//
+// Storers typically have some filesystem or database handle contained
+// within their closure which is used to satisfy read operations.
+//
+// LinkContext objects can be provided to give additional information
+// to the storer, and will be automatically filled out when a Storer
+// is used by systems in the traversal package; most Storer implementations
+// should also work fine when given the zero value of LinkContext.
+//
+// Storers are implicitly coupled to a Link implementation and have some
+// "extra" knowledge of the concrete Link type. This is necessary since there is
+// no mandated standard for how to serially represent Link itself, and such
+// a representation is typically needed by a Storer implementation.
+type Storer func(lnkCtx LinkContext) (io.Writer, StoreCommitter, error)
+
+// StoreCommitter is a thunk returned by a Storer which is used to "commit"
+// the storage operation. It should be called after the associated writer
+// is finished, similar to a 'Close' method, but further takes a Link parameter,
+// which is the identity of the content. Typically, this will cause an atomic
+// operation in the storage system to move the already-written content into
+// a final place (e.g. 
rename a tempfile) determined by the Link. +// (The Link parameter is necessarily only given at the end of the process +// rather than the beginning to so that we can have content-addressible +// semantics while also supporting streaming writes.) +type StoreCommitter func(Link) error + +// LinkContext is a parameter to Storer and Loader functions. +// +// An example use of LinkContext might be inspecting the LinkNode, and if +// it's a typed node, inspecting its Type property; then, a Loader might +// deciding on whether or not we want to load objects of that Type. +// This might be used to do a traversal which looks at all directory objects, +// but not file contents, for example. +type LinkContext struct { + LinkPath Path + LinkNode Node // has the Link again, but also might have type info // always zero for writing new nodes, for obvi reasons. + ParentNode Node +} + +// n.b. if I had java, this would all indeed be generic: +// `Link<$T>`, `LinkBuilder<$T>`, `Storer<$T>`, etc would be an explicit family. +// ... Then again, in java, that'd prevent composition of a Storer or Loader +// which could support more than one concrete type, so. ¯\_(ツ)_/¯ diff --git a/vendor/github.com/ipld/go-ipld-prime/linking/cid/cidLink.go b/vendor/github.com/ipld/go-ipld-prime/linking/cid/cidLink.go new file mode 100644 index 0000000000..b591e4b3e6 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/linking/cid/cidLink.go @@ -0,0 +1,110 @@ +package cidlink + +import ( + "bytes" + "context" + "fmt" + "io" + + cid "github.com/ipfs/go-cid" + ipld "github.com/ipld/go-ipld-prime" +) + +var ( + _ ipld.Link = Link{} + _ ipld.LinkBuilder = LinkBuilder{} +) + +type Link struct { + cid.Cid +} + +// byteAccessor is a reader interface that can access underlying bytes +type byteAccesor interface { + Bytes() []byte +} + +func (lnk Link) Load(ctx context.Context, lnkCtx ipld.LinkContext, na ipld.NodeAssembler, loader ipld.Loader) error { + // Open the byte reader. 
+ r, err := loader(lnk, lnkCtx) + if err != nil { + return err + } + // Tee into hash checking and unmarshalling. + mcDecoder, exists := multicodecDecodeTable[lnk.Prefix().Codec] + if !exists { + return fmt.Errorf("no decoder registered for multicodec %d", lnk.Prefix().Codec) + } + var hasherBytes []byte + var decodeErr error + byteBuf, ok := r.(byteAccesor) + if ok { + hasherBytes = byteBuf.Bytes() + decodeErr = mcDecoder(na, r) + } else { + var hasher bytes.Buffer // multihash only exports bulk use, which is... really inefficient and should be fixed. + decodeErr = mcDecoder(na, io.TeeReader(r, &hasher)) + // Error checking order here is tricky. + // If decoding errored out, we should still run the reader to the end, to check the hash. + // (We still don't implement this by running the hash to the end first, because that would increase the high-water memory requirement.) + // ((Which we experience right now anyway because multihash's interface is silly, but we're acting as if that's fixed or will be soon.)) + // If the hash is rejected, we should return that error (and even if there was a decodeErr, it becomes irrelevant). + if decodeErr != nil { + _, err := io.Copy(&hasher, r) + if err != nil { + return err + } + } + hasherBytes = hasher.Bytes() + } + + cid, err := lnk.Prefix().Sum(hasherBytes) + if err != nil { + return err + } + if cid != lnk.Cid { + return fmt.Errorf("hash mismatch! %q (actual) != %q (expected)", cid, lnk.Cid) + } + if decodeErr != nil { + return decodeErr + } + return nil +} +func (lnk Link) LinkBuilder() ipld.LinkBuilder { + return LinkBuilder{lnk.Cid.Prefix()} +} +func (lnk Link) String() string { + return lnk.Cid.String() +} + +type LinkBuilder struct { + cid.Prefix +} + +func (lb LinkBuilder) Build(ctx context.Context, lnkCtx ipld.LinkContext, node ipld.Node, storer ipld.Storer) (ipld.Link, error) { + // Open the byte writer. 
+ w, commit, err := storer(lnkCtx) + if err != nil { + return nil, err + } + // Marshal, teeing into the storage writer and the hasher. + mcEncoder, exists := multicodecEncodeTable[lb.Prefix.Codec] + if !exists { + return nil, fmt.Errorf("no encoder registered for multicodec %d", lb.Prefix.Codec) + } + var hasher bytes.Buffer // multihash-via-cid only exports bulk use, which is... really inefficient and should be fixed. + w = io.MultiWriter(&hasher, w) + err = mcEncoder(node, w) + if err != nil { + return nil, err + } + cid, err := lb.Prefix.Sum(hasher.Bytes()) + if err != nil { + return nil, err + } + lnk := Link{cid} + if err := commit(lnk); err != nil { + return lnk, err + } + return lnk, nil +} diff --git a/vendor/github.com/ipld/go-ipld-prime/linking/cid/multicodec.go b/vendor/github.com/ipld/go-ipld-prime/linking/cid/multicodec.go new file mode 100644 index 0000000000..510e041177 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/linking/cid/multicodec.go @@ -0,0 +1,42 @@ +package cidlink + +import ( + "io" + + ipld "github.com/ipld/go-ipld-prime" +) + +type MulticodecDecodeTable map[uint64]MulticodecDecoder + +type MulticodecEncodeTable map[uint64]MulticodecEncoder + +// MulticodecDecoder builds an ipld.Node by unmarshalling bytes and funnelling +// the data tree into an ipld.NodeAssembler. The resulting Node is not +// returned; typically you call this function with an ipld.NodeBuilder, +// and you can extract the result from there. +// +// MulticodecDecoder are used by registering them in a MulticodecDecoderTable, +// which makes them available to be used internally by cidlink.Link.Load. +// +// Consider implementing decoders to probe their NodeBuilder to see if it +// has special features that may be able to do the job more efficiently. 
+// For example, ipldcbor.NodeBuilder has special unmarshaller functions
+// that know how to fastpath their work *if* we're doing a cbor decode;
+// if possible, detect and use that; if not, fall back to general generic
+// NodeBuilder usage.
+type MulticodecDecoder func(ipld.NodeAssembler, io.Reader) error
+
+// MulticodecEncoder marshals an ipld.Node into bytes and sends them to
+// an io.Writer.
+//
+// MulticodecEncoders are used by registering them in a MulticodecEncodeTable,
+// which makes them available to be used internally by cidlink.LinkBuilder.
+//
+// Tends to be implemented by probing the node to see if it matches a special
+// interface that we know can do this particular kind of encoding
+// (e.g. if you're using ipldgit.Node and making a MulticodecEncoder to register
+// as the rawgit multicodec, you'll probe for that specific thing, since it's
+// implemented on the node itself),
+// but may also be able to work based on the ipld.Node interface alone
+// (e.g. you can do dag-cbor to any kind of Node).
+type MulticodecEncoder func(ipld.Node, io.Writer) error
diff --git a/vendor/github.com/ipld/go-ipld-prime/linking/cid/multicodecRegistry.go b/vendor/github.com/ipld/go-ipld-prime/linking/cid/multicodecRegistry.go
new file mode 100644
index 0000000000..f24f8decca
--- /dev/null
+++ b/vendor/github.com/ipld/go-ipld-prime/linking/cid/multicodecRegistry.go
@@ -0,0 +1,35 @@
+package cidlink
+
+import "fmt"
+
+var (
+	multicodecDecodeTable MulticodecDecodeTable
+	multicodecEncodeTable MulticodecEncodeTable
+)
+
+func init() {
+	multicodecEncodeTable = make(MulticodecEncodeTable)
+	multicodecDecodeTable = make(MulticodecDecodeTable)
+}
+
+// RegisterMulticodecDecoder is used to register multicodec features.
+// It adjusts a global registry and may only be used at program init time;
+// it is meant to provide a plugin system, not a configuration mechanism.
+func RegisterMulticodecDecoder(hook uint64, fn MulticodecDecoder) { + _, exists := multicodecDecodeTable[hook] + if exists { + panic(fmt.Errorf("multicodec decoder already registered for %x", hook)) + } + multicodecDecodeTable[hook] = fn +} + +// RegisterMulticodecEncoder is used to register multicodec features. +// It adjusts a global registry and may only be used at program init time; +// it is meant to provide a plugin system, not a configuration mechanism. +func RegisterMulticodecEncoder(hook uint64, fn MulticodecEncoder) { + _, exists := multicodecEncodeTable[hook] + if exists { + panic(fmt.Errorf("multicodec encoder already registered for %x", hook)) + } + multicodecEncodeTable[hook] = fn +} diff --git a/vendor/github.com/ipld/go-ipld-prime/module.tl b/vendor/github.com/ipld/go-ipld-prime/module.tl new file mode 100644 index 0000000000..b074417d16 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/module.tl @@ -0,0 +1,24 @@ +{ + "imports": { + "base": "catalog:early.polydawn.io/monolith/busybash:v1:linux-amd64", + "go": "catalog:early.hyphae.polydawn.io/go:v1.10:linux-amd64", + "src": "ingest:git:.:HEAD" + }, + "steps": { + "test": { + "operation": { + "inputs": { + "/": "base" + "/app/go": "go" + "/task": "src" + }, + "action": { + "exec": [ + "/bin/bash", "-c", + "export PATH=$PATH:/app/go/go/bin && export GOPATH=$PWD/.gopath && go test -tags 'skipgenbehavtests' ./..." + ] + } + } + } + } +} diff --git a/vendor/github.com/ipld/go-ipld-prime/node.go b/vendor/github.com/ipld/go-ipld-prime/node.go new file mode 100644 index 0000000000..74ac535caa --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/node.go @@ -0,0 +1,282 @@ +package ipld + +// Node represents a value in IPLD. Any point in a tree of data is a node: +// scalar values (like int, string, etc) are nodes, and +// so are recursive values (like map and list). 
+//
+// Nodes and kinds are described in the IPLD specs at
+// https://github.com/ipld/specs/blob/master/data-model-layer/data-model.md .
+//
+// Methods on the Node interface cover the superset of all possible methods for
+// all possible kinds -- but some methods only make sense for particular kinds,
+// and thus will only make sense to call on values of the appropriate kind.
+// (For example, 'Length' on an int doesn't make sense,
+// and 'AsInt' on a map certainly doesn't work either!)
+// Use the ReprKind method to find out the kind of value before
+// calling kind-specific methods.
+// Individual method documentation states which kinds the method is valid for.
+// (If you're familiar with the stdlib reflect package, you'll find
+// the design of the Node interface very comparable to 'reflect.Value'.)
+//
+// The Node interface is read-only. All of the methods on the interface are
+// for examining values, and implementations should be immutable.
+// The companion interface, NodeBuilder, provides the matching writable
+// methods, and should be used to create a (thence immutable) Node.
+//
+// Keeping Node immutable and separating mutation into NodeBuilder makes
+// it possible to perform caching (or rather, memoization, since there's no
+// such thing as cache invalidation for immutable systems) of computed
+// properties of Node; use copy-on-write algorithms for memory efficiency;
+// and to generally build pleasant APIs.
+// Many library functions will rely on the immutability of Node (e.g.,
+// assuming that pointer-equal nodes do not change in value over time),
+// so any user-defined Node implementations should be careful to uphold
+// the immutability contract.
+//
+// There are many different concrete types which implement Node.
+// The primary purpose of various node implementations is to organize +// memory in the program in different ways -- some in-memory layouts may +// be more optimal for some programs than others, and changing the Node +// (and NodeBuilder) implementations lets the programmer choose. +// +// For concrete implementations of Node, check out the "./node/" folder, +// and the packages within it. +// "node/basic" should probably be your first start; the Node and NodeBuilder +// implementations in that package work for any data. +// Other packages are optimized for specific use-cases. +// Codegen tools can also be used to produce concrete implementations of Node; +// these may be specific to certain data, but still conform to the Node +// interface for interoperability and to support higher-level functions. +// +// Nodes may also be *typed* -- see the 'schema' package and `schema.TypedNode` +// interface, which extends the Node interface with additional methods. +// Typed nodes have additional constraints and behaviors: +// for example, they may be a "struct" and have a specific type/structure +// to what data you can put inside them, but still behave as a regular Node +// in all ways this interface specifies (so you can traverse typed nodes, etc, +// without any additional special effort). +type Node interface { + // ReprKind returns a value from the ReprKind enum describing what the + // essential serializable kind of this node is (map, list, int, etc). + // Most other handling of a node requires first switching upon the kind. + ReprKind() ReprKind + + // LookupByString looks up a child object in this node and returns it. + // The returned Node may be any of the ReprKind: + // a primitive (string, int, etc), a map, a list, or a link. + // + // If the Kind of this Node is not ReprKind_Map, a nil node and an error + // will be returned. + // + // If the key does not exist, a nil node and an error will be returned. 
+ LookupByString(key string) (Node, error) + + // LookupByNode is the equivalent of LookupByString, but takes a reified Node + // as a parameter instead of a plain string. + // This mechanism is useful if working with typed maps (if the key types + // have constraints, and you already have a reified `schema.TypedNode` value, + // using that value can save parsing and validation costs); + // and may simply be convenient if you already have a Node value in hand. + // + // (When writing generic functions over Node, a good rule of thumb is: + // when handling a map, check for `schema.TypedNode`, and in this case prefer + // the LookupByNode(Node) method; otherwise, favor LookupByString; typically + // implementations will have their fastest paths thusly.) + LookupByNode(key Node) (Node, error) + + // LookupByIndex is the equivalent of LookupByString but for indexing into a list. + // As with LookupByString, the returned Node may be any of the ReprKind: + // a primitive (string, int, etc), a map, a list, or a link. + // + // If the Kind of this Node is not ReprKind_List, a nil node and an error + // will be returned. + // + // If idx is out of range, a nil node and an error will be returned. + LookupByIndex(idx int) (Node, error) + + // LookupBySegment is will act as either LookupByString or LookupByIndex, + // whichever is contextually appropriate. + // + // Using LookupBySegment may imply an "atoi" conversion if used on a list node, + // or an "itoa" conversion if used on a map node. If an "itoa" conversion + // takes place, it may error, and this method may return that error. + LookupBySegment(seg PathSegment) (Node, error) + + // Note that when using codegenerated types, there may be a fifth variant + // of lookup method on maps: `Get($GeneratedTypeKey) $GeneratedTypeValue`! + + // MapIterator returns an iterator which yields key-value pairs + // traversing the node. + // If the node kind is anything other than a map, nil will be returned. 
+ // + // The iterator will yield every entry in the map; that is, it + // can be expected that itr.Next will be called node.Length times + // before itr.Done becomes true. + MapIterator() MapIterator + + // ListIterator returns an iterator which yields key-value pairs + // traversing the node. + // If the node kind is anything other than a list, nil will be returned. + // + // The iterator will yield every entry in the list; that is, it + // can be expected that itr.Next will be called node.Length times + // before itr.Done becomes true. + ListIterator() ListIterator + + // Length returns the length of a list, or the number of entries in a map, + // or -1 if the node is not of list nor map kind. + Length() int + + // Absent nodes are returned when traversing a struct field that is + // defined by a schema but unset in the data. (Absent nodes are not + // possible otherwise; you'll only see them from `schema.TypedNode`.) + // The absent flag is necessary so iterating over structs can + // unambiguously make the distinction between values that are + // present-and-null versus values that are absent. + // + // Absent nodes respond to `ReprKind()` as `ipld.ReprKind_Null`, + // for lack of any better descriptive value; you should therefore + // always check IsAbsent rather than just a switch on kind + // when it may be important to handle absent values distinctly. + IsAbsent() bool + + IsNull() bool + AsBool() (bool, error) + AsInt() (int, error) + AsFloat() (float64, error) + AsString() (string, error) + AsBytes() ([]byte, error) + AsLink() (Link, error) + + // Prototype returns a NodePrototype which can describe some properties of this node's implementation, + // and also be used to get a NodeBuilder, + // which can be use to create new nodes with the same implementation as this one. + // + // For typed nodes, the NodePrototype will also implement schema.Type. 
+ // + // For Advanced Data Layouts, the NodePrototype will encapsulate any additional + // parameters and configuration of the ADL, and will also (usually) + // implement NodePrototypeSupportingAmend. + // + // Calling this method should not cause an allocation. + Prototype() NodePrototype +} + +// NodePrototype describes a node implementation (all Node have a NodePrototype), +// and a NodePrototype can always be used to get a NodeBuilder. +// +// A NodePrototype may also provide other information about implementation; +// such information is specific to this library ("prototype" isn't a concept +// you'll find in the IPLD Specifications), and is usually provided through +// feature-detection interfaces (for example, see NodePrototypeSupportingAmend). +// +// Generic algorithms for working with IPLD Nodes make use of NodePrototype +// to get builders for new nodes when creating data, and can also use the +// feature-detection interfaces to help decide what kind of operations +// will be optimal to use on a given node implementation. +// +// Note that NodePrototype is not the same as schema.Type. +// NodePrototype is a (golang-specific!) way to reflect upon the implementation +// and in-memory layout of some IPLD data. +// schema.Type is information about how a group of nodes is related in a schema +// (if they have one!) and the rules that the type mandates the node must follow. +// (Every node must have a prototype; but schema types are an optional feature.) +type NodePrototype interface { + // NewBuilder returns a NodeBuilder that can be used to create a new Node. + // + // Note that calling NewBuilder often performs an allocation + // (while in contrast, getting a NodePrototype typically does not!) -- + // this may be consequential when writing high performance code. 
+ NewBuilder() NodeBuilder +} + +// NodePrototypeSupportingAmend is a feature-detection interface that can be +// used on a NodePrototype to see if it's possible to build new nodes of this style +// while sharing some internal data in a copy-on-write way. +// +// For example, Nodes using an Advanced Data Layout will typically +// support this behavior, and since ADLs are often used for handling large +// volumes of data, detecting and using this feature can result in significant +// performance savings. +type NodePrototypeSupportingAmend interface { + AmendingBuilder(base Node) NodeBuilder + // FUTURE: probably also needs a `AmendingWithout(base Node, filter func(k,v) bool) NodeBuilder`, or similar. + // ("deletion" based APIs are also possible but both more complicated in interfaces added, and prone to accidentally quadratic usage.) + // FUTURE: there should be some stdlib `Copy` (?) methods that automatically look for this feature, and fallback if absent. + // Might include a wide range of point `Transform`, etc, methods. + // FUTURE: consider putting this (and others like it) in a `feature` package, if there begin to be enough of them and docs get crowded. +} + +// MapIterator is an interface for traversing map nodes. +// Sequential calls to Next() will yield key-value pairs; +// Done() describes whether iteration should continue. +// +// Iteration order is defined to be stable: two separate MapIterator +// created to iterate the same Node will yield the same key-value pairs +// in the same order. +// The order itself may be defined by the Node implementation: some +// Nodes may retain insertion order, and some may return iterators which +// always yield data in sorted order, for example. +type MapIterator interface { + // Next returns the next key-value pair. 
+	//
+	// An error value can also be returned at any step: in the case of advanced
+	// data structures with incremental loading, it's possible to encounter
+	// cancellation or I/O errors at any point in iteration.
+	// If an error is returned, the boolean will always be false (so it's
+	// correct to check the bool first and short circuit to continuing if true).
+	// If an error is returned, the key and value may be nil.
+	Next() (key Node, value Node, err error)
+
+	// Done returns false as long as there's at least one more entry to iterate.
+	// When Done returns true, iteration can stop.
+	//
+	// Note when implementing iterators for advanced data layouts (e.g. more than
+	// one chunk of backing data, which is loaded incrementally): if your
+	// implementation does any I/O during the Done method, and it encounters
+	// an error, it must return 'false', so that the following Next call
+	// has an opportunity to return the error.
+	Done() bool
+}
+
+// ListIterator is an interface for traversing list nodes.
+// Sequential calls to Next() will yield index-value pairs;
+// Done() describes whether iteration should continue.
+//
+// A loop which iterates from 0 to Node.Length is a valid
+// alternative to using a ListIterator.
+type ListIterator interface {
+	// Next returns the next index and value.
+	//
+	// An error value can also be returned at any step: in the case of advanced
+	// data structures with incremental loading, it's possible to encounter
+	// cancellation or I/O errors at any point in iteration.
+	// If an error is returned, the boolean will always be false (so it's
+	// correct to check the bool first and short circuit to continuing if true).
+	// If an error is returned, the key and value may be nil.
+	Next() (idx int, value Node, err error)
+
+	// Done returns false as long as there's at least one more entry to iterate.
+	// When Done returns true, iteration can stop.
+	//
+	// Note when implementing iterators for advanced data layouts (e.g.
more than + // one chunk of backing data, which is loaded incrementally): if your + // implementation does any I/O during the Done method, and it encounters + // an error, it must return 'false', so that the following Next call + // has an opportunity to return the error. + Done() bool +} + +// REVIEW: immediate-mode AsBytes() method (as opposed to e.g. returning +// an io.Reader instance) might be problematic, esp. if we introduce +// AdvancedLayouts which support large bytes natively. +// +// Probable solution is having both immediate and iterator return methods. +// Returning a reader for bytes when you know you want a slice already +// is going to be high friction without purpose in many common uses. +// +// Unclear what SetByteStream() would look like for advanced layouts. +// One could try to encapsulate the chunking entirely within the advlay +// node impl... but would it be graceful? Not sure. Maybe. Hopefully! +// Yes? The advlay impl would still tend to use SetBytes for the raw +// data model layer nodes its composing, so overall, it shakes out nicely. diff --git a/vendor/github.com/ipld/go-ipld-prime/node/basic/HACKME.md b/vendor/github.com/ipld/go-ipld-prime/node/basic/HACKME.md new file mode 100644 index 0000000000..bf2f74a9fc --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/node/basic/HACKME.md @@ -0,0 +1,146 @@ +hackme +====== + +Design rationale are documented here. + +This doc is not necessary reading for users of this package, +but if you're considering submitting patches -- or just trying to understand +why it was written this way, and check for reasoning that might be dated -- +then it might be useful reading. + +### scalars are just typedefs + +This is noteworthy because in codegen, this is typically *not* the case: +in codegen, even scalar types are boxed in a struct, such that it prevents +casting values into those types. 
+ +This casting is not a concern for the node implementations in this package, because + +- A) we don't have any kind of validation rules to make such casting worrying; and +- B) since our types are unexported, casting is still blocked by this anyway. + +### about builders for scalars + +The assembler types for scalars (string, int, etc) are pretty funny-looking. +You might wish to make them work without any state at all! + +The reason this doesn't fly is that we have to keep the "wip" value in hand +just long enough to return it from the `NodeBuilder.Build` method -- the +`NodeAssembler` contract for `Assign*` methods doesn't permit just returning +their results immediately. + +(Another possible reason is if we expected to use these assemblers on +slab-style allocations (say, `[]plainString`)... +however, this is inapplicable at present, because +A) we don't (except places that have special-case internal paths anyway); and +B) the types aren't exported, so users can't either.) + +Does this mean that using `NodeBuilder` for scalars has a completely +unnecessary second allocation, which is laughably inefficient? Yes. +It's unfortunate the interfaces constrain us to this. +**But**: one typically doesn't actually use builders for scalars much; +they're just here for completeness. +So this is less of a problem in practice than it might at first seem. + +More often, one will use the "any" builder (which is has a whole different set +of design constraints and tradeoffs); +or, if one is writing code and knows which scalar they need, the exported +direct constructor function for that kind +(e.g., `String("foo")` instead of `Prototype__String{}.NewBuilder().AssignString("foo")`) +will do the right thing and do it in one allocation (and it's less to type, too). + +### maps and list keyAssembler and valueAssemblers have custom scalar handling + +Related to the above heading. 
+ +Maps and lists in this package do their own internal handling of scalars, +using unexported features inside the package, because they can more efficient. + +### when to invalidate the 'w' pointers + +The 'w' pointer -- short for 'wip' node pointer -- has an interesting lifecycle. + +In a NodeAssembler, the 'w' pointer should be intialized before the assembler is used. +This means either the matching NodeBuilder type does so; or, +if we're inside recursive structure, the parent assembler did so. + +The 'w' pointer is used throughout the life of the assembler. + +Setting the 'w' pointer to nil is one of two mechanisms used internally +to mark that assembly has become "finished" (the other mechanism is using +an internal state enum field). +Setting the 'w' pointer to nil has two advantages: +one is that it makes it *impossible* to continue to mutate the target node; +the other is that we need no *additional* memory to track this state change. +However, we can't use the strategy of nilling 'w' in all cases: in particular, +when in the NodeBuilder at the root of some construction, +we need to continue to hold onto the node between when it becomes "finished" +and when Build is called; otherwise we can't actually return the value! +Different stratgies are therefore used in different parts of this package. + +Maps and lists use an internal state enum, because they already have one, +and so they might as well; there's no additional cost to this. +Since they can use this state to guard against additional mutations after "finish", +the map and list assemblers don't bother to nil their own 'w' at all. + +During recursion to assemble values _inside_ maps and lists, it's interesting: +the child assembler wrapper type takes reponsibility for nilling out +the 'w' pointer in the child assembler's state, doing this at the same time as +it updates the parent's state machine to clear proceeding with the next entry. 
+ +In the case of scalars at the root of a build, we took a shortcut: +we actually don't fence against repeat mutations at all. +*You can actually use the assign method more than once*. +We can do this without breaking safety contracts because the scalars +all have a pass-by-value phase somewhere in their lifecycle +(calling `nb.AssignString("x")`, then `n := nb.Build()`, then `nb.AssignString("y")` +won't error if `nb` is a freestanding builder for strings... but it also +won't result in mutating `n` to contain `"y"`, so overall, it's safe). + +We could normalize the case with scalars at the root of a tree so that they +error more aggressively... but currently we haven't bothered, since this would +require adding another piece of memory to the scalar builders; and meanwhile +we're not in trouble on compositional correctness. + +Note that these remarks are for the `basicnode` package, but may also +apply to other implementations too (e.g., our codegen output follows similar +overall logic). + +### NodePrototypes are available through a singleton + +Every NodePrototype available from this package is exposed as a field +in a struct of which there's one public exported instance available, +called 'Prototype'. + +This means you can use it like this: + +```go +nbm := basicnode.Prototype.Map.NewBuilder() +nbs := basicnode.Prototype.String.NewBuilder() +nba := basicnode.Prototype.Any.NewBuilder() +// etc +``` + +(If you're interested in the performance of this: it's free! +Methods called at the end of the chain are inlinable. +Since all of the types of the structures on the way there are zero-member +structs, the compiler can effectively treat them as constants, +and thus freely elide any memory dereferences that would +otherwise be necessary to get methods on such a value.) 
+ +### NodePrototypes are (also) available as exported concrete types + +The 'Prototype' singleton is one way to access the NodePrototype in this package; +their exported types are another equivalent way. + +```go +basicnode.Prototype.Map = basicnode.Prototype__Map{} +``` + +It is recommended to use the singleton style; +they compile to identical assembly, and the singleton is syntactically prettier. + +We may make these concrete types unexported in the future. +A decision on this is deferred until some time has passed and +we can accumulate reasonable certainty that there's no need for an exported type +(such as type assertions, etc). diff --git a/vendor/github.com/ipld/go-ipld-prime/node/basic/any.go b/vendor/github.com/ipld/go-ipld-prime/node/basic/any.go new file mode 100644 index 0000000000..970f98f92e --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/node/basic/any.go @@ -0,0 +1,200 @@ +package basicnode + +import ( + ipld "github.com/ipld/go-ipld-prime" +) + +var ( + //_ ipld.Node = &anyNode{} + _ ipld.NodePrototype = Prototype__Any{} + _ ipld.NodeBuilder = &anyBuilder{} + //_ ipld.NodeAssembler = &anyAssembler{} +) + +// anyNode is a union meant for alloc amortization; see anyAssembler. +// Note that anyBuilder doesn't use anyNode, because it's not aiming to amortize anything. +// +// REVIEW: if there's any point in keeping this around. It's here for completeness, +// but not currently used anywhere in package, and also not currently exported. +// type anyNode struct { +// kind ipld.ReprKind +// +// plainMap +// plainList +// plainBool +// plainInt +// plainFloat +// plainString +// plainBytes +// plainLink +// } + +// -- Node interface methods --> + +// Unimplemented at present -- see "REVIEW" comment on anyNode. + +// -- NodePrototype --> + +type Prototype__Any struct{} + +func (Prototype__Any) NewBuilder() ipld.NodeBuilder { + return &anyBuilder{} +} + +// -- NodeBuilder --> + +// anyBuilder is a builder for any kind of node. 
+// +// anyBuilder is a little unusual in its internal workings: +// unlike most builders, it doesn't embed the corresponding assembler, +// nor will it end up using anyNode, +// but instead embeds a builder for each of the kinds it might contain. +// This is because we want a more granular return at the end: +// if we used anyNode, and returned a pointer to just the relevant part of it, +// we'd have all the extra bytes of anyNode still reachable in GC terms +// for as long as that handle to the interior of it remains live. +type anyBuilder struct { + // kind is set on first interaction, and used to select which builder to delegate 'Build' to! + // As soon as it's been set to a value other than zero (being "Invalid"), all other Assign/Begin calls will fail since something is already in progress. + // May also be set to the magic value '99', which means "i dunno, I'm just carrying another node of unknown prototype". + kind ipld.ReprKind + + // Only one of the following ends up being used... + // but we don't know in advance which one, so all are embedded here. + // This uses excessive space, but amortizes allocations, and all will be + // freed as soon as the builder is done. + // Builders are only used for recursives; + // scalars are simple enough we just do them directly. + // 'scalarNode' may also hold another Node of unknown prototype (possibly not even from this package), + // in which case this is indicated by 'kind==99'. 
+ + mapBuilder plainMap__Builder + listBuilder plainList__Builder + scalarNode ipld.Node +} + +func (nb *anyBuilder) Reset() { + *nb = anyBuilder{} +} + +func (nb *anyBuilder) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + if nb.kind != ipld.ReprKind_Invalid { + panic("misuse") + } + nb.kind = ipld.ReprKind_Map + nb.mapBuilder.w = &plainMap{} + return nb.mapBuilder.BeginMap(sizeHint) +} +func (nb *anyBuilder) BeginList(sizeHint int) (ipld.ListAssembler, error) { + if nb.kind != ipld.ReprKind_Invalid { + panic("misuse") + } + nb.kind = ipld.ReprKind_List + nb.listBuilder.w = &plainList{} + return nb.listBuilder.BeginList(sizeHint) +} +func (nb *anyBuilder) AssignNull() error { + if nb.kind != ipld.ReprKind_Invalid { + panic("misuse") + } + nb.kind = ipld.ReprKind_Null + return nil +} +func (nb *anyBuilder) AssignBool(v bool) error { + if nb.kind != ipld.ReprKind_Invalid { + panic("misuse") + } + nb.kind = ipld.ReprKind_Bool + nb.scalarNode = NewBool(v) + return nil +} +func (nb *anyBuilder) AssignInt(v int) error { + if nb.kind != ipld.ReprKind_Invalid { + panic("misuse") + } + nb.kind = ipld.ReprKind_Int + nb.scalarNode = NewInt(v) + return nil +} +func (nb *anyBuilder) AssignFloat(v float64) error { + if nb.kind != ipld.ReprKind_Invalid { + panic("misuse") + } + nb.kind = ipld.ReprKind_Float + nb.scalarNode = NewFloat(v) + return nil +} +func (nb *anyBuilder) AssignString(v string) error { + if nb.kind != ipld.ReprKind_Invalid { + panic("misuse") + } + nb.kind = ipld.ReprKind_String + nb.scalarNode = NewString(v) + return nil +} +func (nb *anyBuilder) AssignBytes(v []byte) error { + if nb.kind != ipld.ReprKind_Invalid { + panic("misuse") + } + nb.kind = ipld.ReprKind_Bytes + nb.scalarNode = NewBytes(v) + return nil +} +func (nb *anyBuilder) AssignLink(v ipld.Link) error { + if nb.kind != ipld.ReprKind_Invalid { + panic("misuse") + } + nb.kind = ipld.ReprKind_Link + nb.scalarNode = NewLink(v) + return nil +} +func (nb *anyBuilder) AssignNode(v ipld.Node) 
error { + if nb.kind != ipld.ReprKind_Invalid { + panic("misuse") + } + nb.kind = 99 + nb.scalarNode = v + return nil +} +func (anyBuilder) Prototype() ipld.NodePrototype { + return Prototype__Any{} +} + +func (nb *anyBuilder) Build() ipld.Node { + switch nb.kind { + case ipld.ReprKind_Invalid: + panic("misuse") + case ipld.ReprKind_Map: + return nb.mapBuilder.Build() + case ipld.ReprKind_List: + return nb.listBuilder.Build() + case ipld.ReprKind_Null: + return ipld.Null + case ipld.ReprKind_Bool: + return nb.scalarNode + case ipld.ReprKind_Int: + return nb.scalarNode + case ipld.ReprKind_Float: + return nb.scalarNode + case ipld.ReprKind_String: + return nb.scalarNode + case ipld.ReprKind_Bytes: + return nb.scalarNode + case ipld.ReprKind_Link: + return nb.scalarNode + case 99: + return nb.scalarNode + default: + panic("unreachable") + } +} + +// -- NodeAssembler --> + +// ... oddly enough, we seem to be able to put off implementing this +// until we also implement something that goes full-hog on amortization +// and actually has a slab of `anyNode`. Which so far, nothing does. +// See "REVIEW" comment on anyNode. +// type anyAssembler struct { +// w *anyNode +// } diff --git a/vendor/github.com/ipld/go-ipld-prime/node/basic/bool.go b/vendor/github.com/ipld/go-ipld-prime/node/basic/bool.go new file mode 100644 index 0000000000..17b00937c0 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/node/basic/bool.go @@ -0,0 +1,144 @@ +package basicnode + +import ( + ipld "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/node/mixins" +) + +var ( + _ ipld.Node = plainBool(false) + _ ipld.NodePrototype = Prototype__Bool{} + _ ipld.NodeBuilder = &plainBool__Builder{} + _ ipld.NodeAssembler = &plainBool__Assembler{} +) + +func NewBool(value bool) ipld.Node { + v := plainBool(value) + return &v +} + +// plainBool is a simple boxed boolean that complies with ipld.Node. 
+type plainBool bool + +// -- Node interface methods --> + +func (plainBool) ReprKind() ipld.ReprKind { + return ipld.ReprKind_Bool +} +func (plainBool) LookupByString(string) (ipld.Node, error) { + return mixins.Bool{"bool"}.LookupByString("") +} +func (plainBool) LookupByNode(key ipld.Node) (ipld.Node, error) { + return mixins.Bool{"bool"}.LookupByNode(nil) +} +func (plainBool) LookupByIndex(idx int) (ipld.Node, error) { + return mixins.Bool{"bool"}.LookupByIndex(0) +} +func (plainBool) LookupBySegment(seg ipld.PathSegment) (ipld.Node, error) { + return mixins.Bool{"bool"}.LookupBySegment(seg) +} +func (plainBool) MapIterator() ipld.MapIterator { + return nil +} +func (plainBool) ListIterator() ipld.ListIterator { + return nil +} +func (plainBool) Length() int { + return -1 +} +func (plainBool) IsAbsent() bool { + return false +} +func (plainBool) IsNull() bool { + return false +} +func (n plainBool) AsBool() (bool, error) { + return bool(n), nil +} +func (plainBool) AsInt() (int, error) { + return mixins.Bool{"bool"}.AsInt() +} +func (plainBool) AsFloat() (float64, error) { + return mixins.Bool{"bool"}.AsFloat() +} +func (plainBool) AsString() (string, error) { + return mixins.Bool{"bool"}.AsString() +} +func (plainBool) AsBytes() ([]byte, error) { + return mixins.Bool{"bool"}.AsBytes() +} +func (plainBool) AsLink() (ipld.Link, error) { + return mixins.Bool{"bool"}.AsLink() +} +func (plainBool) Prototype() ipld.NodePrototype { + return Prototype__Bool{} +} + +// -- NodePrototype --> + +type Prototype__Bool struct{} + +func (Prototype__Bool) NewBuilder() ipld.NodeBuilder { + var w plainBool + return &plainBool__Builder{plainBool__Assembler{w: &w}} +} + +// -- NodeBuilder --> + +type plainBool__Builder struct { + plainBool__Assembler +} + +func (nb *plainBool__Builder) Build() ipld.Node { + return nb.w +} +func (nb *plainBool__Builder) Reset() { + var w plainBool + *nb = plainBool__Builder{plainBool__Assembler{w: &w}} +} + +// -- NodeAssembler --> + +type 
plainBool__Assembler struct { + w *plainBool +} + +func (plainBool__Assembler) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + return mixins.BoolAssembler{"bool"}.BeginMap(0) +} +func (plainBool__Assembler) BeginList(sizeHint int) (ipld.ListAssembler, error) { + return mixins.BoolAssembler{"bool"}.BeginList(0) +} +func (plainBool__Assembler) AssignNull() error { + return mixins.BoolAssembler{"bool"}.AssignNull() +} +func (na *plainBool__Assembler) AssignBool(v bool) error { + *na.w = plainBool(v) + return nil +} +func (plainBool__Assembler) AssignInt(int) error { + return mixins.BoolAssembler{"bool"}.AssignInt(0) +} +func (plainBool__Assembler) AssignFloat(float64) error { + return mixins.BoolAssembler{"bool"}.AssignFloat(0) +} +func (plainBool__Assembler) AssignString(string) error { + return mixins.BoolAssembler{"bool"}.AssignString("") +} +func (plainBool__Assembler) AssignBytes([]byte) error { + return mixins.BoolAssembler{"bool"}.AssignBytes(nil) +} +func (plainBool__Assembler) AssignLink(ipld.Link) error { + return mixins.BoolAssembler{"bool"}.AssignLink(nil) +} +func (na *plainBool__Assembler) AssignNode(v ipld.Node) error { + if v2, err := v.AsBool(); err != nil { + return err + } else { + *na.w = plainBool(v2) + return nil + } +} +func (plainBool__Assembler) Prototype() ipld.NodePrototype { + return Prototype__Bool{} +} diff --git a/vendor/github.com/ipld/go-ipld-prime/node/basic/bytes.go b/vendor/github.com/ipld/go-ipld-prime/node/basic/bytes.go new file mode 100644 index 0000000000..08af9f6219 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/node/basic/bytes.go @@ -0,0 +1,144 @@ +package basicnode + +import ( + ipld "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/node/mixins" +) + +var ( + _ ipld.Node = plainBytes(nil) + _ ipld.NodePrototype = Prototype__Bytes{} + _ ipld.NodeBuilder = &plainBytes__Builder{} + _ ipld.NodeAssembler = &plainBytes__Assembler{} +) + +func NewBytes(value []byte) ipld.Node { + v := 
plainBytes(value) + return &v +} + +// plainBytes is a simple boxed byte slice that complies with ipld.Node. +type plainBytes []byte + +// -- Node interface methods --> + +func (plainBytes) ReprKind() ipld.ReprKind { + return ipld.ReprKind_Bytes +} +func (plainBytes) LookupByString(string) (ipld.Node, error) { + return mixins.Bytes{"bytes"}.LookupByString("") +} +func (plainBytes) LookupByNode(key ipld.Node) (ipld.Node, error) { + return mixins.Bytes{"bytes"}.LookupByNode(nil) +} +func (plainBytes) LookupByIndex(idx int) (ipld.Node, error) { + return mixins.Bytes{"bytes"}.LookupByIndex(0) +} +func (plainBytes) LookupBySegment(seg ipld.PathSegment) (ipld.Node, error) { + return mixins.Bytes{"bytes"}.LookupBySegment(seg) +} +func (plainBytes) MapIterator() ipld.MapIterator { + return nil +} +func (plainBytes) ListIterator() ipld.ListIterator { + return nil +} +func (plainBytes) Length() int { + return -1 +} +func (plainBytes) IsAbsent() bool { + return false +} +func (plainBytes) IsNull() bool { + return false +} +func (plainBytes) AsBool() (bool, error) { + return mixins.Bytes{"bytes"}.AsBool() +} +func (plainBytes) AsInt() (int, error) { + return mixins.Bytes{"bytes"}.AsInt() +} +func (plainBytes) AsFloat() (float64, error) { + return mixins.Bytes{"bytes"}.AsFloat() +} +func (plainBytes) AsString() (string, error) { + return mixins.Bytes{"bytes"}.AsString() +} +func (n plainBytes) AsBytes() ([]byte, error) { + return []byte(n), nil +} +func (plainBytes) AsLink() (ipld.Link, error) { + return mixins.Bytes{"bytes"}.AsLink() +} +func (plainBytes) Prototype() ipld.NodePrototype { + return Prototype__Bytes{} +} + +// -- NodePrototype --> + +type Prototype__Bytes struct{} + +func (Prototype__Bytes) NewBuilder() ipld.NodeBuilder { + var w plainBytes + return &plainBytes__Builder{plainBytes__Assembler{w: &w}} +} + +// -- NodeBuilder --> + +type plainBytes__Builder struct { + plainBytes__Assembler +} + +func (nb *plainBytes__Builder) Build() ipld.Node { + return nb.w +} 
+func (nb *plainBytes__Builder) Reset() { + var w plainBytes + *nb = plainBytes__Builder{plainBytes__Assembler{w: &w}} +} + +// -- NodeAssembler --> + +type plainBytes__Assembler struct { + w *plainBytes +} + +func (plainBytes__Assembler) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + return mixins.BytesAssembler{"bytes"}.BeginMap(0) +} +func (plainBytes__Assembler) BeginList(sizeHint int) (ipld.ListAssembler, error) { + return mixins.BytesAssembler{"bytes"}.BeginList(0) +} +func (plainBytes__Assembler) AssignNull() error { + return mixins.BytesAssembler{"bytes"}.AssignNull() +} +func (plainBytes__Assembler) AssignBool(bool) error { + return mixins.BytesAssembler{"bytes"}.AssignBool(false) +} +func (plainBytes__Assembler) AssignInt(int) error { + return mixins.BytesAssembler{"bytes"}.AssignInt(0) +} +func (plainBytes__Assembler) AssignFloat(float64) error { + return mixins.BytesAssembler{"bytes"}.AssignFloat(0) +} +func (plainBytes__Assembler) AssignString(string) error { + return mixins.BytesAssembler{"bytes"}.AssignString("") +} +func (na *plainBytes__Assembler) AssignBytes(v []byte) error { + *na.w = plainBytes(v) + return nil +} +func (plainBytes__Assembler) AssignLink(ipld.Link) error { + return mixins.BytesAssembler{"bytes"}.AssignLink(nil) +} +func (na *plainBytes__Assembler) AssignNode(v ipld.Node) error { + if v2, err := v.AsBytes(); err != nil { + return err + } else { + *na.w = plainBytes(v2) + return nil + } +} +func (plainBytes__Assembler) Prototype() ipld.NodePrototype { + return Prototype__Bytes{} +} diff --git a/vendor/github.com/ipld/go-ipld-prime/node/basic/float.go b/vendor/github.com/ipld/go-ipld-prime/node/basic/float.go new file mode 100644 index 0000000000..0449a2d8a1 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/node/basic/float.go @@ -0,0 +1,144 @@ +package basicnode + +import ( + ipld "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/node/mixins" +) + +var ( + _ ipld.Node = plainFloat(0) + _ 
ipld.NodePrototype = Prototype__Float{} + _ ipld.NodeBuilder = &plainFloat__Builder{} + _ ipld.NodeAssembler = &plainFloat__Assembler{} +) + +func NewFloat(value float64) ipld.Node { + v := plainFloat(value) + return &v +} + +// plainFloat is a simple boxed float that complies with ipld.Node. +type plainFloat float64 + +// -- Node interface methods --> + +func (plainFloat) ReprKind() ipld.ReprKind { + return ipld.ReprKind_Float +} +func (plainFloat) LookupByString(string) (ipld.Node, error) { + return mixins.Float{"float"}.LookupByString("") +} +func (plainFloat) LookupByNode(key ipld.Node) (ipld.Node, error) { + return mixins.Float{"float"}.LookupByNode(nil) +} +func (plainFloat) LookupByIndex(idx int) (ipld.Node, error) { + return mixins.Float{"float"}.LookupByIndex(0) +} +func (plainFloat) LookupBySegment(seg ipld.PathSegment) (ipld.Node, error) { + return mixins.Float{"float"}.LookupBySegment(seg) +} +func (plainFloat) MapIterator() ipld.MapIterator { + return nil +} +func (plainFloat) ListIterator() ipld.ListIterator { + return nil +} +func (plainFloat) Length() int { + return -1 +} +func (plainFloat) IsAbsent() bool { + return false +} +func (plainFloat) IsNull() bool { + return false +} +func (plainFloat) AsBool() (bool, error) { + return mixins.Float{"float"}.AsBool() +} +func (plainFloat) AsInt() (int, error) { + return mixins.Float{"float"}.AsInt() +} +func (n plainFloat) AsFloat() (float64, error) { + return float64(n), nil +} +func (plainFloat) AsString() (string, error) { + return mixins.Float{"float"}.AsString() +} +func (plainFloat) AsBytes() ([]byte, error) { + return mixins.Float{"float"}.AsBytes() +} +func (plainFloat) AsLink() (ipld.Link, error) { + return mixins.Float{"float"}.AsLink() +} +func (plainFloat) Prototype() ipld.NodePrototype { + return Prototype__Float{} +} + +// -- NodePrototype --> + +type Prototype__Float struct{} + +func (Prototype__Float) NewBuilder() ipld.NodeBuilder { + var w plainFloat + return 
&plainFloat__Builder{plainFloat__Assembler{w: &w}} +} + +// -- NodeBuilder --> + +type plainFloat__Builder struct { + plainFloat__Assembler +} + +func (nb *plainFloat__Builder) Build() ipld.Node { + return nb.w +} +func (nb *plainFloat__Builder) Reset() { + var w plainFloat + *nb = plainFloat__Builder{plainFloat__Assembler{w: &w}} +} + +// -- NodeAssembler --> + +type plainFloat__Assembler struct { + w *plainFloat +} + +func (plainFloat__Assembler) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + return mixins.FloatAssembler{"float"}.BeginMap(0) +} +func (plainFloat__Assembler) BeginList(sizeHint int) (ipld.ListAssembler, error) { + return mixins.FloatAssembler{"float"}.BeginList(0) +} +func (plainFloat__Assembler) AssignNull() error { + return mixins.FloatAssembler{"float"}.AssignNull() +} +func (plainFloat__Assembler) AssignBool(bool) error { + return mixins.FloatAssembler{"float"}.AssignBool(false) +} +func (plainFloat__Assembler) AssignInt(int) error { + return mixins.FloatAssembler{"float"}.AssignInt(0) +} +func (na *plainFloat__Assembler) AssignFloat(v float64) error { + *na.w = plainFloat(v) + return nil +} +func (plainFloat__Assembler) AssignString(string) error { + return mixins.FloatAssembler{"float"}.AssignString("") +} +func (plainFloat__Assembler) AssignBytes([]byte) error { + return mixins.FloatAssembler{"float"}.AssignBytes(nil) +} +func (plainFloat__Assembler) AssignLink(ipld.Link) error { + return mixins.FloatAssembler{"float"}.AssignLink(nil) +} +func (na *plainFloat__Assembler) AssignNode(v ipld.Node) error { + if v2, err := v.AsFloat(); err != nil { + return err + } else { + *na.w = plainFloat(v2) + return nil + } +} +func (plainFloat__Assembler) Prototype() ipld.NodePrototype { + return Prototype__Float{} +} diff --git a/vendor/github.com/ipld/go-ipld-prime/node/basic/int.go b/vendor/github.com/ipld/go-ipld-prime/node/basic/int.go new file mode 100644 index 0000000000..82d9ecc780 --- /dev/null +++ 
b/vendor/github.com/ipld/go-ipld-prime/node/basic/int.go @@ -0,0 +1,144 @@ +package basicnode + +import ( + ipld "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/node/mixins" +) + +var ( + _ ipld.Node = plainInt(0) + _ ipld.NodePrototype = Prototype__Int{} + _ ipld.NodeBuilder = &plainInt__Builder{} + _ ipld.NodeAssembler = &plainInt__Assembler{} +) + +func NewInt(value int) ipld.Node { + v := plainInt(value) + return &v +} + +// plainInt is a simple boxed int that complies with ipld.Node. +type plainInt int + +// -- Node interface methods --> + +func (plainInt) ReprKind() ipld.ReprKind { + return ipld.ReprKind_Int +} +func (plainInt) LookupByString(string) (ipld.Node, error) { + return mixins.Int{"int"}.LookupByString("") +} +func (plainInt) LookupByNode(key ipld.Node) (ipld.Node, error) { + return mixins.Int{"int"}.LookupByNode(nil) +} +func (plainInt) LookupByIndex(idx int) (ipld.Node, error) { + return mixins.Int{"int"}.LookupByIndex(0) +} +func (plainInt) LookupBySegment(seg ipld.PathSegment) (ipld.Node, error) { + return mixins.Int{"int"}.LookupBySegment(seg) +} +func (plainInt) MapIterator() ipld.MapIterator { + return nil +} +func (plainInt) ListIterator() ipld.ListIterator { + return nil +} +func (plainInt) Length() int { + return -1 +} +func (plainInt) IsAbsent() bool { + return false +} +func (plainInt) IsNull() bool { + return false +} +func (plainInt) AsBool() (bool, error) { + return mixins.Int{"int"}.AsBool() +} +func (n plainInt) AsInt() (int, error) { + return int(n), nil +} +func (plainInt) AsFloat() (float64, error) { + return mixins.Int{"int"}.AsFloat() +} +func (plainInt) AsString() (string, error) { + return mixins.Int{"int"}.AsString() +} +func (plainInt) AsBytes() ([]byte, error) { + return mixins.Int{"int"}.AsBytes() +} +func (plainInt) AsLink() (ipld.Link, error) { + return mixins.Int{"int"}.AsLink() +} +func (plainInt) Prototype() ipld.NodePrototype { + return Prototype__Int{} +} + +// -- NodePrototype --> + +type 
Prototype__Int struct{} + +func (Prototype__Int) NewBuilder() ipld.NodeBuilder { + var w plainInt + return &plainInt__Builder{plainInt__Assembler{w: &w}} +} + +// -- NodeBuilder --> + +type plainInt__Builder struct { + plainInt__Assembler +} + +func (nb *plainInt__Builder) Build() ipld.Node { + return nb.w +} +func (nb *plainInt__Builder) Reset() { + var w plainInt + *nb = plainInt__Builder{plainInt__Assembler{w: &w}} +} + +// -- NodeAssembler --> + +type plainInt__Assembler struct { + w *plainInt +} + +func (plainInt__Assembler) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + return mixins.IntAssembler{"int"}.BeginMap(0) +} +func (plainInt__Assembler) BeginList(sizeHint int) (ipld.ListAssembler, error) { + return mixins.IntAssembler{"int"}.BeginList(0) +} +func (plainInt__Assembler) AssignNull() error { + return mixins.IntAssembler{"int"}.AssignNull() +} +func (plainInt__Assembler) AssignBool(bool) error { + return mixins.IntAssembler{"int"}.AssignBool(false) +} +func (na *plainInt__Assembler) AssignInt(v int) error { + *na.w = plainInt(v) + return nil +} +func (plainInt__Assembler) AssignFloat(float64) error { + return mixins.IntAssembler{"int"}.AssignFloat(0) +} +func (plainInt__Assembler) AssignString(string) error { + return mixins.IntAssembler{"int"}.AssignString("") +} +func (plainInt__Assembler) AssignBytes([]byte) error { + return mixins.IntAssembler{"int"}.AssignBytes(nil) +} +func (plainInt__Assembler) AssignLink(ipld.Link) error { + return mixins.IntAssembler{"int"}.AssignLink(nil) +} +func (na *plainInt__Assembler) AssignNode(v ipld.Node) error { + if v2, err := v.AsInt(); err != nil { + return err + } else { + *na.w = plainInt(v2) + return nil + } +} +func (plainInt__Assembler) Prototype() ipld.NodePrototype { + return Prototype__Int{} +} diff --git a/vendor/github.com/ipld/go-ipld-prime/node/basic/link.go b/vendor/github.com/ipld/go-ipld-prime/node/basic/link.go new file mode 100644 index 0000000000..1f26da8513 --- /dev/null +++ 
b/vendor/github.com/ipld/go-ipld-prime/node/basic/link.go @@ -0,0 +1,145 @@ +package basicnode + +import ( + ipld "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/node/mixins" +) + +var ( + _ ipld.Node = &plainLink{} + _ ipld.NodePrototype = Prototype__Link{} + _ ipld.NodeBuilder = &plainLink__Builder{} + _ ipld.NodeAssembler = &plainLink__Assembler{} +) + +func NewLink(value ipld.Link) ipld.Node { + return &plainLink{value} +} + +// plainLink is a simple box around a Link that complies with ipld.Node. +type plainLink struct { + x ipld.Link +} + +// -- Node interface methods --> + +func (plainLink) ReprKind() ipld.ReprKind { + return ipld.ReprKind_Link +} +func (plainLink) LookupByString(string) (ipld.Node, error) { + return mixins.Link{"link"}.LookupByString("") +} +func (plainLink) LookupByNode(key ipld.Node) (ipld.Node, error) { + return mixins.Link{"link"}.LookupByNode(nil) +} +func (plainLink) LookupByIndex(idx int) (ipld.Node, error) { + return mixins.Link{"link"}.LookupByIndex(0) +} +func (plainLink) LookupBySegment(seg ipld.PathSegment) (ipld.Node, error) { + return mixins.Link{"link"}.LookupBySegment(seg) +} +func (plainLink) MapIterator() ipld.MapIterator { + return nil +} +func (plainLink) ListIterator() ipld.ListIterator { + return nil +} +func (plainLink) Length() int { + return -1 +} +func (plainLink) IsAbsent() bool { + return false +} +func (plainLink) IsNull() bool { + return false +} +func (plainLink) AsBool() (bool, error) { + return mixins.Link{"link"}.AsBool() +} +func (plainLink) AsInt() (int, error) { + return mixins.Link{"link"}.AsInt() +} +func (plainLink) AsFloat() (float64, error) { + return mixins.Link{"link"}.AsFloat() +} +func (plainLink) AsString() (string, error) { + return mixins.Link{"link"}.AsString() +} +func (plainLink) AsBytes() ([]byte, error) { + return mixins.Link{"link"}.AsBytes() +} +func (n *plainLink) AsLink() (ipld.Link, error) { + return n.x, nil +} +func (plainLink) Prototype() ipld.NodePrototype { + 
return Prototype__Link{} +} + +// -- NodePrototype --> + +type Prototype__Link struct{} + +func (Prototype__Link) NewBuilder() ipld.NodeBuilder { + var w plainLink + return &plainLink__Builder{plainLink__Assembler{w: &w}} +} + +// -- NodeBuilder --> + +type plainLink__Builder struct { + plainLink__Assembler +} + +func (nb *plainLink__Builder) Build() ipld.Node { + return nb.w +} +func (nb *plainLink__Builder) Reset() { + var w plainLink + *nb = plainLink__Builder{plainLink__Assembler{w: &w}} +} + +// -- NodeAssembler --> + +type plainLink__Assembler struct { + w *plainLink +} + +func (plainLink__Assembler) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + return mixins.LinkAssembler{"link"}.BeginMap(0) +} +func (plainLink__Assembler) BeginList(sizeHint int) (ipld.ListAssembler, error) { + return mixins.LinkAssembler{"link"}.BeginList(0) +} +func (plainLink__Assembler) AssignNull() error { + return mixins.LinkAssembler{"link"}.AssignNull() +} +func (plainLink__Assembler) AssignBool(bool) error { + return mixins.LinkAssembler{"link"}.AssignBool(false) +} +func (plainLink__Assembler) AssignInt(int) error { + return mixins.LinkAssembler{"link"}.AssignInt(0) +} +func (plainLink__Assembler) AssignFloat(float64) error { + return mixins.LinkAssembler{"link"}.AssignFloat(0) +} +func (plainLink__Assembler) AssignString(string) error { + return mixins.LinkAssembler{"link"}.AssignString("") +} +func (plainLink__Assembler) AssignBytes([]byte) error { + return mixins.LinkAssembler{"link"}.AssignBytes(nil) +} +func (na *plainLink__Assembler) AssignLink(v ipld.Link) error { + na.w.x = v + return nil +} +func (na *plainLink__Assembler) AssignNode(v ipld.Node) error { + if v2, err := v.AsLink(); err != nil { + return err + } else { + na.w.x = v2 + return nil + } +} +func (plainLink__Assembler) Prototype() ipld.NodePrototype { + return Prototype__Link{} +} diff --git a/vendor/github.com/ipld/go-ipld-prime/node/basic/list.go 
b/vendor/github.com/ipld/go-ipld-prime/node/basic/list.go new file mode 100644 index 0000000000..f8190a8d32 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/node/basic/list.go @@ -0,0 +1,360 @@ +package basicnode + +import ( + ipld "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/node/mixins" +) + +var ( + _ ipld.Node = &plainList{} + _ ipld.NodePrototype = Prototype__List{} + _ ipld.NodeBuilder = &plainList__Builder{} + _ ipld.NodeAssembler = &plainList__Assembler{} +) + +// plainList is a concrete type that provides a list-kind ipld.Node. +// It can contain any kind of value. +// plainList is also embedded in the 'any' struct and usable from there. +type plainList struct { + x []ipld.Node +} + +// -- Node interface methods --> + +func (plainList) ReprKind() ipld.ReprKind { + return ipld.ReprKind_List +} +func (plainList) LookupByString(string) (ipld.Node, error) { + return mixins.List{"list"}.LookupByString("") +} +func (plainList) LookupByNode(ipld.Node) (ipld.Node, error) { + return mixins.List{"list"}.LookupByNode(nil) +} +func (n *plainList) LookupByIndex(idx int) (ipld.Node, error) { + if n.Length() <= idx { + return nil, ipld.ErrNotExists{ipld.PathSegmentOfInt(idx)} + } + return n.x[idx], nil +} +func (n *plainList) LookupBySegment(seg ipld.PathSegment) (ipld.Node, error) { + idx, err := seg.Index() + if err != nil { + return nil, ipld.ErrInvalidSegmentForList{TroubleSegment: seg, Reason: err} + } + return n.LookupByIndex(idx) +} +func (plainList) MapIterator() ipld.MapIterator { + return nil +} +func (n *plainList) ListIterator() ipld.ListIterator { + return &plainList_ListIterator{n, 0} +} +func (n *plainList) Length() int { + return len(n.x) +} +func (plainList) IsAbsent() bool { + return false +} +func (plainList) IsNull() bool { + return false +} +func (plainList) AsBool() (bool, error) { + return mixins.List{"list"}.AsBool() +} +func (plainList) AsInt() (int, error) { + return mixins.List{"list"}.AsInt() +} +func (plainList) 
AsFloat() (float64, error) { + return mixins.List{"list"}.AsFloat() +} +func (plainList) AsString() (string, error) { + return mixins.List{"list"}.AsString() +} +func (plainList) AsBytes() ([]byte, error) { + return mixins.List{"list"}.AsBytes() +} +func (plainList) AsLink() (ipld.Link, error) { + return mixins.List{"list"}.AsLink() +} +func (plainList) Prototype() ipld.NodePrototype { + return Prototype__List{} +} + +type plainList_ListIterator struct { + n *plainList + idx int +} + +func (itr *plainList_ListIterator) Next() (idx int, v ipld.Node, _ error) { + if itr.Done() { + return -1, nil, ipld.ErrIteratorOverread{} + } + v = itr.n.x[itr.idx] + idx = itr.idx + itr.idx++ + return +} +func (itr *plainList_ListIterator) Done() bool { + return itr.idx >= len(itr.n.x) +} + +// -- NodePrototype --> + +type Prototype__List struct{} + +func (Prototype__List) NewBuilder() ipld.NodeBuilder { + return &plainList__Builder{plainList__Assembler{w: &plainList{}}} +} + +// -- NodeBuilder --> + +type plainList__Builder struct { + plainList__Assembler +} + +func (nb *plainList__Builder) Build() ipld.Node { + if nb.state != laState_finished { + panic("invalid state: assembler must be 'finished' before Build can be called!") + } + return nb.w +} +func (nb *plainList__Builder) Reset() { + *nb = plainList__Builder{} + nb.w = &plainList{} +} + +// -- NodeAssembler --> + +type plainList__Assembler struct { + w *plainList + + va plainList__ValueAssembler + + state laState +} +type plainList__ValueAssembler struct { + la *plainList__Assembler +} + +// laState is an enum of the state machine for a list assembler. +// (this might be something to export reusably, but it's also very much an impl detail that need not be seen, so, dubious.) +// it's similar to maState for maps, but has fewer states because we never have keys to assemble. 
+type laState uint8 + +const ( + laState_initial laState = iota // also the 'expect value or finish' state + laState_midValue // waiting for a 'finished' state in the ValueAssembler. + laState_finished // 'w' will also be nil, but this is a politer statement +) + +func (plainList__Assembler) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + return mixins.ListAssembler{"list"}.BeginMap(0) +} +func (na *plainList__Assembler) BeginList(sizeHint int) (ipld.ListAssembler, error) { + if sizeHint < 0 { + sizeHint = 0 + } + // Allocate storage space. + na.w.x = make([]ipld.Node, 0, sizeHint) + // That's it; return self as the ListAssembler. We already have all the right methods on this structure. + return na, nil +} +func (plainList__Assembler) AssignNull() error { + return mixins.ListAssembler{"list"}.AssignNull() +} +func (plainList__Assembler) AssignBool(bool) error { + return mixins.ListAssembler{"list"}.AssignBool(false) +} +func (plainList__Assembler) AssignInt(int) error { + return mixins.ListAssembler{"list"}.AssignInt(0) +} +func (plainList__Assembler) AssignFloat(float64) error { + return mixins.ListAssembler{"list"}.AssignFloat(0) +} +func (plainList__Assembler) AssignString(string) error { + return mixins.ListAssembler{"list"}.AssignString("") +} +func (plainList__Assembler) AssignBytes([]byte) error { + return mixins.ListAssembler{"list"}.AssignBytes(nil) +} +func (plainList__Assembler) AssignLink(ipld.Link) error { + return mixins.ListAssembler{"list"}.AssignLink(nil) +} +func (na *plainList__Assembler) AssignNode(v ipld.Node) error { + // Sanity check, then update, assembler state. + // Update of state to 'finished' comes later; where exactly depends on if shortcuts apply. + if na.state != laState_initial { + panic("misuse") + } + // Copy the content. + if v2, ok := v.(*plainList); ok { // if our own type: shortcut. + // Copy the structure by value. 
+ // This means we'll have pointers into the same internal maps and slices; + // this is okay, because the Node type promises it's immutable, and we are going to instantly finish ourselves to also maintain that. + // FIXME: the shortcut behaves differently than the long way: it discards any existing progress. Doesn't violate immut, but is odd. + *na.w = *v2 + na.state = laState_finished + return nil + } + // If the above shortcut didn't work, resort to a generic copy. + // We call AssignNode for all the child values, giving them a chance to hit shortcuts even if we didn't. + if v.ReprKind() != ipld.ReprKind_List { + return ipld.ErrWrongKind{TypeName: "list", MethodName: "AssignNode", AppropriateKind: ipld.ReprKindSet_JustList, ActualKind: v.ReprKind()} + } + itr := v.ListIterator() + for !itr.Done() { + _, v, err := itr.Next() + if err != nil { + return err + } + if err := na.AssembleValue().AssignNode(v); err != nil { + return err + } + } + return na.Finish() +} +func (plainList__Assembler) Prototype() ipld.NodePrototype { + return Prototype__List{} +} + +// -- ListAssembler --> + +// AssembleValue is part of conforming to ListAssembler, which we do on +// plainList__Assembler so that BeginList can just return a retyped pointer rather than new object. +func (la *plainList__Assembler) AssembleValue() ipld.NodeAssembler { + // Sanity check, then update, assembler state. + if la.state != laState_initial { + panic("misuse") + } + la.state = laState_midValue + // Make value assembler valid by giving it pointer back to whole 'la'; yield it. + la.va.la = la + return &la.va +} + +// Finish is part of conforming to ListAssembler, which we do on +// plainList__Assembler so that BeginList can just return a retyped pointer rather than new object. +func (la *plainList__Assembler) Finish() error { + // Sanity check, then update, assembler state. 
+ if la.state != laState_initial { + panic("misuse") + } + la.state = laState_finished + // validators could run and report errors promptly, if this type had any. + return nil +} +func (plainList__Assembler) ValuePrototype(_ int) ipld.NodePrototype { + return Prototype__Any{} +} + +// -- ListAssembler.ValueAssembler --> + +func (lva *plainList__ValueAssembler) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + ma := plainList__ValueAssemblerMap{} + ma.ca.w = &plainMap{} + ma.p = lva.la + _, err := ma.ca.BeginMap(sizeHint) + return &ma, err +} +func (lva *plainList__ValueAssembler) BeginList(sizeHint int) (ipld.ListAssembler, error) { + la := plainList__ValueAssemblerList{} + la.ca.w = &plainList{} + la.p = lva.la + _, err := la.ca.BeginList(sizeHint) + return &la, err +} +func (lva *plainList__ValueAssembler) AssignNull() error { + return lva.AssignNode(ipld.Null) +} +func (lva *plainList__ValueAssembler) AssignBool(v bool) error { + vb := plainBool(v) + return lva.AssignNode(&vb) +} +func (lva *plainList__ValueAssembler) AssignInt(v int) error { + vb := plainInt(v) + return lva.AssignNode(&vb) +} +func (lva *plainList__ValueAssembler) AssignFloat(v float64) error { + vb := plainFloat(v) + return lva.AssignNode(&vb) +} +func (lva *plainList__ValueAssembler) AssignString(v string) error { + vb := plainString(v) + return lva.AssignNode(&vb) +} +func (lva *plainList__ValueAssembler) AssignBytes(v []byte) error { + vb := plainBytes(v) + return lva.AssignNode(&vb) +} +func (lva *plainList__ValueAssembler) AssignLink(v ipld.Link) error { + vb := plainLink{v} + return lva.AssignNode(&vb) +} +func (lva *plainList__ValueAssembler) AssignNode(v ipld.Node) error { + lva.la.w.x = append(lva.la.w.x, v) + lva.la.state = laState_initial + lva.la = nil // invalidate self to prevent further incorrect use. 
+ return nil +} +func (plainList__ValueAssembler) Prototype() ipld.NodePrototype { + return Prototype__Any{} +} + +type plainList__ValueAssemblerMap struct { + ca plainMap__Assembler + p *plainList__Assembler // pointer back to parent, for final insert and state bump +} + +// we briefly state only the methods we need to delegate here. +// just embedding plainMap__Assembler also behaves correctly, +// but causes a lot of unnecessary autogenerated functions in the final binary. + +func (ma *plainList__ValueAssemblerMap) AssembleEntry(k string) (ipld.NodeAssembler, error) { + return ma.ca.AssembleEntry(k) +} +func (ma *plainList__ValueAssemblerMap) AssembleKey() ipld.NodeAssembler { + return ma.ca.AssembleKey() +} +func (ma *plainList__ValueAssemblerMap) AssembleValue() ipld.NodeAssembler { + return ma.ca.AssembleValue() +} +func (plainList__ValueAssemblerMap) KeyPrototype() ipld.NodePrototype { + return Prototype__String{} +} +func (plainList__ValueAssemblerMap) ValuePrototype(_ string) ipld.NodePrototype { + return Prototype__Any{} +} + +func (ma *plainList__ValueAssemblerMap) Finish() error { + if err := ma.ca.Finish(); err != nil { + return err + } + w := ma.ca.w + ma.ca.w = nil + return ma.p.va.AssignNode(w) +} + +type plainList__ValueAssemblerList struct { + ca plainList__Assembler + p *plainList__Assembler // pointer back to parent, for final insert and state bump +} + +// we briefly state only the methods we need to delegate here. +// just embedding plainList__Assembler also behaves correctly, +// but causes a lot of unnecessary autogenerated functions in the final binary. 
+ +func (la *plainList__ValueAssemblerList) AssembleValue() ipld.NodeAssembler { + return la.ca.AssembleValue() +} +func (plainList__ValueAssemblerList) ValuePrototype(_ int) ipld.NodePrototype { + return Prototype__Any{} +} + +func (la *plainList__ValueAssemblerList) Finish() error { + if err := la.ca.Finish(); err != nil { + return err + } + w := la.ca.w + la.ca.w = nil + return la.p.va.AssignNode(w) +} diff --git a/vendor/github.com/ipld/go-ipld-prime/node/basic/map.go b/vendor/github.com/ipld/go-ipld-prime/node/basic/map.go new file mode 100644 index 0000000000..ef8ff3de34 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/node/basic/map.go @@ -0,0 +1,474 @@ +package basicnode + +import ( + "fmt" + + ipld "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/node/mixins" +) + +var ( + _ ipld.Node = &plainMap{} + _ ipld.NodePrototype = Prototype__Map{} + _ ipld.NodeBuilder = &plainMap__Builder{} + _ ipld.NodeAssembler = &plainMap__Assembler{} +) + +// plainMap is a concrete type that provides a map-kind ipld.Node. +// It can contain any kind of value. +// plainMap is also embedded in the 'any' struct and usable from there. +type plainMap struct { + m map[string]ipld.Node // string key -- even if a runtime schema wrapper is using us for storage, we must have a comparable type here, and string is all we know. + t []plainMap__Entry // table for fast iteration, order keeping, and yielding pointers to enable alloc/conv amortization. +} + +type plainMap__Entry struct { + k plainString // address of this used when we return keys as nodes, such as in iterators. Need in one place to amortize shifts to heap when ptr'ing for iface. + v ipld.Node // identical to map values. keeping them here simplifies iteration. (in codegen'd maps, this position is also part of amortization, but in this implementation, that's less useful.) + // note on alternate implementations: 'v' could also use the 'any' type, and thus amortize value allocations. 
the memory size trade would be large however, so we don't, here. +} + +// -- Node interface methods --> + +func (plainMap) ReprKind() ipld.ReprKind { + return ipld.ReprKind_Map +} +func (n *plainMap) LookupByString(key string) (ipld.Node, error) { + v, exists := n.m[key] + if !exists { + return nil, ipld.ErrNotExists{ipld.PathSegmentOfString(key)} + } + return v, nil +} +func (n *plainMap) LookupByNode(key ipld.Node) (ipld.Node, error) { + ks, err := key.AsString() + if err != nil { + return nil, err + } + return n.LookupByString(ks) +} +func (plainMap) LookupByIndex(idx int) (ipld.Node, error) { + return mixins.Map{"map"}.LookupByIndex(0) +} +func (n *plainMap) LookupBySegment(seg ipld.PathSegment) (ipld.Node, error) { + return n.LookupByString(seg.String()) +} +func (n *plainMap) MapIterator() ipld.MapIterator { + return &plainMap_MapIterator{n, 0} +} +func (plainMap) ListIterator() ipld.ListIterator { + return nil +} +func (n *plainMap) Length() int { + return len(n.t) +} +func (plainMap) IsAbsent() bool { + return false +} +func (plainMap) IsNull() bool { + return false +} +func (plainMap) AsBool() (bool, error) { + return mixins.Map{"map"}.AsBool() +} +func (plainMap) AsInt() (int, error) { + return mixins.Map{"map"}.AsInt() +} +func (plainMap) AsFloat() (float64, error) { + return mixins.Map{"map"}.AsFloat() +} +func (plainMap) AsString() (string, error) { + return mixins.Map{"map"}.AsString() +} +func (plainMap) AsBytes() ([]byte, error) { + return mixins.Map{"map"}.AsBytes() +} +func (plainMap) AsLink() (ipld.Link, error) { + return mixins.Map{"map"}.AsLink() +} +func (plainMap) Prototype() ipld.NodePrototype { + return Prototype__Map{} +} + +type plainMap_MapIterator struct { + n *plainMap + idx int +} + +func (itr *plainMap_MapIterator) Next() (k ipld.Node, v ipld.Node, _ error) { + if itr.Done() { + return nil, nil, ipld.ErrIteratorOverread{} + } + k = &itr.n.t[itr.idx].k + v = itr.n.t[itr.idx].v + itr.idx++ + return +} +func (itr *plainMap_MapIterator) 
Done() bool { + return itr.idx >= len(itr.n.t) +} + +// -- NodePrototype --> + +type Prototype__Map struct{} + +func (Prototype__Map) NewBuilder() ipld.NodeBuilder { + return &plainMap__Builder{plainMap__Assembler{w: &plainMap{}}} +} + +// -- NodeBuilder --> + +type plainMap__Builder struct { + plainMap__Assembler +} + +func (nb *plainMap__Builder) Build() ipld.Node { + if nb.state != maState_finished { + panic("invalid state: assembler must be 'finished' before Build can be called!") + } + return nb.w +} +func (nb *plainMap__Builder) Reset() { + *nb = plainMap__Builder{} + nb.w = &plainMap{} +} + +// -- NodeAssembler --> + +type plainMap__Assembler struct { + w *plainMap + + ka plainMap__KeyAssembler + va plainMap__ValueAssembler + + state maState +} +type plainMap__KeyAssembler struct { + ma *plainMap__Assembler +} +type plainMap__ValueAssembler struct { + ma *plainMap__Assembler +} + +// maState is an enum of the state machine for a map assembler. +// (this might be something to export reusably, but it's also very much an impl detail that need not be seen, so, dubious.) +type maState uint8 + +const ( + maState_initial maState = iota // also the 'expect key or finish' state + maState_midKey // waiting for a 'finished' state in the KeyAssembler. + maState_expectValue // 'AssembleValue' is the only valid next step + maState_midValue // waiting for a 'finished' state in the ValueAssembler. + maState_finished // 'w' will also be nil, but this is a politer statement +) + +func (na *plainMap__Assembler) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + if sizeHint < 0 { + sizeHint = 0 + } + // Allocate storage space. + na.w.t = make([]plainMap__Entry, 0, sizeHint) + na.w.m = make(map[string]ipld.Node, sizeHint) + // That's it; return self as the MapAssembler. We already have all the right methods on this structure. 
+ return na, nil +} +func (plainMap__Assembler) BeginList(sizeHint int) (ipld.ListAssembler, error) { + return mixins.MapAssembler{"map"}.BeginList(0) +} +func (plainMap__Assembler) AssignNull() error { + return mixins.MapAssembler{"map"}.AssignNull() +} +func (plainMap__Assembler) AssignBool(bool) error { + return mixins.MapAssembler{"map"}.AssignBool(false) +} +func (plainMap__Assembler) AssignInt(int) error { + return mixins.MapAssembler{"map"}.AssignInt(0) +} +func (plainMap__Assembler) AssignFloat(float64) error { + return mixins.MapAssembler{"map"}.AssignFloat(0) +} +func (plainMap__Assembler) AssignString(string) error { + return mixins.MapAssembler{"map"}.AssignString("") +} +func (plainMap__Assembler) AssignBytes([]byte) error { + return mixins.MapAssembler{"map"}.AssignBytes(nil) +} +func (plainMap__Assembler) AssignLink(ipld.Link) error { + return mixins.MapAssembler{"map"}.AssignLink(nil) +} +func (na *plainMap__Assembler) AssignNode(v ipld.Node) error { + // Sanity check assembler state. + // Update of state to 'finished' comes later; where exactly depends on if shortcuts apply. + if na.state != maState_initial { + panic("misuse") + } + // Copy the content. + if v2, ok := v.(*plainMap); ok { // if our own type: shortcut. + // Copy the structure by value. + // This means we'll have pointers into the same internal maps and slices; + // this is okay, because the Node type promises it's immutable, and we are going to instantly finish ourselves to also maintain that. + // FIXME: the shortcut behaves differently than the long way: it discards any existing progress. Doesn't violate immut, but is odd. + *na.w = *v2 + na.state = maState_finished + return nil + } + // If the above shortcut didn't work, resort to a generic copy. + // We call AssignNode for all the child values, giving them a chance to hit shortcuts even if we didn't. 
+ if v.ReprKind() != ipld.ReprKind_Map { + return ipld.ErrWrongKind{TypeName: "map", MethodName: "AssignNode", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: v.ReprKind()} + } + itr := v.MapIterator() + for !itr.Done() { + k, v, err := itr.Next() + if err != nil { + return err + } + if err := na.AssembleKey().AssignNode(k); err != nil { + return err + } + if err := na.AssembleValue().AssignNode(v); err != nil { + return err + } + } + return na.Finish() +} +func (plainMap__Assembler) Prototype() ipld.NodePrototype { + return Prototype__Map{} +} + +// -- MapAssembler --> + +// AssembleEntry is part of conforming to MapAssembler, which we do on +// plainMap__Assembler so that BeginMap can just return a retyped pointer rather than new object. +func (ma *plainMap__Assembler) AssembleEntry(k string) (ipld.NodeAssembler, error) { + // Sanity check assembler state. + // Update of state comes after possible key rejection. + if ma.state != maState_initial { + panic("misuse") + } + // Check for dup keys; error if so. + _, exists := ma.w.m[k] + if exists { + return nil, ipld.ErrRepeatedMapKey{plainString(k)} + } + ma.state = maState_midValue + ma.w.t = append(ma.w.t, plainMap__Entry{k: plainString(k)}) + // Make value assembler valid by giving it pointer back to whole 'ma'; yield it. + ma.va.ma = ma + return &ma.va, nil +} + +// AssembleKey is part of conforming to MapAssembler, which we do on +// plainMap__Assembler so that BeginMap can just return a retyped pointer rather than new object. +func (ma *plainMap__Assembler) AssembleKey() ipld.NodeAssembler { + // Sanity check, then update, assembler state. + if ma.state != maState_initial { + panic("misuse") + } + ma.state = maState_midKey + // Make key assembler valid by giving it pointer back to whole 'ma'; yield it. 
+ ma.ka.ma = ma + return &ma.ka +} + +// AssembleValue is part of conforming to MapAssembler, which we do on +// plainMap__Assembler so that BeginMap can just return a retyped pointer rather than new object. +func (ma *plainMap__Assembler) AssembleValue() ipld.NodeAssembler { + // Sanity check, then update, assembler state. + if ma.state != maState_expectValue { + panic("misuse") + } + ma.state = maState_midValue + // Make value assembler valid by giving it pointer back to whole 'ma'; yield it. + ma.va.ma = ma + return &ma.va +} + +// Finish is part of conforming to MapAssembler, which we do on +// plainMap__Assembler so that BeginMap can just return a retyped pointer rather than new object. +func (ma *plainMap__Assembler) Finish() error { + // Sanity check, then update, assembler state. + if ma.state != maState_initial { + panic("misuse") + } + ma.state = maState_finished + // validators could run and report errors promptly, if this type had any. + return nil +} +func (plainMap__Assembler) KeyPrototype() ipld.NodePrototype { + return Prototype__String{} +} +func (plainMap__Assembler) ValuePrototype(_ string) ipld.NodePrototype { + return Prototype__Any{} +} + +// -- MapAssembler.KeyAssembler --> + +func (plainMap__KeyAssembler) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + return mixins.StringAssembler{"string"}.BeginMap(0) +} +func (plainMap__KeyAssembler) BeginList(sizeHint int) (ipld.ListAssembler, error) { + return mixins.StringAssembler{"string"}.BeginList(0) +} +func (plainMap__KeyAssembler) AssignNull() error { + return mixins.StringAssembler{"string"}.AssignNull() +} +func (plainMap__KeyAssembler) AssignBool(bool) error { + return mixins.StringAssembler{"string"}.AssignBool(false) +} +func (plainMap__KeyAssembler) AssignInt(int) error { + return mixins.StringAssembler{"string"}.AssignInt(0) +} +func (plainMap__KeyAssembler) AssignFloat(float64) error { + return mixins.StringAssembler{"string"}.AssignFloat(0) +} +func (mka *plainMap__KeyAssembler) 
AssignString(v string) error { + // Check for dup keys; error if so. + // (And, backtrack state to accepting keys again so we don't get eternally wedged here.) + _, exists := mka.ma.w.m[v] + if exists { + mka.ma.state = maState_initial + mka.ma = nil // invalidate self to prevent further incorrect use. + return ipld.ErrRepeatedMapKey{plainString(v)} + } + // Assign the key into the end of the entry table; + // we'll be doing map insertions after we get the value in hand. + // (There's no need to delegate to another assembler for the key type, + // because we're just at Data Model level here, which only regards plain strings.) + mka.ma.w.t = append(mka.ma.w.t, plainMap__Entry{}) + mka.ma.w.t[len(mka.ma.w.t)-1].k = plainString(v) + // Update parent assembler state: clear to proceed. + mka.ma.state = maState_expectValue + mka.ma = nil // invalidate self to prevent further incorrect use. + return nil +} +func (plainMap__KeyAssembler) AssignBytes([]byte) error { + return mixins.StringAssembler{"string"}.AssignBytes(nil) +} +func (plainMap__KeyAssembler) AssignLink(ipld.Link) error { + return mixins.StringAssembler{"string"}.AssignLink(nil) +} +func (mka *plainMap__KeyAssembler) AssignNode(v ipld.Node) error { + vs, err := v.AsString() + if err != nil { + return fmt.Errorf("cannot assign non-string node into map key assembler") // FIXME:errors: this doesn't quite fit in ErrWrongKind cleanly; new error type? 
+ } + return mka.AssignString(vs) +} +func (plainMap__KeyAssembler) Prototype() ipld.NodePrototype { + return Prototype__String{} +} + +// -- MapAssembler.ValueAssembler --> + +func (mva *plainMap__ValueAssembler) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + ma := plainMap__ValueAssemblerMap{} + ma.ca.w = &plainMap{} + ma.p = mva.ma + _, err := ma.ca.BeginMap(sizeHint) + return &ma, err +} +func (mva *plainMap__ValueAssembler) BeginList(sizeHint int) (ipld.ListAssembler, error) { + la := plainMap__ValueAssemblerList{} + la.ca.w = &plainList{} + la.p = mva.ma + _, err := la.ca.BeginList(sizeHint) + return &la, err +} +func (mva *plainMap__ValueAssembler) AssignNull() error { + return mva.AssignNode(ipld.Null) +} +func (mva *plainMap__ValueAssembler) AssignBool(v bool) error { + vb := plainBool(v) + return mva.AssignNode(&vb) +} +func (mva *plainMap__ValueAssembler) AssignInt(v int) error { + vb := plainInt(v) + return mva.AssignNode(&vb) +} +func (mva *plainMap__ValueAssembler) AssignFloat(v float64) error { + vb := plainFloat(v) + return mva.AssignNode(&vb) +} +func (mva *plainMap__ValueAssembler) AssignString(v string) error { + vb := plainString(v) + return mva.AssignNode(&vb) +} +func (mva *plainMap__ValueAssembler) AssignBytes(v []byte) error { + vb := plainBytes(v) + return mva.AssignNode(&vb) +} +func (mva *plainMap__ValueAssembler) AssignLink(v ipld.Link) error { + vb := plainLink{v} + return mva.AssignNode(&vb) +} +func (mva *plainMap__ValueAssembler) AssignNode(v ipld.Node) error { + l := len(mva.ma.w.t) - 1 + mva.ma.w.t[l].v = v + mva.ma.w.m[string(mva.ma.w.t[l].k)] = v + mva.ma.state = maState_initial + mva.ma = nil // invalidate self to prevent further incorrect use. 
+ return nil +} +func (plainMap__ValueAssembler) Prototype() ipld.NodePrototype { + return Prototype__Any{} +} + +type plainMap__ValueAssemblerMap struct { + ca plainMap__Assembler + p *plainMap__Assembler // pointer back to parent, for final insert and state bump +} + +// we briefly state only the methods we need to delegate here. +// just embedding plainMap__Assembler also behaves correctly, +// but causes a lot of unnecessary autogenerated functions in the final binary. + +func (ma *plainMap__ValueAssemblerMap) AssembleEntry(k string) (ipld.NodeAssembler, error) { + return ma.ca.AssembleEntry(k) +} +func (ma *plainMap__ValueAssemblerMap) AssembleKey() ipld.NodeAssembler { + return ma.ca.AssembleKey() +} +func (ma *plainMap__ValueAssemblerMap) AssembleValue() ipld.NodeAssembler { + return ma.ca.AssembleValue() +} +func (plainMap__ValueAssemblerMap) KeyPrototype() ipld.NodePrototype { + return Prototype__String{} +} +func (plainMap__ValueAssemblerMap) ValuePrototype(_ string) ipld.NodePrototype { + return Prototype__Any{} +} + +func (ma *plainMap__ValueAssemblerMap) Finish() error { + if err := ma.ca.Finish(); err != nil { + return err + } + w := ma.ca.w + ma.ca.w = nil + return ma.p.va.AssignNode(w) +} + +type plainMap__ValueAssemblerList struct { + ca plainList__Assembler + p *plainMap__Assembler // pointer back to parent, for final insert and state bump +} + +// we briefly state only the methods we need to delegate here. +// just embedding plainList__Assembler also behaves correctly, +// but causes a lot of unnecessary autogenerated functions in the final binary. 
+ +func (la *plainMap__ValueAssemblerList) AssembleValue() ipld.NodeAssembler { + return la.ca.AssembleValue() +} +func (plainMap__ValueAssemblerList) ValuePrototype(_ int) ipld.NodePrototype { + return Prototype__Any{} +} + +func (la *plainMap__ValueAssemblerList) Finish() error { + if err := la.ca.Finish(); err != nil { + return err + } + w := la.ca.w + la.ca.w = nil + return la.p.va.AssignNode(w) +} diff --git a/vendor/github.com/ipld/go-ipld-prime/node/basic/string.go b/vendor/github.com/ipld/go-ipld-prime/node/basic/string.go new file mode 100644 index 0000000000..6dfb4cc0ca --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/node/basic/string.go @@ -0,0 +1,149 @@ +package basicnode + +import ( + ipld "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/node/mixins" +) + +var ( + _ ipld.Node = plainString("") + _ ipld.NodePrototype = Prototype__String{} + _ ipld.NodeBuilder = &plainString__Builder{} + _ ipld.NodeAssembler = &plainString__Assembler{} +) + +func NewString(value string) ipld.Node { + v := plainString(value) + return &v +} + +// plainString is a simple boxed string that complies with ipld.Node. +// It's useful for many things, such as boxing map keys. +// +// The implementation is a simple typedef of a string; +// handling it as a Node incurs 'runtime.convTstring', +// which is about the best we can do. 
+type plainString string + +// -- Node interface methods --> + +func (plainString) ReprKind() ipld.ReprKind { + return ipld.ReprKind_String +} +func (plainString) LookupByString(string) (ipld.Node, error) { + return mixins.String{"string"}.LookupByString("") +} +func (plainString) LookupByNode(key ipld.Node) (ipld.Node, error) { + return mixins.String{"string"}.LookupByNode(nil) +} +func (plainString) LookupByIndex(idx int) (ipld.Node, error) { + return mixins.String{"string"}.LookupByIndex(0) +} +func (plainString) LookupBySegment(seg ipld.PathSegment) (ipld.Node, error) { + return mixins.String{"string"}.LookupBySegment(seg) +} +func (plainString) MapIterator() ipld.MapIterator { + return nil +} +func (plainString) ListIterator() ipld.ListIterator { + return nil +} +func (plainString) Length() int { + return -1 +} +func (plainString) IsAbsent() bool { + return false +} +func (plainString) IsNull() bool { + return false +} +func (plainString) AsBool() (bool, error) { + return mixins.String{"string"}.AsBool() +} +func (plainString) AsInt() (int, error) { + return mixins.String{"string"}.AsInt() +} +func (plainString) AsFloat() (float64, error) { + return mixins.String{"string"}.AsFloat() +} +func (x plainString) AsString() (string, error) { + return string(x), nil +} +func (plainString) AsBytes() ([]byte, error) { + return mixins.String{"string"}.AsBytes() +} +func (plainString) AsLink() (ipld.Link, error) { + return mixins.String{"string"}.AsLink() +} +func (plainString) Prototype() ipld.NodePrototype { + return Prototype__String{} +} + +// -- NodePrototype --> + +type Prototype__String struct{} + +func (Prototype__String) NewBuilder() ipld.NodeBuilder { + var w plainString + return &plainString__Builder{plainString__Assembler{w: &w}} +} + +// -- NodeBuilder --> + +type plainString__Builder struct { + plainString__Assembler +} + +func (nb *plainString__Builder) Build() ipld.Node { + return nb.w +} +func (nb *plainString__Builder) Reset() { + var w plainString + 
*nb = plainString__Builder{plainString__Assembler{w: &w}} +} + +// -- NodeAssembler --> + +type plainString__Assembler struct { + w *plainString +} + +func (plainString__Assembler) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + return mixins.StringAssembler{"string"}.BeginMap(0) +} +func (plainString__Assembler) BeginList(sizeHint int) (ipld.ListAssembler, error) { + return mixins.StringAssembler{"string"}.BeginList(0) +} +func (plainString__Assembler) AssignNull() error { + return mixins.StringAssembler{"string"}.AssignNull() +} +func (plainString__Assembler) AssignBool(bool) error { + return mixins.StringAssembler{"string"}.AssignBool(false) +} +func (plainString__Assembler) AssignInt(int) error { + return mixins.StringAssembler{"string"}.AssignInt(0) +} +func (plainString__Assembler) AssignFloat(float64) error { + return mixins.StringAssembler{"string"}.AssignFloat(0) +} +func (na *plainString__Assembler) AssignString(v string) error { + *na.w = plainString(v) + return nil +} +func (plainString__Assembler) AssignBytes([]byte) error { + return mixins.StringAssembler{"string"}.AssignBytes(nil) +} +func (plainString__Assembler) AssignLink(ipld.Link) error { + return mixins.StringAssembler{"string"}.AssignLink(nil) +} +func (na *plainString__Assembler) AssignNode(v ipld.Node) error { + if v2, err := v.AsString(); err != nil { + return err + } else { + *na.w = plainString(v2) + return nil + } +} +func (plainString__Assembler) Prototype() ipld.NodePrototype { + return Prototype__String{} +} diff --git a/vendor/github.com/ipld/go-ipld-prime/node/mixins/HACKME.md b/vendor/github.com/ipld/go-ipld-prime/node/mixins/HACKME.md new file mode 100644 index 0000000000..a000a7f638 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/node/mixins/HACKME.md @@ -0,0 +1,37 @@ +node mixins and how to use them +=============================== + +These mixins are here to: + +1. reduce the amount of code you need to write to create a new Node implementation, and +2. 
standardize a lot of the error handling for common cases (especially, around kinds). + +"Reduce the amount of code" also has an application in codegen, +where while it doesn't save any human effort, it does reduce GLOC size. +(Or more precisely, it doesn't save *lines*, since we use them in verbose style, +but it does make those lines an awful lot shorter.) + +Note that these mixins are _not_ particularly here to help with performance. + +- all `ErrWrongKind` error are returned by value, which means a `runtime.convT2I` which means a heap allocation. + The error paths will therefore never be "fast"; it will *always* be cheaper + to check `kind` in advance than to probe and handle errors, if efficiency is your goal. +- in general, there's really no way to improve upon the performance of having these methods simply writen directlyon your type. + +These mixins will affect struct size if you use them via embed. +They can also be used without any effect on struct size if used more verbosely. + +The binary/assembly output size is not affected by use of the mixins. +(If using them verbosely -- e.g. still declaring methods on your type +and using `return mixins.Kind{"TypeName"}.Method()` in the method body -- +the end result is the inliner kicks in, and the end result is almost +identical binary size.) + +Summary: + +- SLOC: good, or neutral depending on use +- GLOC: good +- standardized: good +- speed: neutral +- mem size: neutral if used verbosely, bad if used most tersely +- asm size: neutral diff --git a/vendor/github.com/ipld/go-ipld-prime/node/mixins/boolMixin.go b/vendor/github.com/ipld/go-ipld-prime/node/mixins/boolMixin.go new file mode 100644 index 0000000000..936ef91525 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/node/mixins/boolMixin.go @@ -0,0 +1,97 @@ +package mixins + +import ( + ipld "github.com/ipld/go-ipld-prime" +) + +// Bool can be embedded in a struct to provide all the methods that +// have fixed output for any int-kinded nodes. 
+// (Mostly this includes all the methods which simply return ErrWrongKind.) +// Other methods will still need to be implemented to finish conforming to Node. +// +// To conserve memory and get a TypeName in errors without embedding, +// write methods on your type with a body that simply initializes this struct +// and immediately uses the relevant method; +// this is more verbose in source, but compiles to a tighter result: +// in memory, there's no embed; and in runtime, the calls will be inlined +// and thus have no cost in execution time. +type Bool struct { + TypeName string +} + +func (Bool) ReprKind() ipld.ReprKind { + return ipld.ReprKind_Bool +} +func (x Bool) LookupByString(string) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByString", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_Bool} +} +func (x Bool) LookupByNode(key ipld.Node) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByNode", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_Bool} +} +func (x Bool) LookupByIndex(idx int) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByIndex", AppropriateKind: ipld.ReprKindSet_JustList, ActualKind: ipld.ReprKind_Bool} +} +func (x Bool) LookupBySegment(ipld.PathSegment) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupBySegment", AppropriateKind: ipld.ReprKindSet_Recursive, ActualKind: ipld.ReprKind_Bool} +} +func (Bool) MapIterator() ipld.MapIterator { + return nil +} +func (Bool) ListIterator() ipld.ListIterator { + return nil +} +func (Bool) Length() int { + return -1 +} +func (Bool) IsAbsent() bool { + return false +} +func (Bool) IsNull() bool { + return false +} +func (x Bool) AsInt() (int, error) { + return 0, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsInt", AppropriateKind: ipld.ReprKindSet_JustInt, ActualKind: 
ipld.ReprKind_Bool} +} +func (x Bool) AsFloat() (float64, error) { + return 0, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsFloat", AppropriateKind: ipld.ReprKindSet_JustFloat, ActualKind: ipld.ReprKind_Bool} +} +func (x Bool) AsString() (string, error) { + return "", ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsString", AppropriateKind: ipld.ReprKindSet_JustString, ActualKind: ipld.ReprKind_Bool} +} +func (x Bool) AsBytes() ([]byte, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsBytes", AppropriateKind: ipld.ReprKindSet_JustBytes, ActualKind: ipld.ReprKind_Bool} +} +func (x Bool) AsLink() (ipld.Link, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsLink", AppropriateKind: ipld.ReprKindSet_JustLink, ActualKind: ipld.ReprKind_Bool} +} + +// BoolAssembler has similar purpose as Bool, but for (you guessed it) +// the NodeAssembler interface rather than the Node interface. +type BoolAssembler struct { + TypeName string +} + +func (x BoolAssembler) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "BeginMap", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_Bool} +} +func (x BoolAssembler) BeginList(sizeHint int) (ipld.ListAssembler, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "BeginList", AppropriateKind: ipld.ReprKindSet_JustList, ActualKind: ipld.ReprKind_Bool} +} +func (x BoolAssembler) AssignNull() error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignNull", AppropriateKind: ipld.ReprKindSet_JustNull, ActualKind: ipld.ReprKind_Bool} +} +func (x BoolAssembler) AssignInt(int) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignInt", AppropriateKind: ipld.ReprKindSet_JustInt, ActualKind: ipld.ReprKind_Bool} +} +func (x BoolAssembler) AssignFloat(float64) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: 
"AssignFloat", AppropriateKind: ipld.ReprKindSet_JustFloat, ActualKind: ipld.ReprKind_Bool} +} +func (x BoolAssembler) AssignString(string) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignString", AppropriateKind: ipld.ReprKindSet_JustString, ActualKind: ipld.ReprKind_Bool} +} +func (x BoolAssembler) AssignBytes([]byte) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignBytes", AppropriateKind: ipld.ReprKindSet_JustBytes, ActualKind: ipld.ReprKind_Bool} +} +func (x BoolAssembler) AssignLink(ipld.Link) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignLink", AppropriateKind: ipld.ReprKindSet_JustLink, ActualKind: ipld.ReprKind_Bool} +} diff --git a/vendor/github.com/ipld/go-ipld-prime/node/mixins/bytesMixin.go b/vendor/github.com/ipld/go-ipld-prime/node/mixins/bytesMixin.go new file mode 100644 index 0000000000..f992ae93fa --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/node/mixins/bytesMixin.go @@ -0,0 +1,97 @@ +package mixins + +import ( + ipld "github.com/ipld/go-ipld-prime" +) + +// Bytes can be embedded in a struct to provide all the methods that +// have fixed output for any int-kinded nodes. +// (Mostly this includes all the methods which simply return ErrWrongKind.) +// Other methods will still need to be implemented to finish conforming to Node. +// +// To conserve memory and get a TypeName in errors without embedding, +// write methods on your type with a body that simply initializes this struct +// and immediately uses the relevant method; +// this is more verbose in source, but compiles to a tighter result: +// in memory, there's no embed; and in runtime, the calls will be inlined +// and thus have no cost in execution time. 
+type Bytes struct { + TypeName string +} + +func (Bytes) ReprKind() ipld.ReprKind { + return ipld.ReprKind_Bytes +} +func (x Bytes) LookupByString(string) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByString", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_Bytes} +} +func (x Bytes) LookupByNode(key ipld.Node) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByNode", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_Bytes} +} +func (x Bytes) LookupByIndex(idx int) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByIndex", AppropriateKind: ipld.ReprKindSet_JustList, ActualKind: ipld.ReprKind_Bytes} +} +func (x Bytes) LookupBySegment(ipld.PathSegment) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupBySegment", AppropriateKind: ipld.ReprKindSet_Recursive, ActualKind: ipld.ReprKind_Bytes} +} +func (Bytes) MapIterator() ipld.MapIterator { + return nil +} +func (Bytes) ListIterator() ipld.ListIterator { + return nil +} +func (Bytes) Length() int { + return -1 +} +func (Bytes) IsAbsent() bool { + return false +} +func (Bytes) IsNull() bool { + return false +} +func (x Bytes) AsBool() (bool, error) { + return false, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsBool", AppropriateKind: ipld.ReprKindSet_JustBool, ActualKind: ipld.ReprKind_Bytes} +} +func (x Bytes) AsInt() (int, error) { + return 0, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsInt", AppropriateKind: ipld.ReprKindSet_JustInt, ActualKind: ipld.ReprKind_Bytes} +} +func (x Bytes) AsFloat() (float64, error) { + return 0, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsFloat", AppropriateKind: ipld.ReprKindSet_JustFloat, ActualKind: ipld.ReprKind_Bytes} +} +func (x Bytes) AsString() (string, error) { + return "", ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: 
"AsString", AppropriateKind: ipld.ReprKindSet_JustString, ActualKind: ipld.ReprKind_Bytes} +} +func (x Bytes) AsLink() (ipld.Link, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsLink", AppropriateKind: ipld.ReprKindSet_JustLink, ActualKind: ipld.ReprKind_Bytes} +} + +// BytesAssembler has similar purpose as Bytes, but for (you guessed it) +// the NodeAssembler interface rather than the Node interface. +type BytesAssembler struct { + TypeName string +} + +func (x BytesAssembler) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "BeginMap", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_Bytes} +} +func (x BytesAssembler) BeginList(sizeHint int) (ipld.ListAssembler, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "BeginList", AppropriateKind: ipld.ReprKindSet_JustList, ActualKind: ipld.ReprKind_Bytes} +} +func (x BytesAssembler) AssignNull() error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignNull", AppropriateKind: ipld.ReprKindSet_JustNull, ActualKind: ipld.ReprKind_Bytes} +} +func (x BytesAssembler) AssignBool(bool) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignBool", AppropriateKind: ipld.ReprKindSet_JustBool, ActualKind: ipld.ReprKind_Bytes} +} +func (x BytesAssembler) AssignInt(int) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignInt", AppropriateKind: ipld.ReprKindSet_JustInt, ActualKind: ipld.ReprKind_Bytes} +} +func (x BytesAssembler) AssignFloat(float64) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignFloat", AppropriateKind: ipld.ReprKindSet_JustFloat, ActualKind: ipld.ReprKind_Bytes} +} +func (x BytesAssembler) AssignString(string) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignString", AppropriateKind: ipld.ReprKindSet_JustString, ActualKind: ipld.ReprKind_Bytes} +} +func 
(x BytesAssembler) AssignLink(ipld.Link) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignLink", AppropriateKind: ipld.ReprKindSet_JustLink, ActualKind: ipld.ReprKind_Bytes} +} diff --git a/vendor/github.com/ipld/go-ipld-prime/node/mixins/floatMixin.go b/vendor/github.com/ipld/go-ipld-prime/node/mixins/floatMixin.go new file mode 100644 index 0000000000..6d59985ff6 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/node/mixins/floatMixin.go @@ -0,0 +1,97 @@ +package mixins + +import ( + ipld "github.com/ipld/go-ipld-prime" +) + +// Float can be embedded in a struct to provide all the methods that +// have fixed output for any int-kinded nodes. +// (Mostly this includes all the methods which simply return ErrWrongKind.) +// Other methods will still need to be implemented to finish conforming to Node. +// +// To conserve memory and get a TypeName in errors without embedding, +// write methods on your type with a body that simply initializes this struct +// and immediately uses the relevant method; +// this is more verbose in source, but compiles to a tighter result: +// in memory, there's no embed; and in runtime, the calls will be inlined +// and thus have no cost in execution time. 
+type Float struct { + TypeName string +} + +func (Float) ReprKind() ipld.ReprKind { + return ipld.ReprKind_Float +} +func (x Float) LookupByString(string) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByString", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_Float} +} +func (x Float) LookupByNode(key ipld.Node) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByNode", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_Float} +} +func (x Float) LookupByIndex(idx int) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByIndex", AppropriateKind: ipld.ReprKindSet_JustList, ActualKind: ipld.ReprKind_Float} +} +func (x Float) LookupBySegment(ipld.PathSegment) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupBySegment", AppropriateKind: ipld.ReprKindSet_Recursive, ActualKind: ipld.ReprKind_Float} +} +func (Float) MapIterator() ipld.MapIterator { + return nil +} +func (Float) ListIterator() ipld.ListIterator { + return nil +} +func (Float) Length() int { + return -1 +} +func (Float) IsAbsent() bool { + return false +} +func (Float) IsNull() bool { + return false +} +func (x Float) AsBool() (bool, error) { + return false, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsBool", AppropriateKind: ipld.ReprKindSet_JustBool, ActualKind: ipld.ReprKind_Float} +} +func (x Float) AsInt() (int, error) { + return 0, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsInt", AppropriateKind: ipld.ReprKindSet_JustInt, ActualKind: ipld.ReprKind_Float} +} +func (x Float) AsString() (string, error) { + return "", ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsString", AppropriateKind: ipld.ReprKindSet_JustString, ActualKind: ipld.ReprKind_Float} +} +func (x Float) AsBytes() ([]byte, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: 
"AsBytes", AppropriateKind: ipld.ReprKindSet_JustBytes, ActualKind: ipld.ReprKind_Float} +} +func (x Float) AsLink() (ipld.Link, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsLink", AppropriateKind: ipld.ReprKindSet_JustLink, ActualKind: ipld.ReprKind_Float} +} + +// FloatAssembler has similar purpose as Float, but for (you guessed it) +// the NodeAssembler interface rather than the Node interface. +type FloatAssembler struct { + TypeName string +} + +func (x FloatAssembler) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "BeginMap", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_Float} +} +func (x FloatAssembler) BeginList(sizeHint int) (ipld.ListAssembler, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "BeginList", AppropriateKind: ipld.ReprKindSet_JustList, ActualKind: ipld.ReprKind_Float} +} +func (x FloatAssembler) AssignNull() error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignNull", AppropriateKind: ipld.ReprKindSet_JustNull, ActualKind: ipld.ReprKind_Float} +} +func (x FloatAssembler) AssignBool(bool) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignBool", AppropriateKind: ipld.ReprKindSet_JustBool, ActualKind: ipld.ReprKind_Float} +} +func (x FloatAssembler) AssignInt(int) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignInt", AppropriateKind: ipld.ReprKindSet_JustInt, ActualKind: ipld.ReprKind_Float} +} +func (x FloatAssembler) AssignString(string) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignString", AppropriateKind: ipld.ReprKindSet_JustString, ActualKind: ipld.ReprKind_Float} +} +func (x FloatAssembler) AssignBytes([]byte) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignBytes", AppropriateKind: ipld.ReprKindSet_JustBytes, ActualKind: ipld.ReprKind_Float} +} +func (x 
FloatAssembler) AssignLink(ipld.Link) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignLink", AppropriateKind: ipld.ReprKindSet_JustLink, ActualKind: ipld.ReprKind_Float} +} diff --git a/vendor/github.com/ipld/go-ipld-prime/node/mixins/intMixin.go b/vendor/github.com/ipld/go-ipld-prime/node/mixins/intMixin.go new file mode 100644 index 0000000000..d509d64368 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/node/mixins/intMixin.go @@ -0,0 +1,97 @@ +package mixins + +import ( + ipld "github.com/ipld/go-ipld-prime" +) + +// Int can be embedded in a struct to provide all the methods that +// have fixed output for any int-kinded nodes. +// (Mostly this includes all the methods which simply return ErrWrongKind.) +// Other methods will still need to be implemented to finish conforming to Node. +// +// To conserve memory and get a TypeName in errors without embedding, +// write methods on your type with a body that simply initializes this struct +// and immediately uses the relevant method; +// this is more verbose in source, but compiles to a tighter result: +// in memory, there's no embed; and in runtime, the calls will be inlined +// and thus have no cost in execution time. 
+type Int struct { + TypeName string +} + +func (Int) ReprKind() ipld.ReprKind { + return ipld.ReprKind_Int +} +func (x Int) LookupByString(string) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByString", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_Int} +} +func (x Int) LookupByNode(key ipld.Node) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByNode", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_Int} +} +func (x Int) LookupByIndex(idx int) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByIndex", AppropriateKind: ipld.ReprKindSet_JustList, ActualKind: ipld.ReprKind_Int} +} +func (x Int) LookupBySegment(ipld.PathSegment) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupBySegment", AppropriateKind: ipld.ReprKindSet_Recursive, ActualKind: ipld.ReprKind_Int} +} +func (Int) MapIterator() ipld.MapIterator { + return nil +} +func (Int) ListIterator() ipld.ListIterator { + return nil +} +func (Int) Length() int { + return -1 +} +func (Int) IsAbsent() bool { + return false +} +func (Int) IsNull() bool { + return false +} +func (x Int) AsBool() (bool, error) { + return false, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsBool", AppropriateKind: ipld.ReprKindSet_JustBool, ActualKind: ipld.ReprKind_Int} +} +func (x Int) AsFloat() (float64, error) { + return 0, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsFloat", AppropriateKind: ipld.ReprKindSet_JustFloat, ActualKind: ipld.ReprKind_Int} +} +func (x Int) AsString() (string, error) { + return "", ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsString", AppropriateKind: ipld.ReprKindSet_JustString, ActualKind: ipld.ReprKind_Int} +} +func (x Int) AsBytes() ([]byte, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsBytes", AppropriateKind: 
ipld.ReprKindSet_JustBytes, ActualKind: ipld.ReprKind_Int} +} +func (x Int) AsLink() (ipld.Link, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsLink", AppropriateKind: ipld.ReprKindSet_JustLink, ActualKind: ipld.ReprKind_Int} +} + +// IntAssembler has similar purpose as Int, but for (you guessed it) +// the NodeAssembler interface rather than the Node interface. +type IntAssembler struct { + TypeName string +} + +func (x IntAssembler) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "BeginMap", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_Int} +} +func (x IntAssembler) BeginList(sizeHint int) (ipld.ListAssembler, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "BeginList", AppropriateKind: ipld.ReprKindSet_JustList, ActualKind: ipld.ReprKind_Int} +} +func (x IntAssembler) AssignNull() error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignNull", AppropriateKind: ipld.ReprKindSet_JustNull, ActualKind: ipld.ReprKind_Int} +} +func (x IntAssembler) AssignBool(bool) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignBool", AppropriateKind: ipld.ReprKindSet_JustBool, ActualKind: ipld.ReprKind_Int} +} +func (x IntAssembler) AssignFloat(float64) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignFloat", AppropriateKind: ipld.ReprKindSet_JustFloat, ActualKind: ipld.ReprKind_Int} +} +func (x IntAssembler) AssignString(string) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignString", AppropriateKind: ipld.ReprKindSet_JustString, ActualKind: ipld.ReprKind_Int} +} +func (x IntAssembler) AssignBytes([]byte) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignBytes", AppropriateKind: ipld.ReprKindSet_JustBytes, ActualKind: ipld.ReprKind_Int} +} +func (x IntAssembler) AssignLink(ipld.Link) error { + return 
ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignLink", AppropriateKind: ipld.ReprKindSet_JustLink, ActualKind: ipld.ReprKind_Int} +} diff --git a/vendor/github.com/ipld/go-ipld-prime/node/mixins/linkMixin.go b/vendor/github.com/ipld/go-ipld-prime/node/mixins/linkMixin.go new file mode 100644 index 0000000000..e9f016342a --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/node/mixins/linkMixin.go @@ -0,0 +1,97 @@ +package mixins + +import ( + ipld "github.com/ipld/go-ipld-prime" +) + +// Link can be embedded in a struct to provide all the methods that +// have fixed output for any int-kinded nodes. +// (Mostly this includes all the methods which simply return ErrWrongKind.) +// Other methods will still need to be implemented to finish conforming to Node. +// +// To conserve memory and get a TypeName in errors without embedding, +// write methods on your type with a body that simply initializes this struct +// and immediately uses the relevant method; +// this is more verbose in source, but compiles to a tighter result: +// in memory, there's no embed; and in runtime, the calls will be inlined +// and thus have no cost in execution time. 
+type Link struct { + TypeName string +} + +func (Link) ReprKind() ipld.ReprKind { + return ipld.ReprKind_Link +} +func (x Link) LookupByString(string) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByString", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_Link} +} +func (x Link) LookupByNode(key ipld.Node) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByNode", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_Link} +} +func (x Link) LookupByIndex(idx int) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByIndex", AppropriateKind: ipld.ReprKindSet_JustList, ActualKind: ipld.ReprKind_Link} +} +func (x Link) LookupBySegment(ipld.PathSegment) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupBySegment", AppropriateKind: ipld.ReprKindSet_Recursive, ActualKind: ipld.ReprKind_Link} +} +func (Link) MapIterator() ipld.MapIterator { + return nil +} +func (Link) ListIterator() ipld.ListIterator { + return nil +} +func (Link) Length() int { + return -1 +} +func (Link) IsAbsent() bool { + return false +} +func (Link) IsNull() bool { + return false +} +func (x Link) AsBool() (bool, error) { + return false, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsBool", AppropriateKind: ipld.ReprKindSet_JustBool, ActualKind: ipld.ReprKind_Link} +} +func (x Link) AsInt() (int, error) { + return 0, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsInt", AppropriateKind: ipld.ReprKindSet_JustInt, ActualKind: ipld.ReprKind_Link} +} +func (x Link) AsFloat() (float64, error) { + return 0, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsFloat", AppropriateKind: ipld.ReprKindSet_JustFloat, ActualKind: ipld.ReprKind_Link} +} +func (x Link) AsString() (string, error) { + return "", ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsString", 
AppropriateKind: ipld.ReprKindSet_JustString, ActualKind: ipld.ReprKind_Link} +} +func (x Link) AsBytes() ([]byte, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsBytes", AppropriateKind: ipld.ReprKindSet_JustBytes, ActualKind: ipld.ReprKind_Link} +} + +// LinkAssembler has similar purpose as Link, but for (you guessed it) +// the NodeAssembler interface rather than the Node interface. +type LinkAssembler struct { + TypeName string +} + +func (x LinkAssembler) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "BeginMap", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_Link} +} +func (x LinkAssembler) BeginList(sizeHint int) (ipld.ListAssembler, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "BeginList", AppropriateKind: ipld.ReprKindSet_JustList, ActualKind: ipld.ReprKind_Link} +} +func (x LinkAssembler) AssignNull() error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignNull", AppropriateKind: ipld.ReprKindSet_JustNull, ActualKind: ipld.ReprKind_Link} +} +func (x LinkAssembler) AssignBool(bool) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignBool", AppropriateKind: ipld.ReprKindSet_JustBool, ActualKind: ipld.ReprKind_Link} +} +func (x LinkAssembler) AssignInt(int) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignInt", AppropriateKind: ipld.ReprKindSet_JustInt, ActualKind: ipld.ReprKind_Link} +} +func (x LinkAssembler) AssignFloat(float64) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignFloat", AppropriateKind: ipld.ReprKindSet_JustFloat, ActualKind: ipld.ReprKind_Link} +} +func (x LinkAssembler) AssignString(string) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignString", AppropriateKind: ipld.ReprKindSet_JustString, ActualKind: ipld.ReprKind_Link} +} +func (x LinkAssembler) 
AssignBytes([]byte) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignBytes", AppropriateKind: ipld.ReprKindSet_JustBytes, ActualKind: ipld.ReprKind_Link} +} diff --git a/vendor/github.com/ipld/go-ipld-prime/node/mixins/listMixin.go b/vendor/github.com/ipld/go-ipld-prime/node/mixins/listMixin.go new file mode 100644 index 0000000000..ec624f1bd7 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/node/mixins/listMixin.go @@ -0,0 +1,88 @@ +package mixins + +import ( + ipld "github.com/ipld/go-ipld-prime" +) + +// List can be embedded in a struct to provide all the methods that +// have fixed output for any int-kinded nodes. +// (Mostly this includes all the methods which simply return ErrWrongKind.) +// Other methods will still need to be implemented to finish conforming to Node. +// +// To conserve memory and get a TypeName in errors without embedding, +// write methods on your type with a body that simply initializes this struct +// and immediately uses the relevant method; +// this is more verbose in source, but compiles to a tighter result: +// in memory, there's no embed; and in runtime, the calls will be inlined +// and thus have no cost in execution time. 
+type List struct { + TypeName string +} + +func (List) ReprKind() ipld.ReprKind { + return ipld.ReprKind_List +} +func (x List) LookupByString(string) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByString", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_List} +} +func (x List) LookupByNode(key ipld.Node) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByNode", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_List} +} +func (List) MapIterator() ipld.MapIterator { + return nil +} +func (List) IsAbsent() bool { + return false +} +func (List) IsNull() bool { + return false +} +func (x List) AsBool() (bool, error) { + return false, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsBool", AppropriateKind: ipld.ReprKindSet_JustBool, ActualKind: ipld.ReprKind_List} +} +func (x List) AsInt() (int, error) { + return 0, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsInt", AppropriateKind: ipld.ReprKindSet_JustInt, ActualKind: ipld.ReprKind_List} +} +func (x List) AsFloat() (float64, error) { + return 0, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsFloat", AppropriateKind: ipld.ReprKindSet_JustFloat, ActualKind: ipld.ReprKind_List} +} +func (x List) AsString() (string, error) { + return "", ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsString", AppropriateKind: ipld.ReprKindSet_JustString, ActualKind: ipld.ReprKind_List} +} +func (x List) AsBytes() ([]byte, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsBytes", AppropriateKind: ipld.ReprKindSet_JustBytes, ActualKind: ipld.ReprKind_List} +} +func (x List) AsLink() (ipld.Link, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsLink", AppropriateKind: ipld.ReprKindSet_JustLink, ActualKind: ipld.ReprKind_List} +} + +// ListAssembler has similar purpose as List, but for (you guessed it) +// the NodeAssembler 
interface rather than the Node interface. +type ListAssembler struct { + TypeName string +} + +func (x ListAssembler) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "BeginMap", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_List} +} +func (x ListAssembler) AssignNull() error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignNull", AppropriateKind: ipld.ReprKindSet_JustNull, ActualKind: ipld.ReprKind_List} +} +func (x ListAssembler) AssignBool(bool) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignBool", AppropriateKind: ipld.ReprKindSet_JustBool, ActualKind: ipld.ReprKind_List} +} +func (x ListAssembler) AssignInt(int) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignInt", AppropriateKind: ipld.ReprKindSet_JustInt, ActualKind: ipld.ReprKind_List} +} +func (x ListAssembler) AssignFloat(float64) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignFloat", AppropriateKind: ipld.ReprKindSet_JustFloat, ActualKind: ipld.ReprKind_List} +} +func (x ListAssembler) AssignString(string) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignString", AppropriateKind: ipld.ReprKindSet_JustString, ActualKind: ipld.ReprKind_List} +} +func (x ListAssembler) AssignBytes([]byte) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignBytes", AppropriateKind: ipld.ReprKindSet_JustBytes, ActualKind: ipld.ReprKind_List} +} +func (x ListAssembler) AssignLink(ipld.Link) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignLink", AppropriateKind: ipld.ReprKindSet_JustLink, ActualKind: ipld.ReprKind_List} +} diff --git a/vendor/github.com/ipld/go-ipld-prime/node/mixins/mapMixin.go b/vendor/github.com/ipld/go-ipld-prime/node/mixins/mapMixin.go new file mode 100644 index 0000000000..dae89667ad --- /dev/null +++ 
b/vendor/github.com/ipld/go-ipld-prime/node/mixins/mapMixin.go @@ -0,0 +1,85 @@ +package mixins + +import ( + ipld "github.com/ipld/go-ipld-prime" +) + +// Map can be embedded in a struct to provide all the methods that +// have fixed output for any map-kinded nodes. +// (Mostly this includes all the methods which simply return ErrWrongKind.) +// Other methods will still need to be implemented to finish conforming to Node. +// +// To conserve memory and get a TypeName in errors without embedding, +// write methods on your type with a body that simply initializes this struct +// and immediately uses the relevant method; +// this is more verbose in source, but compiles to a tighter result: +// in memory, there's no embed; and in runtime, the calls will be inlined +// and thus have no cost in execution time. +type Map struct { + TypeName string +} + +func (Map) ReprKind() ipld.ReprKind { + return ipld.ReprKind_Map +} +func (x Map) LookupByIndex(idx int) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByIndex", AppropriateKind: ipld.ReprKindSet_JustList, ActualKind: ipld.ReprKind_Map} +} +func (Map) ListIterator() ipld.ListIterator { + return nil +} +func (Map) IsAbsent() bool { + return false +} +func (Map) IsNull() bool { + return false +} +func (x Map) AsBool() (bool, error) { + return false, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsBool", AppropriateKind: ipld.ReprKindSet_JustBool, ActualKind: ipld.ReprKind_Map} +} +func (x Map) AsInt() (int, error) { + return 0, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsInt", AppropriateKind: ipld.ReprKindSet_JustInt, ActualKind: ipld.ReprKind_Map} +} +func (x Map) AsFloat() (float64, error) { + return 0, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsFloat", AppropriateKind: ipld.ReprKindSet_JustFloat, ActualKind: ipld.ReprKind_Map} +} +func (x Map) AsString() (string, error) { + return "", ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: 
"AsString", AppropriateKind: ipld.ReprKindSet_JustString, ActualKind: ipld.ReprKind_Map} +} +func (x Map) AsBytes() ([]byte, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsBytes", AppropriateKind: ipld.ReprKindSet_JustBytes, ActualKind: ipld.ReprKind_Map} +} +func (x Map) AsLink() (ipld.Link, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsLink", AppropriateKind: ipld.ReprKindSet_JustLink, ActualKind: ipld.ReprKind_Map} +} + +// MapAssembler has similar purpose as Map, but for (you guessed it) +// the NodeAssembler interface rather than the Node interface. +type MapAssembler struct { + TypeName string +} + +func (x MapAssembler) BeginList(sizeHint int) (ipld.ListAssembler, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "BeginList", AppropriateKind: ipld.ReprKindSet_JustList, ActualKind: ipld.ReprKind_Map} +} +func (x MapAssembler) AssignNull() error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignNull", AppropriateKind: ipld.ReprKindSet_JustNull, ActualKind: ipld.ReprKind_Map} +} +func (x MapAssembler) AssignBool(bool) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignBool", AppropriateKind: ipld.ReprKindSet_JustBool, ActualKind: ipld.ReprKind_Map} +} +func (x MapAssembler) AssignInt(int) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignInt", AppropriateKind: ipld.ReprKindSet_JustInt, ActualKind: ipld.ReprKind_Map} +} +func (x MapAssembler) AssignFloat(float64) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignFloat", AppropriateKind: ipld.ReprKindSet_JustFloat, ActualKind: ipld.ReprKind_Map} +} +func (x MapAssembler) AssignString(string) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignString", AppropriateKind: ipld.ReprKindSet_JustString, ActualKind: ipld.ReprKind_Map} +} +func (x MapAssembler) AssignBytes([]byte) error { + return 
ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignBytes", AppropriateKind: ipld.ReprKindSet_JustBytes, ActualKind: ipld.ReprKind_Map} +} +func (x MapAssembler) AssignLink(ipld.Link) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignLink", AppropriateKind: ipld.ReprKindSet_JustLink, ActualKind: ipld.ReprKind_Map} +} diff --git a/vendor/github.com/ipld/go-ipld-prime/node/mixins/stringMixin.go b/vendor/github.com/ipld/go-ipld-prime/node/mixins/stringMixin.go new file mode 100644 index 0000000000..f800e01622 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/node/mixins/stringMixin.go @@ -0,0 +1,97 @@ +package mixins + +import ( + ipld "github.com/ipld/go-ipld-prime" +) + +// String can be embedded in a struct to provide all the methods that +// have fixed output for any string-kinded nodes. +// (Mostly this includes all the methods which simply return ErrWrongKind.) +// Other methods will still need to be implemented to finish conforming to Node. +// +// To conserve memory and get a TypeName in errors without embedding, +// write methods on your type with a body that simply initializes this struct +// and immediately uses the relevant method; +// this is more verbose in source, but compiles to a tighter result: +// in memory, there's no embed; and in runtime, the calls will be inlined +// and thus have no cost in execution time. 
+type String struct { + TypeName string +} + +func (String) ReprKind() ipld.ReprKind { + return ipld.ReprKind_String +} +func (x String) LookupByString(string) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByString", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_String} +} +func (x String) LookupByNode(key ipld.Node) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByNode", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_String} +} +func (x String) LookupByIndex(idx int) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByIndex", AppropriateKind: ipld.ReprKindSet_JustList, ActualKind: ipld.ReprKind_String} +} +func (x String) LookupBySegment(ipld.PathSegment) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupBySegment", AppropriateKind: ipld.ReprKindSet_Recursive, ActualKind: ipld.ReprKind_String} +} +func (String) MapIterator() ipld.MapIterator { + return nil +} +func (String) ListIterator() ipld.ListIterator { + return nil +} +func (String) Length() int { + return -1 +} +func (String) IsAbsent() bool { + return false +} +func (String) IsNull() bool { + return false +} +func (x String) AsBool() (bool, error) { + return false, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsBool", AppropriateKind: ipld.ReprKindSet_JustBool, ActualKind: ipld.ReprKind_String} +} +func (x String) AsInt() (int, error) { + return 0, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsInt", AppropriateKind: ipld.ReprKindSet_JustInt, ActualKind: ipld.ReprKind_String} +} +func (x String) AsFloat() (float64, error) { + return 0, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsFloat", AppropriateKind: ipld.ReprKindSet_JustFloat, ActualKind: ipld.ReprKind_String} +} +func (x String) AsBytes() ([]byte, error) { + return nil, ipld.ErrWrongKind{TypeName: 
x.TypeName, MethodName: "AsBytes", AppropriateKind: ipld.ReprKindSet_JustBytes, ActualKind: ipld.ReprKind_String} +} +func (x String) AsLink() (ipld.Link, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsLink", AppropriateKind: ipld.ReprKindSet_JustLink, ActualKind: ipld.ReprKind_String} +} + +// StringAssembler has similar purpose as String, but for (you guessed it) +// the NodeAssembler interface rather than the Node interface. +type StringAssembler struct { + TypeName string +} + +func (x StringAssembler) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "BeginMap", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_String} +} +func (x StringAssembler) BeginList(sizeHint int) (ipld.ListAssembler, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "BeginList", AppropriateKind: ipld.ReprKindSet_JustList, ActualKind: ipld.ReprKind_String} +} +func (x StringAssembler) AssignNull() error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignNull", AppropriateKind: ipld.ReprKindSet_JustNull, ActualKind: ipld.ReprKind_String} +} +func (x StringAssembler) AssignBool(bool) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignBool", AppropriateKind: ipld.ReprKindSet_JustBool, ActualKind: ipld.ReprKind_String} +} +func (x StringAssembler) AssignInt(int) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignInt", AppropriateKind: ipld.ReprKindSet_JustInt, ActualKind: ipld.ReprKind_String} +} +func (x StringAssembler) AssignFloat(float64) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignFloat", AppropriateKind: ipld.ReprKindSet_JustFloat, ActualKind: ipld.ReprKind_String} +} +func (x StringAssembler) AssignBytes([]byte) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignBytes", AppropriateKind: ipld.ReprKindSet_JustBytes, 
ActualKind: ipld.ReprKind_String} +} +func (x StringAssembler) AssignLink(ipld.Link) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignLink", AppropriateKind: ipld.ReprKindSet_JustLink, ActualKind: ipld.ReprKind_String} +} diff --git a/vendor/github.com/ipld/go-ipld-prime/node/mixins/tmplMixin.txt b/vendor/github.com/ipld/go-ipld-prime/node/mixins/tmplMixin.txt new file mode 100644 index 0000000000..bd01419ca3 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/node/mixins/tmplMixin.txt @@ -0,0 +1,107 @@ +// copy this and remove methods that aren't relevant to your kind. +// this has not been scripted. +// (the first part is trivial; the second part is not; and this updates rarely. https://xkcd.com/1205/ applies.) + +package mixins + +import ( + ipld "github.com/ipld/go-ipld-prime" +) + +// @Kind@ can be embedded in a struct to provide all the methods that +// have fixed output for any int-kinded nodes. +// (Mostly this includes all the methods which simply return ErrWrongKind.) +// Other methods will still need to be implemented to finish conforming to Node. +// +// To conserve memory and get a TypeName in errors without embedding, +// write methods on your type with a body that simply initializes this struct +// and immediately uses the relevant method; +// this is more verbose in source, but compiles to a tighter result: +// in memory, there's no embed; and in runtime, the calls will be inlined +// and thus have no cost in execution time. 
+type @Kind@ struct { + TypeName string +} + +func (@Kind@) ReprKind() ipld.ReprKind { + return ipld.ReprKind_@Kind@ +} +func (x @Kind@) LookupByString(string) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByString", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_@Kind@} +} +func (x @Kind@) LookupByNode(key ipld.Node) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByNode", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_@Kind@} +} +func (x @Kind@) LookupByIndex(idx int) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupByIndex", AppropriateKind: ipld.ReprKindSet_JustList, ActualKind: ipld.ReprKind_@Kind@} +} +func (x @Kind@) LookupBySegment(ipld.PathSegment) (ipld.Node, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "LookupBySegment", AppropriateKind: ipld.ReprKindSet_Recursive, ActualKind: ipld.ReprKind_@Kind@} +} +func (@Kind@) MapIterator() ipld.MapIterator { + return nil +} +func (@Kind@) ListIterator() ipld.ListIterator { + return nil +} +func (@Kind@) Length() int { + return -1 +} +func (@Kind@) IsAbsent() bool { + return false +} +func (@Kind@) IsNull() bool { + return false +} +func (x @Kind@) AsBool() (bool, error) { + return false, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsBool", AppropriateKind: ipld.ReprKindSet_JustBool, ActualKind: ipld.ReprKind_@Kind@} +} +func (x @Kind@) AsInt() (int, error) { + return 0, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsInt", AppropriateKind: ipld.ReprKindSet_JustInt, ActualKind: ipld.ReprKind_@Kind@} +} +func (x @Kind@) AsFloat() (float64, error) { + return 0, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsFloat", AppropriateKind: ipld.ReprKindSet_JustFloat, ActualKind: ipld.ReprKind_@Kind@} +} +func (x @Kind@) AsString() (string, error) { + return "", ipld.ErrWrongKind{TypeName: 
x.TypeName, MethodName: "AsString", AppropriateKind: ipld.ReprKindSet_JustString, ActualKind: ipld.ReprKind_@Kind@} +} +func (x @Kind@) AsBytes() ([]byte, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsBytes", AppropriateKind: ipld.ReprKindSet_JustBytes, ActualKind: ipld.ReprKind_@Kind@} +} +func (x @Kind@) AsLink() (ipld.Link, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AsLink", AppropriateKind: ipld.ReprKindSet_JustLink, ActualKind: ipld.ReprKind_@Kind@} +} + +// @Kind@Assembler has similar purpose as @Kind@, but for (you guessed it) +// the NodeAssembler interface rather than the Node interface. +type @Kind@Assembler struct { + TypeName string +} + +func (x @Kind@Assembler) BeginMap(sizeHint int) (ipld.MapAssembler, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "BeginMap", AppropriateKind: ipld.ReprKindSet_JustMap, ActualKind: ipld.ReprKind_@Kind@} +} +func (x @Kind@Assembler) BeginList(sizeHint int) (ipld.ListAssembler, error) { + return nil, ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "BeginList", AppropriateKind: ipld.ReprKindSet_JustList, ActualKind: ipld.ReprKind_@Kind@} +} +func (x @Kind@Assembler) AssignNull() error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignNull", AppropriateKind: ipld.ReprKindSet_JustNull, ActualKind: ipld.ReprKind_@Kind@} +} +func (x @Kind@Assembler) AssignBool(bool) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignBool", AppropriateKind: ipld.ReprKindSet_JustBool, ActualKind: ipld.ReprKind_@Kind@} +} +func (x @Kind@Assembler) AssignInt(int) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignInt", AppropriateKind: ipld.ReprKindSet_JustInt, ActualKind: ipld.ReprKind_@Kind@} +} +func (x @Kind@Assembler) AssignFloat(float64) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignFloat", AppropriateKind: ipld.ReprKindSet_JustFloat, ActualKind: 
ipld.ReprKind_@Kind@} +} +func (x @Kind@Assembler) AssignString(string) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignString", AppropriateKind: ipld.ReprKindSet_JustString, ActualKind: ipld.ReprKind_@Kind@} +} +func (x @Kind@Assembler) AssignBytes([]byte) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignBytes", AppropriateKind: ipld.ReprKindSet_JustBytes, ActualKind: ipld.ReprKind_@Kind@} +} +func (x @Kind@Assembler) AssignLink(ipld.Link) error { + return ipld.ErrWrongKind{TypeName: x.TypeName, MethodName: "AssignLink", AppropriateKind: ipld.ReprKindSet_JustLink, ActualKind: ipld.ReprKind_@Kind@} +} diff --git a/vendor/github.com/ipld/go-ipld-prime/nodeBuilder.go b/vendor/github.com/ipld/go-ipld-prime/nodeBuilder.go new file mode 100644 index 0000000000..ce47cba321 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/nodeBuilder.go @@ -0,0 +1,139 @@ +package ipld + +// NodeAssembler is the interface that describes all the ways we can set values +// in a node that's under construction. +// +// To create a Node, you should start with a NodeBuilder (which contains a +// superset of the NodeAssembler methods, and can return the finished Node +// from its `Build` method). +// +// Why do both this and the NodeBuilder interface exist? +// When creating trees of nodes, recursion works over the NodeAssembler interface. +// This is important to efficient library internals, because avoiding the +// requirement to be able to return a Node at any random point in the process +// relieves internals from needing to implement 'freeze' features. +// (This is useful in turn because implementing those 'freeze' features in a +// language without first-class/compile-time support for them (as golang is) +// would tend to push complexity and costs to execution time; we'd rather not.) 
+type NodeAssembler interface { + BeginMap(sizeHint int) (MapAssembler, error) + BeginList(sizeHint int) (ListAssembler, error) + AssignNull() error + AssignBool(bool) error + AssignInt(int) error + AssignFloat(float64) error + AssignString(string) error + AssignBytes([]byte) error + AssignLink(Link) error + + AssignNode(Node) error // if you already have a completely constructed subtree, this method puts the whole thing in place at once. + + // Prototype returns a NodePrototype describing what kind of value we're assembling. + // + // You often don't need this (because you should be able to + // just feed data and check errors), but it's here. + // + // Using `this.Prototype().NewBuilder()` to produce a new `Node`, + // then giving that node to `this.AssignNode(n)` should always work. + // (Note that this is not necessarily an _exclusive_ statement on what + // sort of values will be accepted by `this.AssignNode(n)`.) + Prototype() NodePrototype +} + +// MapAssembler assembles a map node! (You guessed it.) +// +// Methods on MapAssembler must be called in a valid order: +// assemble a key, then assemble a value, then loop as long as desired; +// when finished, call 'Finish'. +// +// Incorrect order invocations will panic. +// Calling AssembleKey twice in a row will panic; +// calling AssembleValue before finishing using the NodeAssembler from AssembleKey will panic; +// calling AssembleValue twice in a row will panic; +// etc. +// +// Note that the NodeAssembler yielded from AssembleKey has additional behavior: +// if the node assembled there matches a key already present in the map, +// that assembler will emit the error! +type MapAssembler interface { + AssembleKey() NodeAssembler // must be followed by call to AssembleValue. + AssembleValue() NodeAssembler // must be called immediately after AssembleKey. + + AssembleEntry(k string) (NodeAssembler, error) // shortcut combining AssembleKey and AssembleValue into one step; valid when the key is a string kind. 
+ + Finish() error + + // KeyPrototype returns a NodePrototype that knows how to build keys of a type this map uses. + // + // You often don't need this (because you should be able to + // just feed data and check errors), but it's here. + // + // For all Data Model maps, this will answer with a basic concept of "string". + // For Schema typed maps, this may answer with a more complex type (potentially even a struct type). + KeyPrototype() NodePrototype + + // ValuePrototype returns a NodePrototype that knows how to build values this map can contain. + // + // You often don't need this (because you should be able to + // just feed data and check errors), but it's here. + // + // ValuePrototype requires a parameter describing the key in order to say what + // NodePrototype will be acceptable as a value for that key, because when using + // struct types (or union types) from the Schemas system, they behave as maps + // but have different acceptable types for each field (or member, for unions). + // For plain maps (that is, not structs or unions masquerading as maps), + // the empty string can be used as a parameter, and the returned NodePrototype + // can be assumed applicable for all values. + // Using an empty string for a struct or union will return nil, + // as will using any string which isn't a field or member of those types. + // + // (Design note: a string is sufficient for the parameter here rather than + // a full Node, because the only cases where the value types vary are also + // cases where the keys may not be complex.) + ValuePrototype(k string) NodePrototype +} + +type ListAssembler interface { + AssembleValue() NodeAssembler + + Finish() error + + // ValuePrototype returns a NodePrototype that knows how to build values this map can contain. + // + // You often don't need this (because you should be able to + // just feed data and check errors), but it's here. 
+ // + // ValuePrototype, much like the matching method on the MapAssembler interface, + // requires a parameter specifying the index in the list in order to say + // what NodePrototype will be acceptable as a value at that position. + // For many lists (and *all* lists which operate exclusively at the Data Model level), + // this will return the same NodePrototype regardless of the value of 'idx'; + // the only time this value will vary is when operating with a Schema, + // and handling the representation NodeAssembler for a struct type with + // a representation of a list kind. + // If you know you are operating in a situation that won't have varying + // NodePrototypes, it is acceptable to call `ValuePrototype(0)` and use the + // resulting NodePrototype for all reasoning. + ValuePrototype(idx int) NodePrototype +} + +type NodeBuilder interface { + NodeAssembler + + // Build returns the new value after all other assembly has been completed. + // + // A method on the NodeAssembler that finishes assembly of the data must + // be called first (e.g., any of the "Assign*" methods, or "Finish" if + // the assembly was for a map or a list); that finishing method still has + // all responsibility for validating the assembled data and returning + // any errors from that process. + // (Correspondingly, there is no error return from this method.) + Build() Node + + // Resets the builder. It can hereafter be used again. + // Reusing a NodeBuilder can reduce allocations and improve performance. + // + // Only call this if you're going to reuse the builder. + // (Otherwise, it's unnecessary, and may cause an unwanted allocation). 
+ Reset() +} diff --git a/vendor/github.com/ipld/go-ipld-prime/path.go b/vendor/github.com/ipld/go-ipld-prime/path.go new file mode 100644 index 0000000000..fef7b8d66a --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/path.go @@ -0,0 +1,193 @@ +package ipld + +import ( + "strings" +) + +// Path describes a series of steps across a tree or DAG of Node, +// where each segment in the path is a map key or list index +// (literaly, Path is a slice of PathSegment values). +// Path is used in describing progress in a traversal; and +// can also be used as an instruction for traversing from one Node to another. +// Path values will also often be encountered as part of error messages. +// +// (Note that Paths are useful as an instruction for traversing from +// *one* Node to *one* other Node; to do a walk from one Node and visit +// *several* Nodes based on some sort of pattern, look to IPLD Selectors, +// and the 'traversal/selector' package in this project.) +// +// Path values are always relative. +// Observe how 'traversal.Focus' requires both a Node and a Path argument -- +// where to start, and where to go, respectively. +// Similarly, error values which include a Path will be speaking in reference +// to the "starting Node" in whatever context they arose from. +// +// The canonical form of a Path is as a list of PathSegment. +// Each PathSegment is a string; by convention, the string should be +// in UTF-8 encoding and use NFC normalization, but all operations +// will regard the string as its constituent eight-bit bytes. +// +// There are no illegal or magical characters in IPLD Paths +// (in particular, do not mistake them for UNIX system paths). +// IPLD Paths can only go down: that is, each segment must traverse one node. +// There is no ".." which means "go up"; +// and there is no "." which means "stay here". +// IPLD Paths have no magic behavior around characters such as "~". 
+// IPLD Paths do not have a concept of "globs" nor behave specially +// for a path segment string of "*" (but you may wish to see 'Selectors' +// for globbing-like features that traverse over IPLD data). +// +// An empty string is a valid PathSegment. +// (This leads to some unfortunate complications when wishing to represent +// paths in a simple string format; however, consider that maps do exist +// in serialized data in the wild where an empty string is used as the key: +// it is important we be able to correctly describe and address this!) +// +// A string containing "/" (or even being simply "/"!) is a valid PathSegment. +// (As with empty strings, this is unfortunate (in particular, because it +// very much doesn't match up well with expectations popularized by UNIX-like +// filesystems); but, as with empty strings, maps which contain such a key +// certainly exist, and it is important that we be able to regard them!) +// +// A string starting, ending, or otherwise containing the NUL (\x00) byte +// is also a valid PathSegment. This follows from the rule of "a string is +// regarded as its constituent eight-bit bytes": an all-zero byte is not exceptional. +// In golang, this doesn't pose particular difficulty, but note this would be +// of marked concern for languages which have "C-style nul-terminated strings". +// +// For an IPLD Path to be represented as a string, an encoding system +// including escaping is necessary. At present, there is not a single +// canonical specification for such an escaping; we expect to decide one +// in the future, but this is not yet settled and done. +// (This implementation has a 'String' method, but it contains caveats +// and may be ambiguous for some content. This may be fixed in the future.) +type Path struct { + segments []PathSegment +} + +// NewPath returns a Path composed of the given segments. +// +// This constructor function does a defensive copy, +// in case your segments slice should mutate in the future. 
+// (Use NewPathNocopy if this is a performance concern, +// and you're sure you know what you're doing.) +func NewPath(segments []PathSegment) Path { + p := Path{make([]PathSegment, len(segments))} + copy(p.segments, segments) + return p +} + +// NewPathNocopy is identical to NewPath but trusts that +// the segments slice you provide will not be mutated. +func NewPathNocopy(segments []PathSegment) Path { + return Path{segments} +} + +// ParsePath converts a string to an IPLD Path, doing a basic parsing of the +// string using "/" as a delimiter to produce a segmented Path. +// This is a handy, but not a general-purpose nor spec-compliant (!), +// way to create a Path: it cannot represent all valid paths. +// +// Multiple subsequent "/" characters will be silently collapsed. +// E.g., `"foo///bar"` will be treated equivalently to `"foo/bar"`. +// Prefixed and suffixed extraneous "/" characters are also discarded. +// This makes this constructor incapable of handling some possible Path values +// (specifically: paths with empty segements cannot be created with this constructor). +// +// There is no escaping mechanism used by this function. +// This makes this constructor incapable of handling some possible Path values +// (specifically, a path segment containing "/" cannot be created, because it +// will always be intepreted as a segment separator). +// +// No other "cleaning" of the path occurs. See the documentation of the Path struct; +// in particular, note that ".." does not mean "go up", nor does "." mean "stay here" -- +// correspondingly, there isn't anything to "clean" in the same sense as +// 'filepath.Clean' from the standard library filesystem path packages would. +// +// If the provided string contains unprintable characters, or non-UTF-8 +// or non-NFC-canonicalized bytes, no remark will be made about this, +// and those bytes will remain part of the PathSegments in the resulting Path. 
+func ParsePath(pth string) Path { + // FUTURE: we should probably have some escaping mechanism which makes + // it possible to encode a slash in a segment. Specification needed. + ss := strings.FieldsFunc(pth, func(r rune) bool { return r == '/' }) + ssl := len(ss) + p := Path{make([]PathSegment, ssl)} + for i := 0; i < ssl; i++ { + p.segments[i] = PathSegmentOfString(ss[i]) + } + return p +} + +// String representation of a Path is simply the join of each segment with '/'. +// It does not include a leading nor trailing slash. +// +// This is a handy, but not a general-purpose nor spec-compliant (!), +// way to reduce a Path to a string. +// There is no escaping mechanism used by this function, +// and as a result, not all possible valid Path values (such as those with +// empty segments or with segments containing "/") can be encoded unambiguously. +// For Path values containing these problematic segments, ParsePath applied +// to the string returned from this function may return a nonequal Path value. +// +// No escaping for unprintable characters is provided. +// No guarantee that the resulting string is UTF-8 nor NFC canonicalized +// is provided unless all the constituent PathSegment had those properties. +func (p Path) String() string { + l := len(p.segments) + if l == 0 { + return "" + } + sb := strings.Builder{} + for i := 0; i < l-1; i++ { + sb.WriteString(p.segments[i].String()) + sb.WriteByte('/') + } + sb.WriteString(p.segments[l-1].String()) + return sb.String() +} + +// Segements returns a slice of the path segment strings. +// +// It is not lawful to mutate nor append the returned slice. +func (p Path) Segments() []PathSegment { + return p.segments +} + +// Join creates a new path composed of the concatenation of this and the given path's segments. 
+func (p Path) Join(p2 Path) Path { + combinedSegments := make([]PathSegment, len(p.segments)+len(p2.segments)) + copy(combinedSegments, p.segments) + copy(combinedSegments[len(p.segments):], p2.segments) + p.segments = combinedSegments + return p +} + +// AppendSegmentString is as per Join, but a shortcut when appending single segments using strings. +func (p Path) AppendSegment(ps PathSegment) Path { + l := len(p.segments) + combinedSegments := make([]PathSegment, l+1) + copy(combinedSegments, p.segments) + combinedSegments[l] = ps + p.segments = combinedSegments + return p +} + +// AppendSegmentString is as per Join, but a shortcut when appending single segments using strings. +func (p Path) AppendSegmentString(ps string) Path { + return p.AppendSegment(PathSegmentOfString(ps)) +} + +// Parent returns a path with the last of its segments popped off (or +// the zero path if it's already empty). +func (p Path) Parent() Path { + if len(p.segments) == 0 { + return Path{} + } + return Path{p.segments[0 : len(p.segments)-1]} +} + +// Truncate returns a path with only as many segments remaining as requested. +func (p Path) Truncate(i int) Path { + return Path{p.segments[0:i]} +} diff --git a/vendor/github.com/ipld/go-ipld-prime/pathSegment.go b/vendor/github.com/ipld/go-ipld-prime/pathSegment.go new file mode 100644 index 0000000000..fd0238c77d --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/pathSegment.go @@ -0,0 +1,136 @@ +package ipld + +import ( + "strconv" +) + +// PathSegment can describe either a key in a map, or an index in a list. +// +// Create a PathSegment via either ParsePathSegment, PathSegmentOfString, +// or PathSegmentOfInt; or, via one of the constructors of Path, +// which will implicitly create PathSegment internally. +// Using PathSegment's natural zero value directly is discouraged +// (it will act like ParsePathSegment("0"), which likely not what you'd expect). 
+// +// Path segments are "stringly typed" -- they may be interpreted as either strings or ints depending on context. +// A path segment of "123" will be used as a string when traversing a node of map kind; +// and it will be converted to an integer when traversing a node of list kind. +// (If a path segment string cannot be parsed to an int when traversing a node of list kind, then traversal will error.) +// It is not possible to ask which kind (string or integer) a PathSegment is, because that is not defined -- this is *only* intepreted contextually. +// +// Internally, PathSegment will store either a string or an integer, +// depending on how it was constructed, +// and will automatically convert to the other on request. +// (This means if two pieces of code communicate using PathSegment, +// one producing ints and the other expecting ints, +// then they will work together efficiently.) +// PathSegment in a Path produced by ParsePath generally have all strings internally, +// because there is no distinction possible when parsing a Path string +// (and attempting to pre-parse all strings into ints "just in case" would waste time in almost all cases). +// +// Be cautious of attempting to use PathSegment as a map key! +// Due to the implementation detail of internal storage, it's possible for +// PathSegment values which are "equal" per PathSegment.Equal's definition +// to still be unequal in the eyes of golang's native maps. +// You should probably use the string values of the PathSegment as map keys. +// (This has the additional bonus of hitting a special fastpath that the golang +// built-in maps have specifically for plain string keys.) +// +type PathSegment struct { + /* + A quick implementation note about the Go compiler and "union" semantics: + + There are roughly two ways to do "union" semantics in Go. + + The first is to make a struct with each of the values. + + The second is to make an interface and use an unexported method to keep it closed. 
+ + The second tactic provides somewhat nicer semantics to the programmer. + (Namely, it's clearly impossible to have two inhabitants, which is... the point.) + The downside is... putting things in interfaces generally incurs an allocation + (grep your assembly output for "runtime.conv*"). + + The first tactic looks kludgier, and would seem to waste memory + (the struct reserves space for each possible value, even though the semantic is that only one may be non-zero). + However, in most cases, more *bytes* are cheaper than more *allocs* -- + garbage collection costs are dominated by alloc count, not alloc size. + + Because PathSegment is something we expect to put in fairly "hot" paths, + we're using the first tactic. + + (We also currently get away with having no extra discriminator bit + because we use a signed int for indexes, and negative values aren't valid there, + and thus we can use it as a sentinel value. + (Fun note: Empty strings were originally used for this sentinel, + but it turns out empty strings are valid PathSegment themselves, so!)) + */ + + s string + i int +} + +// ParsePathSegment parses a string into a PathSegment, +// handling any escaping if present. +// (Note: there is currently no escaping specified for PathSegments, +// so this is currently functionally equivalent to PathSegmentOfString.) +func ParsePathSegment(s string) PathSegment { + return PathSegment{s: s, i: -1} +} + +// PathSegmentOfString boxes a string into a PathSegment. +// It does not attempt to parse any escaping; use ParsePathSegment for that. +func PathSegmentOfString(s string) PathSegment { + return PathSegment{s: s, i: -1} +} + +// PathSegmentOfInt boxes an int into a PathSegment. +func PathSegmentOfInt(i int) PathSegment { + return PathSegment{i: i} +} + +// containsString is unexported because we use it to see what our *storage* form is, +// but this is considered an implementation detail that's non-semantic. 
+// If it returns false, it implicitly means "containsInt", as these are the only options. +func (ps PathSegment) containsString() bool { + return ps.i < 0 +} + +// String returns the PathSegment as a string. +func (ps PathSegment) String() string { + switch ps.containsString() { + case true: + return ps.s + case false: + return strconv.Itoa(ps.i) + } + panic("unreachable") +} + +// Index returns the PathSegment as an int, +// or returns an error if the segment is a string that can't be parsed as an int. +func (ps PathSegment) Index() (int, error) { + switch ps.containsString() { + case true: + return strconv.Atoi(ps.s) + case false: + return ps.i, nil + } + panic("unreachable") +} + +// Equals checks if two PathSegment values are equal. +// +// Because PathSegment is "stringly typed", this comparison does not +// regard if one of the segments is stored as a string and one is stored as an int; +// if string values of two segments are equal, they are "equal" overall. +// In other words, `PathSegmentOfInt(2).Equals(PathSegmentOfString("2")) == true`! +// (You should still typically prefer this method over converting two segments +// to string and comparing those, because even though that may be functionally +// correct, this method will be faster if they're both ints internally.) +func (x PathSegment) Equals(o PathSegment) bool { + if !x.containsString() && !o.containsString() { + return x.i == o.i + } + return x.String() == o.String() +} diff --git a/vendor/github.com/ipld/go-ipld-prime/schema/errors.go b/vendor/github.com/ipld/go-ipld-prime/schema/errors.go new file mode 100644 index 0000000000..340ed5d96c --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/schema/errors.go @@ -0,0 +1,62 @@ +package schema + +import ( + "fmt" + + "github.com/ipld/go-ipld-prime" +) + +// TODO: errors in this package remain somewhat slapdash. +// +// - ipld.ErrUnmatchable is used as a catch-all in some places, and contains who-knows-what values wrapped in the Reason field. 
+// - sometimes this wraps things like strconv errors... and on the one hand, i'm kinda okay with that; on the other, maybe saying a bit more with types before getting to that kind of shrug would be nice. +// - we probably want to use `Type` values, right? +// - or do we: because then we probably need a `Repr bool` next to it, or lots of messages would be nonsensical. +// - this is *currently* problematic because we don't actually generate type info consts yet. Hopefully soon; but the pain, meanwhile, is... substantial. +// - "substantial" is an understatement. it makes incremental development almost impossible because stringifying error reports turn into nil pointer crashes! +// - other ipld-wide errors like `ipld.ErrWrongKind` *sometimes* refer to a TypeName... but don't *have* to, because they also arise at the merely-datamodel level; what would we do with these? +// - it's undesirable (not to mention intensely forbidden for import cycle reasons) for those error types to refer to schema.Type. +// - if we must have TypeName treated stringily in some cases, is it really useful to use full type info in other cases -- inconsistently? +// - regardless of where we end up with this, some sort of an embed for helping deal with munging and printing this would probably be wise. +// - generally, whether you should expect an "ipld.Err*" or a "schema.Err*" from various methods is quite unclear. +// - it's possible that we should wrap *all* schema-level errors in a single "ipld.ErrSchemaNoMatch" error of some kind, to fix the above. as yet undecided. + +// ErrNoSuchField may be returned from lookup functions on the Node +// interface when a field is requested which doesn't exist, +// or from assigning data into on a MapAssembler for a struct +// when the key doesn't match a field name in the structure +// (or, when assigning data into a ListAssembler and the list size has +// reached out of bounds, in case of a struct with list-like representations!). 
+type ErrNoSuchField struct { + Type Type + + Field ipld.PathSegment +} + +func (e ErrNoSuchField) Error() string { + if e.Type == nil { + return fmt.Sprintf("no such field: {typeinfomissing}.%s", e.Field) + } + return fmt.Sprintf("no such field: %s.%s", e.Type.Name(), e.Field) +} + +// ErrNotUnionStructure means data was fed into a union assembler that can't match the union. +// +// This could have one of several reasons, which are explained in the detail text: +// +// - there are too many entries in the map; +// - the keys of critical entries aren't found; +// - keys are found that aren't any of the expected critical keys; +// - etc. +// +// TypeName is currently a string... see comments at the top of this file for +// remarks on the issues we need to address about these identifiers in errors in general. +type ErrNotUnionStructure struct { + TypeName string + + Detail string +} + +func (e ErrNotUnionStructure) Error() string { + return fmt.Sprintf("cannot match schema: union structure constraints for %s caused rejection: %s", e.TypeName, e.Detail) +} diff --git a/vendor/github.com/ipld/go-ipld-prime/schema/kind.go b/vendor/github.com/ipld/go-ipld-prime/schema/kind.go new file mode 100644 index 0000000000..604a82d247 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/schema/kind.go @@ -0,0 +1,108 @@ +package schema + +import ( + ipld "github.com/ipld/go-ipld-prime" +) + +// Kind is an enum of kind in the IPLD Schema system. +// +// Note that schema.Kind is distinct from ipld.ReprKind! +// Schema kinds include concepts such as "struct" and "enum", which are +// concepts only introduced by the Schema layer, and not present in the +// Data Model layer. 
+type Kind uint8 + +const ( + Kind_Invalid Kind = 0 + Kind_Map Kind = '{' + Kind_List Kind = '[' + Kind_Unit Kind = '1' + Kind_Bool Kind = 'b' + Kind_Int Kind = 'i' + Kind_Float Kind = 'f' + Kind_String Kind = 's' + Kind_Bytes Kind = 'x' + Kind_Link Kind = '/' + Kind_Struct Kind = '$' + Kind_Union Kind = '^' + Kind_Enum Kind = '%' + // FUTURE: Kind_Any = '?'? +) + +func (k Kind) String() string { + switch k { + case Kind_Invalid: + return "Invalid" + case Kind_Map: + return "Map" + case Kind_List: + return "List" + case Kind_Unit: + return "Unit" + case Kind_Bool: + return "Bool" + case Kind_Int: + return "Int" + case Kind_Float: + return "Float" + case Kind_String: + return "String" + case Kind_Bytes: + return "Bytes" + case Kind_Link: + return "Link" + case Kind_Struct: + return "Struct" + case Kind_Union: + return "Union" + case Kind_Enum: + return "Enum" + default: + panic("invalid enumeration value!") + } +} + +// ActsLike returns a constant from the ipld.ReprKind enum describing what +// this schema.Kind acts like at the Data Model layer. +// +// Things with similar names are generally conserved +// (e.g. "map" acts like "map"); +// concepts added by the schema layer have to be mapped onto something +// (e.g. "struct" acts like "map"). +// +// Note that this mapping describes how a typed Node will *act*, programmatically; +// it does not necessarily describe how it will be *serialized* +// (for example, a struct will always act like a map, even if it has a tuple +// representation strategy and thus becomes a list when serialized). +func (k Kind) ActsLike() ipld.ReprKind { + switch k { + case Kind_Invalid: + return ipld.ReprKind_Invalid + case Kind_Map: + return ipld.ReprKind_Map + case Kind_List: + return ipld.ReprKind_List + case Kind_Unit: + return ipld.ReprKind_Bool // maps to 'true'. 
+ case Kind_Bool: + return ipld.ReprKind_Bool + case Kind_Int: + return ipld.ReprKind_Int + case Kind_Float: + return ipld.ReprKind_Float + case Kind_String: + return ipld.ReprKind_String + case Kind_Bytes: + return ipld.ReprKind_Bytes + case Kind_Link: + return ipld.ReprKind_Link + case Kind_Struct: + return ipld.ReprKind_Map // clear enough: fields are keys. + case Kind_Union: + return ipld.ReprKind_Map // REVIEW: unions are tricky. + case Kind_Enum: + return ipld.ReprKind_String // 'AsString' is the one clear thing to define. + default: + panic("invalid enumeration value!") + } +} diff --git a/vendor/github.com/ipld/go-ipld-prime/schema/maybe.go b/vendor/github.com/ipld/go-ipld-prime/schema/maybe.go new file mode 100644 index 0000000000..bdc4693309 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/schema/maybe.go @@ -0,0 +1,9 @@ +package schema + +type Maybe uint8 + +const ( + Maybe_Absent = Maybe(0) + Maybe_Null = Maybe(1) + Maybe_Value = Maybe(2) +) diff --git a/vendor/github.com/ipld/go-ipld-prime/schema/tmpBuilders.go b/vendor/github.com/ipld/go-ipld-prime/schema/tmpBuilders.go new file mode 100644 index 0000000000..3b93599aef --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/schema/tmpBuilders.go @@ -0,0 +1,177 @@ +package schema + +import ( + "fmt" + + "github.com/ipld/go-ipld-prime" +) + +// Everything in this file is __a temporary hack__ and will be __removed__. +// +// These methods will only hang around until more of the "ast" packages are finished; +// thereafter, building schema.Type and schema.TypeSystem values will only be +// possible through first constructing a schema AST, and *then* using Reify(), +// which will validate things correctly, cycle-check, cross-link, etc. +// +// (Meanwhile, we're using these methods in the codegen prototypes.) + +// These methods use Type objects as parameters when pointing to other things, +// but this is... turning out consistently problematic. 
+// Even when we're doing this hacky direct-call doesn't-need-to-be-serializable temp stuff, +// as written, this doesn't actually let us express cyclic things viably! +// The same initialization questions are also going to come up again when we try to make +// concrete values in the output of codegen. +// Maybe it's actually just a bad idea to have our reified Type types use Type pointers at all. +// (I will never get tired of the tongue twisters, evidently.) +// I'm not actually using that much, and it's always avoidable (it's trivial to replace with a map lookup bouncing through a 'ts' variable somewhere). +// And having the AST gen'd types be... just... the thing... sounds nice. It could save a lot of work. +// (It would mean the golang types don't tell you whether the values have been checked for global properties or not, but, eh.) +// (It's not really compatible with "Prototype and Type are the same thing for codegen'd stuff", either (or, we need more interfaces, and to *really* lean into them), but maybe that's okay.) 
+ +func SpawnString(name TypeName) *TypeString { + return &TypeString{typeBase{name, nil}} +} + +func SpawnBool(name TypeName) *TypeBool { + return &TypeBool{typeBase{name, nil}} +} + +func SpawnInt(name TypeName) *TypeInt { + return &TypeInt{typeBase{name, nil}} +} + +func SpawnFloat(name TypeName) *TypeFloat { + return &TypeFloat{typeBase{name, nil}} +} + +func SpawnBytes(name TypeName) *TypeBytes { + return &TypeBytes{typeBase{name, nil}} +} + +func SpawnLink(name TypeName) *TypeLink { + return &TypeLink{typeBase{name, nil}, "", false} +} + +func SpawnLinkReference(name TypeName, pointsTo TypeName) *TypeLink { + return &TypeLink{typeBase{name, nil}, pointsTo, true} +} + +func SpawnList(name TypeName, valueType TypeName, nullable bool) *TypeList { + return &TypeList{typeBase{name, nil}, false, valueType, nullable} +} + +func SpawnMap(name TypeName, keyType TypeName, valueType TypeName, nullable bool) *TypeMap { + return &TypeMap{typeBase{name, nil}, false, keyType, valueType, nullable} +} + +func SpawnStruct(name TypeName, fields []StructField, repr StructRepresentation) *TypeStruct { + v := &TypeStruct{ + typeBase{name, nil}, + fields, + make(map[string]StructField, len(fields)), + repr, + } + for i := range fields { + fields[i].parent = v + v.fieldsMap[fields[i].name] = fields[i] + } + switch repr.(type) { + case StructRepresentation_Stringjoin: + for _, f := range fields { + if f.IsMaybe() { + panic("neither nullable nor optional is supported on struct stringjoin representation") + } + } + } + return v +} +func SpawnStructField(name string, typ TypeName, optional bool, nullable bool) StructField { + return StructField{nil /*populated later*/, name, typ, optional, nullable} +} +func SpawnStructRepresentationMap(renames map[string]string) StructRepresentation_Map { + return StructRepresentation_Map{renames, nil} +} +func SpawnStructRepresentationTuple() StructRepresentation_Tuple { + return StructRepresentation_Tuple{} +} +func 
SpawnStructRepresentationStringjoin(delim string) StructRepresentation_Stringjoin { + return StructRepresentation_Stringjoin{delim} +} + +func SpawnUnion(name TypeName, members []TypeName, repr UnionRepresentation) *TypeUnion { + return &TypeUnion{typeBase{name, nil}, members, repr} +} +func SpawnUnionRepresentationKeyed(table map[string]TypeName) UnionRepresentation_Keyed { + return UnionRepresentation_Keyed{table} +} +func SpawnUnionRepresentationKinded(table map[ipld.ReprKind]TypeName) UnionRepresentation_Kinded { + return UnionRepresentation_Kinded{table} +} + +// The methods relating to TypeSystem are also mutation-heavy and placeholdery. + +func (ts *TypeSystem) Init() { + ts.namedTypes = make(map[TypeName]Type) +} +func (ts *TypeSystem) Accumulate(typ Type) { + typ._Type(ts) + ts.namedTypes[typ.Name()] = typ +} +func (ts TypeSystem) GetTypes() map[TypeName]Type { + return ts.namedTypes +} +func (ts TypeSystem) TypeByName(n string) Type { + return ts.namedTypes[TypeName(n)] +} + +// ValidateGraph checks that all type names referenced are defined. +// +// It does not do any other validations of individual type's sensibleness +// (that should've happened when they were created +// (although also note many of those validates are NYI, +// and are roadmapped for after we research self-hosting)). +func (ts TypeSystem) ValidateGraph() []error { + var ee []error + for tn, t := range ts.namedTypes { + switch t2 := t.(type) { + case *TypeBool, + *TypeInt, + *TypeFloat, + *TypeString, + *TypeBytes, + *TypeEnum: + continue // nothing to check: these are leaf nodes and refer to no other types. 
+ case *TypeLink: + if !t2.hasReferencedType { + continue + } + if _, ok := ts.namedTypes[t2.referencedType]; !ok { + ee = append(ee, fmt.Errorf("type %s refers to missing type %s (as link reference type)", tn, t2.referencedType)) + } + case *TypeStruct: + for _, f := range t2.fields { + if _, ok := ts.namedTypes[f.typ]; !ok { + ee = append(ee, fmt.Errorf("type %s refers to missing type %s (in field %s)", tn, f.typ, f.name)) + } + } + case *TypeMap: + if _, ok := ts.namedTypes[t2.keyType]; !ok { + ee = append(ee, fmt.Errorf("type %s refers to missing type %s (as key type)", tn, t2.keyType)) + } + if _, ok := ts.namedTypes[t2.valueType]; !ok { + ee = append(ee, fmt.Errorf("type %s refers to missing type %s (as key type)", tn, t2.valueType)) + } + case *TypeList: + if _, ok := ts.namedTypes[t2.valueType]; !ok { + ee = append(ee, fmt.Errorf("type %s refers to missing type %s (as key type)", tn, t2.valueType)) + } + case *TypeUnion: + for _, mn := range t2.members { + if _, ok := ts.namedTypes[mn]; !ok { + ee = append(ee, fmt.Errorf("type %s refers to missing type %s (as a member)", tn, mn)) + } + } + } + } + return ee +} diff --git a/vendor/github.com/ipld/go-ipld-prime/schema/type.go b/vendor/github.com/ipld/go-ipld-prime/schema/type.go new file mode 100644 index 0000000000..2d47163dd6 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/schema/type.go @@ -0,0 +1,240 @@ +package schema + +import ( + ipld "github.com/ipld/go-ipld-prime" +) + +type TypeName string // = ast.TypeName + +func (tn TypeName) String() string { return string(tn) } + +// typesystem.Type is an union interface; each of the `Type*` concrete types +// in this package are one of its members. +// +// Specifically, +// +// TypeBool +// TypeString +// TypeBytes +// TypeInt +// TypeFloat +// TypeMap +// TypeList +// TypeLink +// TypeUnion +// TypeStruct +// TypeEnum +// +// are all of the kinds of Type. 
+// +// This is a closed union; you can switch upon the above members without +// including a default case. The membership is closed by the unexported +// '_Type' method; you may use the BurntSushi/go-sumtype tool to check +// your switches for completeness. +// +// Many interesting properties of each Type are only defined for that specific +// type, so it's typical to use a type switch to handle each type of Type. +// (Your humble author is truly sorry for the word-mash that results from +// attempting to describe the types that describe the typesystem.Type.) +// +// For example, to inspect the kind of fields in a struct: you might +// cast a `Type` interface into `TypeStruct`, and then the `Fields()` on +// that `TypeStruct` can be inspected. (`Fields()` isn't defined for any +// other kind of Type.) +type Type interface { + // Unexported marker method to force the union closed. + // Also used to set the internal pointer back to the universe its part of. + _Type(*TypeSystem) + + // Returns a pointer to the TypeSystem this Type is a member of. + TypeSystem() *TypeSystem + + // Returns the string name of the Type. This name is unique within the + // universe this type is a member of, *unless* this type is Anonymous, + // in which case a string describing the type will still be returned, but + // that string will not be required to be unique. + Name() TypeName + + // Returns the Kind of this Type. + // + // The returned value is a 1:1 association with which of the concrete + // "schema.Type*" structs this interface can be cast to. + // + // Note that a schema.Kind is a different enum than ipld.ReprKind; + // and furthermore, there's no strict relationship between them. + // schema.TypedNode values can be described by *two* distinct ReprKinds: + // one which describes how the Node itself will act, + // and another which describes how the Node presents for serialization. 
+ // For some combinations of Type and representation strategy, one or both + // of the ReprKinds can be determined statically; but not always: + // it can sometimes be necessary to inspect the value quite concretely + // (e.g., `schema.TypedNode{}.Representation().ReprKind()`) in order to find + // out exactly how a node will be serialized! This is because some types + // can vary in representation kind based on their value (specifically, + // kinded-representation unions have this property). + Kind() Kind + + // RepresentationBehavior returns a description of how the representation + // of this type will behave in terms of the IPLD Data Model. + // This property varies based on the representation strategy of a type. + // + // In one case, the representation behavior cannot be known statically, + // and varies based on the data: kinded unions have this trait. + // + // This property is used by kinded unions, which require that their members + // all have distinct representation behavior. + // (It follows that a kinded union cannot have another kinded union as a member.) + // + // You may also be interested in a related property that might have been called "TypeBehavior". + // However, this method doesn't exist, because it's a deterministic property of `Kind()`! + // You can use `Kind.ActsLike()` to get type-level behavioral information. 
+ RepresentationBehavior() ipld.ReprKind +} + +var ( + _ Type = &TypeBool{} + _ Type = &TypeString{} + _ Type = &TypeBytes{} + _ Type = &TypeInt{} + _ Type = &TypeFloat{} + _ Type = &TypeMap{} + _ Type = &TypeList{} + _ Type = &TypeLink{} + _ Type = &TypeUnion{} + _ Type = &TypeStruct{} + _ Type = &TypeEnum{} +) + +type typeBase struct { + name TypeName + universe *TypeSystem +} + +type TypeBool struct { + typeBase +} + +type TypeString struct { + typeBase +} + +type TypeBytes struct { + typeBase +} + +type TypeInt struct { + typeBase +} + +type TypeFloat struct { + typeBase +} + +type TypeMap struct { + typeBase + anonymous bool + keyType TypeName // must be ReprKind==string (e.g. Type==String|Enum). + valueType TypeName + valueNullable bool +} + +type TypeList struct { + typeBase + anonymous bool + valueType TypeName + valueNullable bool +} + +type TypeLink struct { + typeBase + referencedType TypeName + hasReferencedType bool + // ...? +} + +type TypeUnion struct { + typeBase + // Members are listed in the order they appear in the schema. + // To find the discriminant info, you must look inside the representation; they all contain a 'table' of some kind in which the member types are the values. + // Note that multiple appearances of the same type as distinct members of the union is not possible. + // While we could do this... A: that's... odd, and nearly never called for; B: not possible with kinded mode; C: imagine the golang-native type switch! it's impossible. + // We rely on this clarity in many ways: most visibly, the type-level Node implementation for a union always uses the type names as if they were map keys! This behavior is consistent for all union representations. 
+ members []TypeName + representation UnionRepresentation +} + +type UnionRepresentation interface{ _UnionRepresentation() } + +func (UnionRepresentation_Keyed) _UnionRepresentation() {} +func (UnionRepresentation_Kinded) _UnionRepresentation() {} +func (UnionRepresentation_Envelope) _UnionRepresentation() {} +func (UnionRepresentation_Inline) _UnionRepresentation() {} + +// A bunch of these tables in union representation might be easier to use if flipped; +// we almost always index into them by type (since that's what we have an ordered list of); +// and they're unique in both directions, so it's equally valid either way. +// The order they're currently written in matches the serial form in the schema AST. + +type UnionRepresentation_Keyed struct { + table map[string]TypeName // key is user-defined freetext +} +type UnionRepresentation_Kinded struct { + table map[ipld.ReprKind]TypeName +} +type UnionRepresentation_Envelope struct { + discriminantKey string + contentKey string + table map[string]TypeName // key is user-defined freetext +} +type UnionRepresentation_Inline struct { + discriminantKey string + table map[string]TypeName // key is user-defined freetext +} + +type TypeStruct struct { + typeBase + // n.b. `Fields` is an (order-preserving!) map in the schema-schema; + // but it's a list here, with the keys denormalized into the value, + // because that's typically how we use it. + fields []StructField + fieldsMap map[string]StructField // same content, indexed for lookup. 
+ representation StructRepresentation +} +type StructField struct { + parent *TypeStruct + name string + typ TypeName + optional bool + nullable bool +} + +type StructRepresentation interface{ _StructRepresentation() } + +func (StructRepresentation_Map) _StructRepresentation() {} +func (StructRepresentation_Tuple) _StructRepresentation() {} +func (StructRepresentation_StringPairs) _StructRepresentation() {} +func (StructRepresentation_Stringjoin) _StructRepresentation() {} + +type StructRepresentation_Map struct { + renames map[string]string + implicits map[string]ImplicitValue +} +type StructRepresentation_Tuple struct{} +type StructRepresentation_StringPairs struct{ sep1, sep2 string } +type StructRepresentation_Stringjoin struct{ sep string } + +type TypeEnum struct { + typeBase + members []string +} + +// ImplicitValue is an sum type holding values that are implicits. +// It's not an 'Any' value because it can't be recursive +// (or to be slightly more specific, it can be one of the recursive kinds, +// but if so, only its empty value is valid here). 
+type ImplicitValue interface{ _ImplicitValue() } + +type ImplicitValue_EmptyList struct{} +type ImplicitValue_EmptyMap struct{} +type ImplicitValue_String struct{ x string } +type ImplicitValue_Int struct{ x int } diff --git a/vendor/github.com/ipld/go-ipld-prime/schema/typeMethods.go b/vendor/github.com/ipld/go-ipld-prime/schema/typeMethods.go new file mode 100644 index 0000000000..41f06470e7 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/schema/typeMethods.go @@ -0,0 +1,255 @@ +package schema + +import ( + ipld "github.com/ipld/go-ipld-prime" +) + +/* cookie-cutter standard interface stuff */ + +func (t *typeBase) _Type(ts *TypeSystem) { + t.universe = ts +} +func (t typeBase) TypeSystem() *TypeSystem { return t.universe } +func (t typeBase) Name() TypeName { return t.name } + +func (TypeBool) Kind() Kind { return Kind_Bool } +func (TypeString) Kind() Kind { return Kind_String } +func (TypeBytes) Kind() Kind { return Kind_Bytes } +func (TypeInt) Kind() Kind { return Kind_Int } +func (TypeFloat) Kind() Kind { return Kind_Float } +func (TypeMap) Kind() Kind { return Kind_Map } +func (TypeList) Kind() Kind { return Kind_List } +func (TypeLink) Kind() Kind { return Kind_Link } +func (TypeUnion) Kind() Kind { return Kind_Union } +func (TypeStruct) Kind() Kind { return Kind_Struct } +func (TypeEnum) Kind() Kind { return Kind_Enum } + +func (TypeBool) RepresentationBehavior() ipld.ReprKind { return ipld.ReprKind_Bool } +func (TypeString) RepresentationBehavior() ipld.ReprKind { return ipld.ReprKind_String } +func (TypeBytes) RepresentationBehavior() ipld.ReprKind { return ipld.ReprKind_Bytes } +func (TypeInt) RepresentationBehavior() ipld.ReprKind { return ipld.ReprKind_Int } +func (TypeFloat) RepresentationBehavior() ipld.ReprKind { return ipld.ReprKind_Float } +func (TypeMap) RepresentationBehavior() ipld.ReprKind { return ipld.ReprKind_Map } +func (TypeList) RepresentationBehavior() ipld.ReprKind { return ipld.ReprKind_List } +func (TypeLink) 
RepresentationBehavior() ipld.ReprKind { return ipld.ReprKind_Link } +func (t TypeUnion) RepresentationBehavior() ipld.ReprKind { + switch t.representation.(type) { + case UnionRepresentation_Keyed: + return ipld.ReprKind_Map + case UnionRepresentation_Kinded: + return ipld.ReprKind_Invalid // you can't know with this one, until you see the value (and thus can its inhabitant's behavior)! + case UnionRepresentation_Envelope: + return ipld.ReprKind_Map + case UnionRepresentation_Inline: + return ipld.ReprKind_Map + default: + panic("unreachable") + } +} +func (t TypeStruct) RepresentationBehavior() ipld.ReprKind { + switch t.representation.(type) { + case StructRepresentation_Map: + return ipld.ReprKind_Map + case StructRepresentation_Tuple: + return ipld.ReprKind_List + case StructRepresentation_StringPairs: + return ipld.ReprKind_String + case StructRepresentation_Stringjoin: + return ipld.ReprKind_String + default: + panic("unreachable") + } +} +func (t TypeEnum) RepresentationBehavior() ipld.ReprKind { + // TODO: this should have a representation strategy switch too; sometimes that will indicate int representation behavior. + return ipld.ReprKind_String +} + +/* interesting methods per Type type */ + +// beware: many of these methods will change when we successfully bootstrap self-hosting. +// +// The current methods return reified Type objects; in the future, there might be less of that. +// Returning reified Type objects requires bouncing lookups through the typesystem map; +// this is unavoidable because we need to handle cycles in definitions. +// However, the extra (and cyclic) pointers that requires won't necessarily jive well if +// we remake the Type types to have close resemblances to the Data Model tree data.) +// +// It's also unfortunate that some of the current methods collide in name with +// the names of the Data Model fields. We might reshuffling things to reduce this. 
+ +// At any rate, all of these changes will come as a sweep once we +// get a self-hosting gen of the schema-schema, not before +// (the effort of updating template references is substantial). + +// IsAnonymous returns true if the type was unnamed. Unnamed types will +// claim to have a Name property like `{Foo:Bar}`, and this is not guaranteed +// to be a unique string for all types in the universe. +func (t TypeMap) IsAnonymous() bool { + return t.anonymous +} + +// KeyType returns the Type of the map keys. +// +// Note that map keys must always be some type which is representable as a +// string in the IPLD Data Model (e.g. either TypeString or TypeEnum). +func (t TypeMap) KeyType() Type { + return t.universe.namedTypes[t.keyType] +} + +// ValueType returns the Type of the map values. +func (t TypeMap) ValueType() Type { + return t.universe.namedTypes[t.valueType] +} + +// ValueIsNullable returns a bool describing if the map values are permitted +// to be null. +func (t TypeMap) ValueIsNullable() bool { + return t.valueNullable +} + +// IsAnonymous returns true if the type was unnamed. Unnamed types will +// claim to have a Name property like `[Foo]`, and this is not guaranteed +// to be a unique string for all types in the universe. +func (t TypeList) IsAnonymous() bool { + return t.anonymous +} + +// ValueType returns the Type of the list values. +func (t TypeList) ValueType() Type { + return t.universe.namedTypes[t.valueType] +} + +// ValueIsNullable returns a bool describing if the list values are permitted +// to be null. +func (t TypeList) ValueIsNullable() bool { + return t.valueNullable +} + +// Members returns the list of all types that are possible inhabitants of this union. 
+func (t TypeUnion) Members() []Type { + a := make([]Type, len(t.members)) + for i := range t.members { + a[i] = t.universe.namedTypes[t.members[i]] + } + return a +} + +func (t TypeUnion) RepresentationStrategy() UnionRepresentation { + return t.representation +} + +func (r UnionRepresentation_Keyed) GetDiscriminant(t Type) string { + for d, t2 := range r.table { + if t2 == t.Name() { + return d + } + } + panic("that type isn't a member of this union") +} + +// GetMember returns type info for the member matching the kind argument, +// or may return nil if that kind is not mapped to a member of this union. +func (r UnionRepresentation_Kinded) GetMember(k ipld.ReprKind) TypeName { + return r.table[k] +} + +// Fields returns a slice of descriptions of the object's fields. +func (t TypeStruct) Fields() []StructField { + a := make([]StructField, len(t.fields)) + for i := range t.fields { + a[i] = t.fields[i] + } + return a +} + +// Field looks up a StructField by name, or returns nil if no such field. +func (t TypeStruct) Field(name string) *StructField { + if v, ok := t.fieldsMap[name]; ok { + return &v + } + return nil +} + +// Parent returns the type information that this field describes a part of. +// +// While in many cases, you may know the parent already from context, +// there may still be situations where want to pass around a field and +// not need to continue passing down the parent type with it; this method +// helps your code be less redundant in such a situation. +// (You'll find this useful for looking up any rename directives, for example, +// when holding onto a field, since that requires looking up information from +// the representation strategy, which is a property of the type as a whole.) +func (f StructField) Parent() *TypeStruct { return f.parent } + +// Name returns the string name of this field. The name is the string that +// will be used as a map key if the structure this field is a member of is +// serialized as a map representation. 
+func (f StructField) Name() string { return f.name } + +// Type returns the Type of this field's value. Note the field may +// also be unset if it is either Optional or Nullable. +func (f StructField) Type() Type { return f.parent.universe.namedTypes[f.typ] } + +// IsOptional returns true if the field is allowed to be absent from the object. +// If IsOptional is false, the field may be absent from the serial representation +// of the object entirely. +// +// Note being optional is different than saying the value is permitted to be null! +// A field may be both nullable and optional simultaneously, or either, or neither. +func (f StructField) IsOptional() bool { return f.optional } + +// IsNullable returns true if the field value is allowed to be null. +// +// If is Nullable is false, note that it's still possible that the field value +// will be absent if the field is Optional! Being nullable is unrelated to +// whether the field's presence is optional as a whole. +// +// Note that a field may be both nullable and optional simultaneously, +// or either, or neither. +func (f StructField) IsNullable() bool { return f.nullable } + +// IsMaybe returns true if the field value is allowed to be either null or absent. +// +// This is a simple "or" of the two properties, +// but this method is a shorthand that turns out useful often. +func (f StructField) IsMaybe() bool { return f.nullable || f.optional } + +func (t TypeStruct) RepresentationStrategy() StructRepresentation { + return t.representation +} + +func (r StructRepresentation_Map) GetFieldKey(field StructField) string { + if n, ok := r.renames[field.name]; ok { + return n + } + return field.name +} + +func (r StructRepresentation_Stringjoin) GetDelim() string { + return r.sep +} + +// Members returns a slice the strings which are valid inhabitants of this enum. 
+func (t TypeEnum) Members() []string {
+	a := make([]string, len(t.members))
+	for i := range t.members {
+		a[i] = t.members[i]
+	}
+	return a
+}
+
+// Links can keep a referenced type, which is a hint only about the data on the
+// other side of the link, not something that can be explicitly validated without
+// loading the link
+
+// HasReferencedType returns true if the link has a hint about the type it references,
+// false if it's generic
+func (t TypeLink) HasReferencedType() bool {
+	return t.hasReferencedType
+}
+
+// ReferencedType returns the type hint for the node on the other side of the link
+func (t TypeLink) ReferencedType() Type {
+	return t.universe.namedTypes[t.referencedType]
+}
diff --git a/vendor/github.com/ipld/go-ipld-prime/schema/typedNode.go b/vendor/github.com/ipld/go-ipld-prime/schema/typedNode.go
new file mode 100644
index 0000000000..3ae60ebc14
--- /dev/null
+++ b/vendor/github.com/ipld/go-ipld-prime/schema/typedNode.go
@@ -0,0 +1,73 @@
+package schema
+
+import (
+	"github.com/ipld/go-ipld-prime"
+)
+
+// schema.TypedNode is a superset of the ipld.Node interface, and has additional behaviors.
+//
+// A schema.TypedNode can be inspected for its schema.Type and schema.Kind,
+// which conveys much more and richer information than the Data Model layer
+// ipld.ReprKind.
+//
+// There are many different implementations of schema.TypedNode.
+// One implementation can wrap any other existing ipld.Node (i.e., it's zero-copy)
+// and promises that it has *already* been validated to match the typesystem.Type;
+// another implementation similarly wraps any other existing ipld.Node, but
+// defers to the typesystem validation checking to fields that are accessed;
+// and when using code generation tools, all of the generated native Golang
+// types produced by the codegen will each individually implement schema.TypedNode.
+// +// Typed nodes sometimes have slightly different behaviors than plain nodes: +// For example, when looking up fields on a typed node that's a struct, +// the error returned for a lookup with a key that's not a field name will +// be ErrNoSuchField (instead of ErrNotExists). +// These behaviors apply to the schema.TypedNode only and not their representations; +// continuing the example, the .Representation().LookupByString() method on +// that same node for the same key as plain `.LookupByString()` will still +// return ErrNotExists, because the representation isn't a schema.TypedNode! +type TypedNode interface { + // schema.TypedNode acts just like a regular Node for almost all purposes; + // which ipld.ReprKind it acts as is determined by the TypeKind. + // (Note that the representation strategy of the type does *not* affect + // the ReprKind of schema.TypedNode -- rather, the representation strategy + // affects the `.Representation().ReprKind()`.) + // + // For example: if the `.Type().Kind()` of this node is "struct", + // it will act like ReprKind() == "map" + // (even if Type().(Struct).ReprStrategy() is "tuple"). + ipld.Node + + // Type returns a reference to the reified schema.Type value. + Type() Type + + // Representation returns an ipld.Node which sees the data in this node + // in its representation form. + // + // For example: if the `.Type().Kind()` of this node is "struct", + // `.Representation().Kind()` may vary based on its representation strategy: + // if the representation strategy is "map", then it will be ReprKind=="map"; + // if the streatgy is "tuple", then it will be ReprKind=="list". + Representation() ipld.Node +} + +// schema.TypedLinkNode is a superset of the schema.TypedNode interface, and has one additional behavior. 
+// +// A schema.TypedLinkNode contains a hint for the appropriate node builder to use for loading data +// on the other side of the link contained within the node, so that it can be assembled +// into a node representation and validated against the schema as quickly as possible +// +// So, for example, if you wanted to support loading the other side of a link +// with a code-gen'd node builder while utilizing the automatic loading facilities +// of the traversal package, you could write a LinkNodeBuilderChooser as follows: +// +// func LinkNodeBuilderChooser(lnk ipld.Link, lnkCtx ipld.LinkContext) ipld.NodePrototype { +// if tlnkNd, ok := lnkCtx.LinkNode.(schema.TypedLinkNode); ok { +// return tlnkNd.LinkTargetNodePrototype() +// } +// return basicnode.Prototype__Any{} +// } +// +type TypedLinkNode interface { + LinkTargetNodePrototype() ipld.NodePrototype +} diff --git a/vendor/github.com/ipld/go-ipld-prime/schema/typesystem.go b/vendor/github.com/ipld/go-ipld-prime/schema/typesystem.go new file mode 100644 index 0000000000..8e1420c2c6 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/schema/typesystem.go @@ -0,0 +1,14 @@ +package schema + +type TypeSystem struct { + // namedTypes is the set of all named types in this universe. + // The map's key is the value's Name() property and must be unique. + // + // The IsAnonymous property is false for all values in this map that + // support the IsAnonymous property. + // + // Each Type in the universe may only refer to other types in their + // definition if those type are either A) in this namedTypes map, + // or B) are IsAnonymous==true. + namedTypes map[TypeName]Type +} diff --git a/vendor/github.com/ipld/go-ipld-prime/schema/validate.go b/vendor/github.com/ipld/go-ipld-prime/schema/validate.go new file mode 100644 index 0000000000..c9465261dc --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/schema/validate.go @@ -0,0 +1,70 @@ +package schema + +/* + Okay, so. 
 There are several fun considerations for a "validate" method.
+
+	---
+
+	There's two radically different approaches to "validate"/"reify":
+
+	- Option 1: Look at the schema.Type info and check if a data node seems
+	  to match it -- recursing on the type info.
+	- Option 2: Use the schema.Type{}.RepresentationNodeBuilder() to feed data
+	  into it -- recursing on what the nodebuilder already expresses.
+
+	(Option 2 also needs to take a `memStorage ipld.NodeBuilder` param, btw,
+	for handling all the cases where we *aren't* doing codegen.)
+
+	Option 1 provides a little more opportunity for returning multiple errors.
+	Option 2 will generally have a hard time with that (nodebuilders are not
+	necessarily in a valid state after their first error encounter).
+
+	As a result of having these two options at all, we may indeed end up with
+	at least two very different functions -- despite seeming to do similar
+	things, their interior will radically diverge.
+
+	---
+
+	We may also need to consider distinct reification paths: we may want one
+	that returns a new node tree which is eagerly converted to schema.TypedNode
+	recursively; and another that returns a lazyNode which wraps things
+	with their typed node constraints only as they're requested.
+	(Note that the latter would have interesting implications for any code
+	which has expectations about pointer equality consistency.)
+
+	---
+
+	A further fun issue which needs consideration: well, I'll just save a snip
+	of prospective docs I wrote while trying to iterate on these functions:
+
+	// Note that using Validate on a node that's already a schema.TypedNode is likely
+	// to be nonsensical. In many schemas, the schema.TypedNode tree is actually a
+	// different depth than its representational tree (e.g. unions can cause this),
+
+	... and that's ... that's a fairly sizable issue that needs resolving.
+ There's a couple of different ways to handle some of the behaviors around + unions, and some of them make the tradeoff described above, and I'm really + unsure if all the implications have been sussed out yet. We should defer + writing code that depends on this issue until gathering some more info. + + --- + + One more note: about returning multiple errors from a Validate function: + there's an upper bound of the utility of the thing. Going farther than the + first parse error is nice, but it will still hit limits: for example, + upon encountering a union and failing to match it, we can't generally + produce further errors from anywhere deeper in the tree without them being + combinatorial "if previous juncture X was type Y, then..." nonsense. + (This applies to all recursive kinds to some degree, but it's especially + rough with unions. For most of the others, it's flatly a missing field, + or an excessive field, or a leaf error; with unions it can be hard to tell.) + + --- + + And finally: both "Validate" and "Reify" methods might actually belong + in the schema.TypedNode package -- if they make *any* reference to `schema.TypedNode`, + then they have no choice (otherwise, cyclic imports would occur). + If we make a "Validate" that works purely on the schema.Type info, and + returns *only* errors: only then we can have it in the schema package. + +*/ diff --git a/vendor/github.com/ipld/go-ipld-prime/traversal/common.go b/vendor/github.com/ipld/go-ipld-prime/traversal/common.go new file mode 100644 index 0000000000..de9bf2adea --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/traversal/common.go @@ -0,0 +1,47 @@ +package traversal + +import ( + "context" + "fmt" + "io" + + ipld "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/schema" +) + +// init sets all the values in TraveralConfig to reasonable defaults +// if they're currently the zero value. 
+// +// Note that you're absolutely going to need to replace the +// LinkLoader and LinkNodeBuilderChooser if you want automatic link traversal; +// the defaults return error and/or panic. +func (tc *Config) init() { + if tc.Ctx == nil { + tc.Ctx = context.Background() + } + if tc.LinkLoader == nil { + tc.LinkLoader = func(ipld.Link, ipld.LinkContext) (io.Reader, error) { + return nil, fmt.Errorf("no link loader configured") + } + } + if tc.LinkTargetNodePrototypeChooser == nil { + tc.LinkTargetNodePrototypeChooser = func(lnk ipld.Link, lnkCtx ipld.LinkContext) (ipld.NodePrototype, error) { + if tlnkNd, ok := lnkCtx.LinkNode.(schema.TypedLinkNode); ok { + return tlnkNd.LinkTargetNodePrototype(), nil + } + return nil, fmt.Errorf("no LinkTargetNodePrototypeChooser configured") + } + } + if tc.LinkStorer == nil { + tc.LinkStorer = func(ipld.LinkContext) (io.Writer, ipld.StoreCommitter, error) { + return nil, nil, fmt.Errorf("no link storer configured") + } + } +} + +func (prog *Progress) init() { + if prog.Cfg == nil { + prog.Cfg = &Config{} + } + prog.Cfg.init() +} diff --git a/vendor/github.com/ipld/go-ipld-prime/traversal/doc.go b/vendor/github.com/ipld/go-ipld-prime/traversal/doc.go new file mode 100644 index 0000000000..822a9ec760 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/traversal/doc.go @@ -0,0 +1,57 @@ +// This package provides functional utilities for traversing and transforming +// IPLD nodes. +// +// The traversal.Path type provides a description of how to perform +// several steps across a Node tree. These are dual purpose: +// Paths can be used as instructions to do some traversal, and +// Paths are accumulated during traversals as a log of progress. +// +// "Focus" functions provide syntactic sugar for using ipld.Path to jump +// to a Node deep in a tree of other Nodes. +// +// "FocusedTransform" functions can do the same such deep jumps, and support +// mutation as well! 
+// (Of course, since ipld.Node is an immutable interface, more precisely
+// speaking, "transformations" are implemented rebuilding trees of nodes to
+// emulate mutation in a copy-on-write way.)
+//
+// "Walk" functions perform a walk of a Node graph, and apply visitor
+// functions to multiple Nodes. The more advanced Walk functions can be guided
+// by Selectors, which provide a declarative mechanism for guiding the
+// traversal and filtering which Nodes are of interest.
+// (See the selector sub-package for more detail.)
+//
+// "WalkTransforming" is similar to Traverse, but with support for mutations.
+// Like "FocusedTransform", "WalkTransforming" operates in a copy-on-write way.
+//
+// All of these functions -- the "Focus*" and "Walk*" family alike --
+// work via callbacks: they do the traversal, and call a user-provided function
+// with a handle to the reached Node. Further "Focus" and "Walk" can be used
+// recursively within this callback.
+//
+// All of these functions -- the "Focus*" and "Walk*" family alike --
+// include support for automatic resolution and loading of new Node trees
+// whenever IPLD Links are encountered. This can be configured freely
+// by providing LinkLoader interfaces to the traversal.Config.
+//
+// Some notes on the limits of usage:
+//
+// The "*Transform" family of methods is most appropriate for patterns of usage
+// which resemble point mutations.
+// More general transformations -- zygohylohistomorphisms, etc -- will be best
+// implemented by composing the read-only systems (e.g. Focus, Traverse) and
+// handling the accumulation in the visitor functions.
+//
+// (Why? The "point mutation" use-case gets core library support because
+// it's both high utility and highly clear how to implement it.
+// More advanced transformations are nontrivial to provide generalized support
+// for, for three reasons: efficiency is hard; not all existing research into
+// categorical recursion schemes is necessarily applicable without modification
+// (efficient behavior in a merkle-tree context is not the same as efficient
+// behavior on uniform memory!); and we have the further compounding complexity
+// of the range of choices available for underlying Node implementation.
+// Therefore, attempts at generalization are not included here; handling these
+// issues in concrete cases is easy, so we call it an application logic concern.
+// However, exploring categorical recursion schemes as a library is encouraged!)
+//
+package traversal
diff --git a/vendor/github.com/ipld/go-ipld-prime/traversal/fns.go b/vendor/github.com/ipld/go-ipld-prime/traversal/fns.go
new file mode 100644
index 0000000000..d0902dfbbe
--- /dev/null
+++ b/vendor/github.com/ipld/go-ipld-prime/traversal/fns.go
@@ -0,0 +1,69 @@
+package traversal
+
+import (
+	"context"
+
+	ipld "github.com/ipld/go-ipld-prime"
+)
+
+// This file defines interfaces for things users provide,
+// plus a few of the parameters they'll need to receive.
+//--------------------------------------------------------
+
+// VisitFn is a read-only visitor.
+type VisitFn func(Progress, ipld.Node) error
+
+// TransformFn is like a visitor that can also return a new Node to replace the visited one.
+type TransformFn func(Progress, ipld.Node) (ipld.Node, error)
+
+// AdvVisitFn is like VisitFn, but for use with AdvTraversal: it gets additional arguments describing *why* this node is visited.
+type AdvVisitFn func(Progress, ipld.Node, VisitReason) error
+
+// VisitReason provides additional information to traversals using AdvVisitFn.
+type VisitReason byte
+
+const (
+	VisitReason_SelectionMatch     VisitReason = 'm' // Tells AdvVisitFn that this node was explicitly selected. (This is the set of nodes that VisitFn is called for.)
+ VisitReason_SelectionParent VisitReason = 'p' // Tells AdvVisitFn that this node is a parent of one that will be explicitly selected. (These calls only happen if the feature is enabled -- enabling parent detection requires a different algorithm and adds some overhead.) + VisitReason_SelectionCandidate VisitReason = 'x' // Tells AdvVisitFn that this node was visited while searching for selection matches. It is not necessarily implied that any explicit match will be a child of this node; only that we had to consider it. (Merkle-proofs generally need to include any node in this group.) +) + +type Progress struct { + Cfg *Config + Path ipld.Path // Path is how we reached the current point in the traversal. + LastBlock struct { // LastBlock stores the Path and Link of the last block edge we had to load. (It will always be zero in traversals with no linkloader.) + Path ipld.Path + Link ipld.Link + } +} + +type Config struct { + Ctx context.Context // Context carried through a traversal. Optional; use it if you need cancellation. + LinkLoader ipld.Loader // Loader used for automatic link traversal. + LinkTargetNodePrototypeChooser LinkTargetNodePrototypeChooser // Chooser for Node implementations to produce during automatic link traversal. + LinkStorer ipld.Storer // Storer used if any mutation features (e.g. traversal.Transform) are used. +} + +// LinkTargetNodePrototypeChooser is a function that returns a NodePrototype based on +// the information in a Link and/or its LinkContext. +// +// A LinkTargetNodePrototypeChooser can be used in a traversal.Config to be clear about +// what kind of Node implementation to use when loading a Link. +// In a simple example, it could constantly return a `basicnode.Prototype__Any{}`. +// In a more complex example, a program using `bind` over native Go types +// could decide what kind of native type is expected, and return a +// `bind.NodeBuilder` for that specific concrete native type. 
+type LinkTargetNodePrototypeChooser func(ipld.Link, ipld.LinkContext) (ipld.NodePrototype, error) + +// SkipMe is a signalling "error" which can be used to tell traverse to skip some data. +// +// SkipMe can be returned by the Config.LinkLoader to skip entire blocks without aborting the walk. +// (This can be useful if you know you don't have data on hand, +// but want to continue the walk in other areas anyway; +// or, if you're doing a way where you know that it's valid to memoize seen +// areas based on Link alone.) +type SkipMe struct{} + +func (SkipMe) Error() string { + return "skip" +} diff --git a/vendor/github.com/ipld/go-ipld-prime/traversal/focus.go b/vendor/github.com/ipld/go-ipld-prime/traversal/focus.go new file mode 100644 index 0000000000..27c6d04c72 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/traversal/focus.go @@ -0,0 +1,183 @@ +package traversal + +import ( + "fmt" + + ipld "github.com/ipld/go-ipld-prime" +) + +// Focus traverses a Node graph according to a path, reaches a single Node, +// and calls the given VisitFn on that reached node. +// +// This function is a helper function which starts a new traversal with default configuration. +// It cannot cross links automatically (since this requires configuration). +// Use the equivalent Focus function on the Progress structure +// for more advanced and configurable walks. +func Focus(n ipld.Node, p ipld.Path, fn VisitFn) error { + return Progress{}.Focus(n, p, fn) +} + +// Get is the equivalent of Focus, but returns the reached node (rather than invoking a callback at the target), +// and does not yield Progress information. +// +// This function is a helper function which starts a new traversal with default configuration. +// It cannot cross links automatically (since this requires configuration). +// Use the equivalent Get function on the Progress structure +// for more advanced and configurable walks. 
+func Get(n ipld.Node, p ipld.Path) (ipld.Node, error) { + return Progress{}.Get(n, p) +} + +// FocusedTransform traverses an ipld.Node graph, reaches a single Node, +// and calls the given TransformFn to decide what new node to replace the visited node with. +// A new Node tree will be returned (the original is unchanged). +// +// This function is a helper function which starts a new traversal with default configuration. +// It cannot cross links automatically (since this requires configuration). +// Use the equivalent FocusedTransform function on the Progress structure +// for more advanced and configurable walks. +func FocusedTransform(n ipld.Node, p ipld.Path, fn TransformFn) (ipld.Node, error) { + return Progress{}.FocusedTransform(n, p, fn) +} + +// Focus traverses a Node graph according to a path, reaches a single Node, +// and calls the given VisitFn on that reached node. +// +// Focus is a read-only traversal. +// See FocusedTransform if looking for a way to do an "update" to a Node. +// +// Provide configuration to this process using the Config field in the Progress object. +// +// This walk will automatically cross links, but requires some configuration +// with link loading functions to do so. +// +// Focus (and the other traversal functions) can be used again again inside the VisitFn! +// By using the traversal.Progress handed to the VisitFn, +// the Path recorded of the traversal so far will continue to be extended, +// and thus continued nested uses of Walk and Focus will see the fully contextualized Path. +func (prog Progress) Focus(n ipld.Node, p ipld.Path, fn VisitFn) error { + n, err := prog.get(n, p, true) + if err != nil { + return err + } + return fn(prog, n) +} + +// Get is the equivalent of Focus, but returns the reached node (rather than invoking a callback at the target), +// and does not yield Progress information. +// +// Provide configuration to this process using the Config field in the Progress object. 
+// +// This walk will automatically cross links, but requires some configuration +// with link loading functions to do so. +// +// If doing several traversals which are nested, consider using the Focus funcion in preference to Get; +// the Focus functions provide updated Progress objects which can be used to do nested traversals while keeping consistent track of progress, +// such that continued nested uses of Walk or Focus or Get will see the fully contextualized Path. +func (prog Progress) Get(n ipld.Node, p ipld.Path) (ipld.Node, error) { + return prog.get(n, p, false) +} + +// get is the internal implementation for Focus and Get. +// It *mutates* the Progress object it's called on, and returns reached nodes. +// For Get calls, trackProgress=false, which avoids some allocations for state tracking that's not needed by that call. +func (prog *Progress) get(n ipld.Node, p ipld.Path, trackProgress bool) (ipld.Node, error) { + prog.init() + segments := p.Segments() + var prev ipld.Node // for LinkContext + for i, seg := range segments { + // Traverse the segment. + switch n.ReprKind() { + case ipld.ReprKind_Invalid: + panic(fmt.Errorf("invalid node encountered at %q", p.Truncate(i))) + case ipld.ReprKind_Map: + next, err := n.LookupByString(seg.String()) + if err != nil { + return nil, fmt.Errorf("error traversing segment %q on node at %q: %s", seg, p.Truncate(i), err) + } + prev, n = n, next + case ipld.ReprKind_List: + intSeg, err := seg.Index() + if err != nil { + return nil, fmt.Errorf("error traversing segment %q on node at %q: the segment cannot be parsed as a number and the node is a list", seg, p.Truncate(i)) + } + next, err := n.LookupByIndex(intSeg) + if err != nil { + return nil, fmt.Errorf("error traversing segment %q on node at %q: %s", seg, p.Truncate(i), err) + } + prev, n = n, next + default: + return nil, fmt.Errorf("cannot traverse node at %q: %s", p.Truncate(i), fmt.Errorf("cannot traverse terminals")) + } + // Dereference any links. 
+ for n.ReprKind() == ipld.ReprKind_Link { + lnk, _ := n.AsLink() + // Assemble the LinkContext in case the Loader or NBChooser want it. + lnkCtx := ipld.LinkContext{ + LinkPath: p.Truncate(i), + LinkNode: n, + ParentNode: prev, + } + // Pick what in-memory format we will build. + np, err := prog.Cfg.LinkTargetNodePrototypeChooser(lnk, lnkCtx) + if err != nil { + return nil, fmt.Errorf("error traversing node at %q: could not load link %q: %s", p.Truncate(i+1), lnk, err) + } + nb := np.NewBuilder() + // Load link! + err = lnk.Load( + prog.Cfg.Ctx, + lnkCtx, + nb, + prog.Cfg.LinkLoader, + ) + if err != nil { + return nil, fmt.Errorf("error traversing node at %q: could not load link %q: %s", p.Truncate(i+1), lnk, err) + } + if trackProgress { + prog.LastBlock.Path = p.Truncate(i + 1) + prog.LastBlock.Link = lnk + } + prev, n = n, nb.Build() + } + } + if trackProgress { + prog.Path = prog.Path.Join(p) + } + return n, nil +} + +// FocusedTransform traverses an ipld.Node graph, reaches a single Node, +// and calls the given TransformFn to decide what new node to replace the visited node with. +// A new Node tree will be returned (the original is unchanged). +// +// If the TransformFn returns the same Node which it was called with, +// then the transform is a no-op, and the Node returned from the +// FocusedTransform call as a whole will also be the same as its starting Node. +// +// Otherwise, the reached node will be "replaced" with the new Node -- meaning +// that new intermediate nodes will be constructed to also replace each +// parent Node that was traversed to get here, thus propagating the changes in +// a copy-on-write fashion -- and the FocusedTransform function as a whole will +// return a new Node containing identical children except for those replaced. +// +// FocusedTransform can be used again inside the applied function! +// This kind of composition can be useful for doing batches of updates. +// E.g. 
if have a large Node graph which contains a 100-element list, and +// you want to replace elements 12, 32, and 95 of that list: +// then you should FocusedTransform to the list first, and inside that +// TransformFn's body, you can replace the entire list with a new one +// that is composed of copies of everything but those elements -- including +// using more TransformFn calls as desired to produce the replacement elements +// if it so happens that those replacement elements are easiest to construct +// by regarding them as incremental updates to the previous values. +// +// Note that anything you can do with the Transform function, you can also +// do with regular Node and NodeBuilder usage directly. Transform just +// does a large amount of the intermediate bookkeeping that's useful when +// creating new values which are partial updates to existing values. +// +// This feature is not yet implemented. +func (prog Progress) FocusedTransform(n ipld.Node, p ipld.Path, fn TransformFn) (ipld.Node, error) { + panic("TODO") // TODO surprisingly different from Focus -- need to store nodes we traversed, and able do building. +} diff --git a/vendor/github.com/ipld/go-ipld-prime/traversal/selector/builder/builder.go b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/builder/builder.go new file mode 100644 index 0000000000..f2e6fc55b9 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/builder/builder.go @@ -0,0 +1,167 @@ +package builder + +import ( + ipld "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/fluent" + selector "github.com/ipld/go-ipld-prime/traversal/selector" +) + +// SelectorSpec is a specification for a selector that can build +// a selector ipld.Node or an actual parsed Selector +type SelectorSpec interface { + Node() ipld.Node + Selector() (selector.Selector, error) +} + +// SelectorSpecBuilder is a utility interface to build selector ipld nodes +// quickly. +// +// It serves two purposes: +// 1. 
Save the user of go-ipld-prime time and mental overhead with an easy +// interface for making selector nodes in much less code without having to remember +// the selector sigils +// 2. Provide a level of protection from selector schema changes, at least in terms +// of naming, if not structure +type SelectorSpecBuilder interface { + ExploreRecursiveEdge() SelectorSpec + ExploreRecursive(limit selector.RecursionLimit, sequence SelectorSpec) SelectorSpec + ExploreUnion(...SelectorSpec) SelectorSpec + ExploreAll(next SelectorSpec) SelectorSpec + ExploreIndex(index int, next SelectorSpec) SelectorSpec + ExploreRange(start int, end int, next SelectorSpec) SelectorSpec + ExploreFields(ExploreFieldsSpecBuildingClosure) SelectorSpec + Matcher() SelectorSpec +} + +// ExploreFieldsSpecBuildingClosure is a function that provided to SelectorSpecBuilder's +// ExploreFields method that assembles the fields map in the selector using +// an ExploreFieldsSpecBuilder +type ExploreFieldsSpecBuildingClosure func(ExploreFieldsSpecBuilder) + +// ExploreFieldsSpecBuilder is an interface for assemble the map of fields to +// selectors in ExploreFields +type ExploreFieldsSpecBuilder interface { + Insert(k string, v SelectorSpec) +} + +type selectorSpecBuilder struct { + np ipld.NodePrototype +} + +type selectorSpec struct { + n ipld.Node +} + +func (ss selectorSpec) Node() ipld.Node { + return ss.n +} + +func (ss selectorSpec) Selector() (selector.Selector, error) { + return selector.ParseSelector(ss.n) +} + +// NewSelectorSpecBuilder creates a SelectorSpecBuilder which will store +// data in the format determined by the given ipld.NodePrototype. 
+func NewSelectorSpecBuilder(np ipld.NodePrototype) SelectorSpecBuilder { + return &selectorSpecBuilder{np} +} + +func (ssb *selectorSpecBuilder) ExploreRecursiveEdge() SelectorSpec { + return selectorSpec{ + fluent.MustBuildMap(ssb.np, 1, func(na fluent.MapAssembler) { + na.AssembleEntry(selector.SelectorKey_ExploreRecursiveEdge).CreateMap(0, func(na fluent.MapAssembler) {}) + }), + } +} + +func (ssb *selectorSpecBuilder) ExploreRecursive(limit selector.RecursionLimit, sequence SelectorSpec) SelectorSpec { + return selectorSpec{ + fluent.MustBuildMap(ssb.np, 1, func(na fluent.MapAssembler) { + na.AssembleEntry(selector.SelectorKey_ExploreRecursive).CreateMap(2, func(na fluent.MapAssembler) { + na.AssembleEntry(selector.SelectorKey_Limit).CreateMap(1, func(na fluent.MapAssembler) { + switch limit.Mode() { + case selector.RecursionLimit_Depth: + na.AssembleEntry(selector.SelectorKey_LimitDepth).AssignInt(limit.Depth()) + case selector.RecursionLimit_None: + na.AssembleEntry(selector.SelectorKey_LimitNone).CreateMap(0, func(na fluent.MapAssembler) {}) + default: + panic("Unsupported recursion limit type") + } + }) + na.AssembleEntry(selector.SelectorKey_Sequence).AssignNode(sequence.Node()) + }) + }), + } +} + +func (ssb *selectorSpecBuilder) ExploreAll(next SelectorSpec) SelectorSpec { + return selectorSpec{ + fluent.MustBuildMap(ssb.np, 1, func(na fluent.MapAssembler) { + na.AssembleEntry(selector.SelectorKey_ExploreAll).CreateMap(1, func(na fluent.MapAssembler) { + na.AssembleEntry(selector.SelectorKey_Next).AssignNode(next.Node()) + }) + }), + } +} +func (ssb *selectorSpecBuilder) ExploreIndex(index int, next SelectorSpec) SelectorSpec { + return selectorSpec{ + fluent.MustBuildMap(ssb.np, 1, func(na fluent.MapAssembler) { + na.AssembleEntry(selector.SelectorKey_ExploreIndex).CreateMap(2, func(na fluent.MapAssembler) { + na.AssembleEntry(selector.SelectorKey_Index).AssignInt(index) + na.AssembleEntry(selector.SelectorKey_Next).AssignNode(next.Node()) + }) + }), + 
} +} + +func (ssb *selectorSpecBuilder) ExploreRange(start int, end int, next SelectorSpec) SelectorSpec { + return selectorSpec{ + fluent.MustBuildMap(ssb.np, 1, func(na fluent.MapAssembler) { + na.AssembleEntry(selector.SelectorKey_ExploreRange).CreateMap(3, func(na fluent.MapAssembler) { + na.AssembleEntry(selector.SelectorKey_Start).AssignInt(start) + na.AssembleEntry(selector.SelectorKey_End).AssignInt(end) + na.AssembleEntry(selector.SelectorKey_Next).AssignNode(next.Node()) + }) + }), + } +} + +func (ssb *selectorSpecBuilder) ExploreUnion(members ...SelectorSpec) SelectorSpec { + return selectorSpec{ + fluent.MustBuildMap(ssb.np, 1, func(na fluent.MapAssembler) { + na.AssembleEntry(selector.SelectorKey_ExploreUnion).CreateList(len(members), func(na fluent.ListAssembler) { + for _, member := range members { + na.AssembleValue().AssignNode(member.Node()) + } + }) + }), + } +} + +func (ssb *selectorSpecBuilder) ExploreFields(specBuilder ExploreFieldsSpecBuildingClosure) SelectorSpec { + return selectorSpec{ + fluent.MustBuildMap(ssb.np, 1, func(na fluent.MapAssembler) { + na.AssembleEntry(selector.SelectorKey_ExploreFields).CreateMap(1, func(na fluent.MapAssembler) { + na.AssembleEntry(selector.SelectorKey_Fields).CreateMap(-1, func(na fluent.MapAssembler) { + specBuilder(exploreFieldsSpecBuilder{na}) + }) + }) + }), + } +} + +func (ssb *selectorSpecBuilder) Matcher() SelectorSpec { + return selectorSpec{ + fluent.MustBuildMap(ssb.np, 1, func(na fluent.MapAssembler) { + na.AssembleEntry(selector.SelectorKey_Matcher).CreateMap(0, func(na fluent.MapAssembler) {}) + }), + } +} + +type exploreFieldsSpecBuilder struct { + na fluent.MapAssembler +} + +func (efsb exploreFieldsSpecBuilder) Insert(field string, s SelectorSpec) { + efsb.na.AssembleEntry(field).AssignNode(s.Node()) +} diff --git a/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreAll.go b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreAll.go new file mode 100644 index 
0000000000..eb8647a149 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreAll.go @@ -0,0 +1,44 @@ +package selector + +import ( + "fmt" + + ipld "github.com/ipld/go-ipld-prime" +) + +// ExploreAll is similar to a `*` -- it traverses all elements of an array, +// or all entries in a map, and applies a next selector to the reached nodes. +type ExploreAll struct { + next Selector // selector for element we're interested in +} + +// Interests for ExploreAll is nil (meaning traverse everything) +func (s ExploreAll) Interests() []ipld.PathSegment { + return nil +} + +// Explore returns the node's selector for all fields +func (s ExploreAll) Explore(n ipld.Node, p ipld.PathSegment) Selector { + return s.next +} + +// Decide always returns false because this is not a matcher +func (s ExploreAll) Decide(n ipld.Node) bool { + return false +} + +// ParseExploreAll assembles a Selector from a ExploreAll selector node +func (pc ParseContext) ParseExploreAll(n ipld.Node) (Selector, error) { + if n.ReprKind() != ipld.ReprKind_Map { + return nil, fmt.Errorf("selector spec parse rejected: selector body must be a map") + } + next, err := n.LookupByString(SelectorKey_Next) + if err != nil { + return nil, fmt.Errorf("selector spec parse rejected: next field must be present in ExploreAll selector") + } + selector, err := pc.ParseSelector(next) + if err != nil { + return nil, err + } + return ExploreAll{selector}, nil +} diff --git a/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreFields.go b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreFields.go new file mode 100644 index 0000000000..f5579c7c9a --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreFields.go @@ -0,0 +1,72 @@ +package selector + +import ( + "fmt" + + ipld "github.com/ipld/go-ipld-prime" +) + +// ExploreFields traverses named fields in a map (or equivalently, struct, if +// traversing on typed/schema nodes) and applies a next 
selector to the +// reached nodes. +// +// Note that a concept of "ExplorePath" (e.g. "foo/bar/baz") can be represented +// as a set of three nexted ExploreFields selectors, each specifying one field. +// (For this reason, we don't have a special "ExplorePath" feature; use this.) +// +// ExploreFields also works for selecting specific elements out of a list; +// if a "field" is a base-10 int, it will be coerced and do the right thing. +// ExploreIndex or ExploreRange is more appropriate, however, and should be preferred. +type ExploreFields struct { + selections map[string]Selector + interests []ipld.PathSegment // keys of above; already boxed as that's the only way we consume them +} + +// Interests for ExploreFields are the fields listed in the selector node +func (s ExploreFields) Interests() []ipld.PathSegment { + return s.interests +} + +// Explore returns the selector for the given path if it is a field in +// the selector node or nil if not +func (s ExploreFields) Explore(n ipld.Node, p ipld.PathSegment) Selector { + return s.selections[p.String()] +} + +// Decide always returns false because this is not a matcher +func (s ExploreFields) Decide(n ipld.Node) bool { + return false +} + +// ParseExploreFields assembles a Selector +// from a ExploreFields selector node +func (pc ParseContext) ParseExploreFields(n ipld.Node) (Selector, error) { + if n.ReprKind() != ipld.ReprKind_Map { + return nil, fmt.Errorf("selector spec parse rejected: selector body must be a map") + } + fields, err := n.LookupByString(SelectorKey_Fields) + if err != nil { + return nil, fmt.Errorf("selector spec parse rejected: fields in ExploreFields selector must be present") + } + if fields.ReprKind() != ipld.ReprKind_Map { + return nil, fmt.Errorf("selector spec parse rejected: fields in ExploreFields selector must be a map") + } + x := ExploreFields{ + make(map[string]Selector, fields.Length()), + make([]ipld.PathSegment, 0, fields.Length()), + } + for itr := fields.MapIterator(); 
!itr.Done(); { + kn, v, err := itr.Next() + if err != nil { + return nil, fmt.Errorf("error during selector spec parse: %s", err) + } + + kstr, _ := kn.AsString() + x.interests = append(x.interests, ipld.PathSegmentOfString(kstr)) + x.selections[kstr], err = pc.ParseSelector(v) + if err != nil { + return nil, err + } + } + return x, nil +} diff --git a/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreIndex.go b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreIndex.go new file mode 100644 index 0000000000..ff0b2d5d83 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreIndex.go @@ -0,0 +1,63 @@ +package selector + +import ( + "fmt" + + ipld "github.com/ipld/go-ipld-prime" +) + +// ExploreIndex traverses a specific index in a list, and applies a next +// selector to the reached node. +type ExploreIndex struct { + next Selector // selector for element we're interested in + interest [1]ipld.PathSegment // index of element we're interested in +} + +// Interests for ExploreIndex is just the index specified by the selector node +func (s ExploreIndex) Interests() []ipld.PathSegment { + return s.interest[:] +} + +// Explore returns the node's selector if +// the path matches the index the index for this selector or nil if not +func (s ExploreIndex) Explore(n ipld.Node, p ipld.PathSegment) Selector { + if n.ReprKind() != ipld.ReprKind_List { + return nil + } + expectedIndex, expectedErr := p.Index() + actualIndex, actualErr := s.interest[0].Index() + if expectedErr != nil || actualErr != nil || expectedIndex != actualIndex { + return nil + } + return s.next +} + +// Decide always returns false because this is not a matcher +func (s ExploreIndex) Decide(n ipld.Node) bool { + return false +} + +// ParseExploreIndex assembles a Selector +// from a ExploreIndex selector node +func (pc ParseContext) ParseExploreIndex(n ipld.Node) (Selector, error) { + if n.ReprKind() != ipld.ReprKind_Map { + return nil, 
fmt.Errorf("selector spec parse rejected: selector body must be a map") + } + indexNode, err := n.LookupByString(SelectorKey_Index) + if err != nil { + return nil, fmt.Errorf("selector spec parse rejected: index field must be present in ExploreIndex selector") + } + indexValue, err := indexNode.AsInt() + if err != nil { + return nil, fmt.Errorf("selector spec parse rejected: index field must be a number in ExploreIndex selector") + } + next, err := n.LookupByString(SelectorKey_Next) + if err != nil { + return nil, fmt.Errorf("selector spec parse rejected: next field must be present in ExploreIndex selector") + } + selector, err := pc.ParseSelector(next) + if err != nil { + return nil, err + } + return ExploreIndex{selector, [1]ipld.PathSegment{ipld.PathSegmentOfInt(indexValue)}}, nil +} diff --git a/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreRange.go b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreRange.go new file mode 100644 index 0000000000..87426194c3 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreRange.go @@ -0,0 +1,87 @@ +package selector + +import ( + "fmt" + + ipld "github.com/ipld/go-ipld-prime" +) + +// ExploreRange traverses a list, and for each element in the range specified, +// will apply a next selector to those reached nodes. 
+type ExploreRange struct { + next Selector // selector for element we're interested in + start int + end int + interest []ipld.PathSegment // index of element we're interested in +} + +// Interests for ExploreRange are all path segments within the iteration range +func (s ExploreRange) Interests() []ipld.PathSegment { + return s.interest +} + +// Explore returns the node's selector if +// the path matches an index in the range of this selector +func (s ExploreRange) Explore(n ipld.Node, p ipld.PathSegment) Selector { + if n.ReprKind() != ipld.ReprKind_List { + return nil + } + index, err := p.Index() + if err != nil { + return nil + } + if index < s.start || index >= s.end { + return nil + } + return s.next +} + +// Decide always returns false because this is not a matcher +func (s ExploreRange) Decide(n ipld.Node) bool { + return false +} + +// ParseExploreRange assembles a Selector +// from a ExploreRange selector node +func (pc ParseContext) ParseExploreRange(n ipld.Node) (Selector, error) { + if n.ReprKind() != ipld.ReprKind_Map { + return nil, fmt.Errorf("selector spec parse rejected: selector body must be a map") + } + startNode, err := n.LookupByString(SelectorKey_Start) + if err != nil { + return nil, fmt.Errorf("selector spec parse rejected: start field must be present in ExploreRange selector") + } + startValue, err := startNode.AsInt() + if err != nil { + return nil, fmt.Errorf("selector spec parse rejected: start field must be a number in ExploreRange selector") + } + endNode, err := n.LookupByString(SelectorKey_End) + if err != nil { + return nil, fmt.Errorf("selector spec parse rejected: end field must be present in ExploreRange selector") + } + endValue, err := endNode.AsInt() + if err != nil { + return nil, fmt.Errorf("selector spec parse rejected: end field must be a number in ExploreRange selector") + } + if startValue >= endValue { + return nil, fmt.Errorf("selector spec parse rejected: end field must be greater than start field in ExploreRange 
selector") + } + next, err := n.LookupByString(SelectorKey_Next) + if err != nil { + return nil, fmt.Errorf("selector spec parse rejected: next field must be present in ExploreRange selector") + } + selector, err := pc.ParseSelector(next) + if err != nil { + return nil, err + } + x := ExploreRange{ + selector, + startValue, + endValue, + make([]ipld.PathSegment, 0, endValue-startValue), + } + for i := startValue; i < endValue; i++ { + x.interest = append(x.interest, ipld.PathSegmentOfInt(i)) + } + return x, nil +} diff --git a/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreRecursive.go b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreRecursive.go new file mode 100644 index 0000000000..68529760ae --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreRecursive.go @@ -0,0 +1,219 @@ +package selector + +import ( + "fmt" + + ipld "github.com/ipld/go-ipld-prime" +) + +// ExploreRecursive traverses some structure recursively. +// To guide this exploration, it uses a "sequence", which is another Selector +// tree; some leaf node in this sequence should contain an ExploreRecursiveEdge +// selector, which denotes the place recursion should occur. +// +// In implementation, whenever evaluation reaches an ExploreRecursiveEdge marker +// in the recursion sequence's Selector tree, the implementation logically +// produces another new Selector which is a copy of the original +// ExploreRecursive selector, but with a decremented maxDepth parameter, and +// continues evaluation thusly. +// +// It is not valid for an ExploreRecursive selector's sequence to contain +// no instances of ExploreRecursiveEdge; it *is* valid for it to contain +// more than one ExploreRecursiveEdge. +// +// ExploreRecursive can contain a nested ExploreRecursive! +// This is comparable to a nested for-loop. 
+// In these cases, any ExploreRecursiveEdge instance always refers to the +// nearest parent ExploreRecursive (in other words, ExploreRecursiveEdge can +// be thought of like the 'continue' statement, or end of a for-loop body; +// it is *not* a 'goto' statement). +// +// Be careful when using ExploreRecursive with a large maxDepth parameter; +// it can easily cause very large traversals (especially if used in combination +// with selectors like ExploreAll inside the sequence). +type ExploreRecursive struct { + sequence Selector // selector for element we're interested in + current Selector // selector to apply to the current node + limit RecursionLimit // the limit for this recursive selector +} + +// RecursionLimit_Mode is an enum that represents the type of a recursion limit +// -- either "depth" or "none" for now +type RecursionLimit_Mode uint8 + +const ( + // RecursionLimit_None means there is no recursion limit + RecursionLimit_None RecursionLimit_Mode = 0 + // RecursionLimit_Depth mean recursion stops after the recursive selector + // is copied to a given depth + RecursionLimit_Depth RecursionLimit_Mode = 1 +) + +// RecursionLimit is a union type that captures all data about the recursion +// limit (both its type and data specific to the type) +type RecursionLimit struct { + mode RecursionLimit_Mode + depth int +} + +// Mode returns the type for this recursion limit +func (rl RecursionLimit) Mode() RecursionLimit_Mode { + return rl.mode +} + +// Depth returns the depth for a depth recursion limit, or 0 otherwise +func (rl RecursionLimit) Depth() int { + if rl.mode != RecursionLimit_Depth { + return 0 + } + return rl.depth +} + +// RecursionLimitDepth returns a depth limited recursion to the given depth +func RecursionLimitDepth(depth int) RecursionLimit { + return RecursionLimit{RecursionLimit_Depth, depth} +} + +// RecursionLimitNone return recursion with no limit +func RecursionLimitNone() RecursionLimit { + return RecursionLimit{RecursionLimit_None, 0} 
+} + +// Interests for ExploreRecursive is empty (meaning traverse everything) +func (s ExploreRecursive) Interests() []ipld.PathSegment { + return s.current.Interests() +} + +// Explore returns the node's selector for all fields +func (s ExploreRecursive) Explore(n ipld.Node, p ipld.PathSegment) Selector { + nextSelector := s.current.Explore(n, p) + limit := s.limit + + if nextSelector == nil { + return nil + } + if !s.hasRecursiveEdge(nextSelector) { + return ExploreRecursive{s.sequence, nextSelector, limit} + } + switch limit.mode { + case RecursionLimit_Depth: + if limit.depth < 2 { + return s.replaceRecursiveEdge(nextSelector, nil) + } + return ExploreRecursive{s.sequence, s.replaceRecursiveEdge(nextSelector, s.sequence), RecursionLimit{RecursionLimit_Depth, limit.depth - 1}} + case RecursionLimit_None: + return ExploreRecursive{s.sequence, s.replaceRecursiveEdge(nextSelector, s.sequence), limit} + default: + panic("Unsupported recursion limit type") + } +} + +func (s ExploreRecursive) hasRecursiveEdge(nextSelector Selector) bool { + _, isRecursiveEdge := nextSelector.(ExploreRecursiveEdge) + if isRecursiveEdge { + return true + } + exploreUnion, isUnion := nextSelector.(ExploreUnion) + if isUnion { + for _, selector := range exploreUnion.Members { + if s.hasRecursiveEdge(selector) { + return true + } + } + } + return false +} + +func (s ExploreRecursive) replaceRecursiveEdge(nextSelector Selector, replacement Selector) Selector { + _, isRecursiveEdge := nextSelector.(ExploreRecursiveEdge) + if isRecursiveEdge { + return replacement + } + exploreUnion, isUnion := nextSelector.(ExploreUnion) + if isUnion { + replacementMembers := make([]Selector, 0, len(exploreUnion.Members)) + for _, selector := range exploreUnion.Members { + newSelector := s.replaceRecursiveEdge(selector, replacement) + if newSelector != nil { + replacementMembers = append(replacementMembers, newSelector) + } + } + if len(replacementMembers) == 0 { + return nil + } + if 
len(replacementMembers) == 1 { + return replacementMembers[0] + } + return ExploreUnion{replacementMembers} + } + return nextSelector +} + +// Decide always returns false because this is not a matcher +func (s ExploreRecursive) Decide(n ipld.Node) bool { + return s.current.Decide(n) +} + +type exploreRecursiveContext struct { + edgesFound int +} + +func (erc *exploreRecursiveContext) Link(s Selector) bool { + _, ok := s.(ExploreRecursiveEdge) + if ok { + erc.edgesFound++ + } + return ok +} + +// ParseExploreRecursive assembles a Selector from a ExploreRecursive selector node +func (pc ParseContext) ParseExploreRecursive(n ipld.Node) (Selector, error) { + if n.ReprKind() != ipld.ReprKind_Map { + return nil, fmt.Errorf("selector spec parse rejected: selector body must be a map") + } + + limitNode, err := n.LookupByString(SelectorKey_Limit) + if err != nil { + return nil, fmt.Errorf("selector spec parse rejected: limit field must be present in ExploreRecursive selector") + } + limit, err := parseLimit(limitNode) + if err != nil { + return nil, err + } + sequence, err := n.LookupByString(SelectorKey_Sequence) + if err != nil { + return nil, fmt.Errorf("selector spec parse rejected: sequence field must be present in ExploreRecursive selector") + } + erc := &exploreRecursiveContext{} + selector, err := pc.PushParent(erc).ParseSelector(sequence) + if err != nil { + return nil, err + } + if erc.edgesFound == 0 { + return nil, fmt.Errorf("selector spec parse rejected: ExploreRecursive must have at least one ExploreRecursiveEdge") + } + return ExploreRecursive{selector, selector, limit}, nil +} + +func parseLimit(n ipld.Node) (RecursionLimit, error) { + if n.ReprKind() != ipld.ReprKind_Map { + return RecursionLimit{}, fmt.Errorf("selector spec parse rejected: limit in ExploreRecursive is a keyed union and thus must be a map") + } + if n.Length() != 1 { + return RecursionLimit{}, fmt.Errorf("selector spec parse rejected: limit in ExploreRecursive is a keyed union and thus 
must be a single-entry map") + } + kn, v, _ := n.MapIterator().Next() + kstr, _ := kn.AsString() + switch kstr { + case SelectorKey_LimitDepth: + maxDepthValue, err := v.AsInt() + if err != nil { + return RecursionLimit{}, fmt.Errorf("selector spec parse rejected: limit field of type depth must be a number in ExploreRecursive selector") + } + return RecursionLimit{RecursionLimit_Depth, maxDepthValue}, nil + case SelectorKey_LimitNone: + return RecursionLimit{RecursionLimit_None, 0}, nil + default: + return RecursionLimit{}, fmt.Errorf("selector spec parse rejected: %q is not a known member of the limit union in ExploreRecursive", kstr) + } +} diff --git a/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreRecursiveEdge.go b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreRecursiveEdge.go new file mode 100644 index 0000000000..2ff62e5ea2 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreRecursiveEdge.go @@ -0,0 +1,47 @@ +package selector + +import ( + "fmt" + + ipld "github.com/ipld/go-ipld-prime" +) + +// ExploreRecursiveEdge is a special sentinel value which is used to mark +// the end of a sequence started by an ExploreRecursive selector: the recursion +// goes back to the initial state of the earlier ExploreRecursive selector, +// and proceeds again (with a decremented maxDepth value). +// +// An ExploreRecursive selector that doesn't contain an ExploreRecursiveEdge +// is nonsensical. Containing more than one ExploreRecursiveEdge is valid. +// An ExploreRecursiveEdge without an enclosing ExploreRecursive is an error. 
+type ExploreRecursiveEdge struct{} + +// Interests should ultimately never get called for an ExploreRecursiveEdge selector +func (s ExploreRecursiveEdge) Interests() []ipld.PathSegment { + panic("Traversed Explore Recursive Edge Node With No Parent") +} + +// Explore should ultimately never get called for an ExploreRecursiveEdge selector +func (s ExploreRecursiveEdge) Explore(n ipld.Node, p ipld.PathSegment) Selector { + panic("Traversed Explore Recursive Edge Node With No Parent") +} + +// Decide should ultimately never get called for an ExploreRecursiveEdge selector +func (s ExploreRecursiveEdge) Decide(n ipld.Node) bool { + panic("Traversed Explore Recursive Edge Node With No Parent") +} + +// ParseExploreRecursiveEdge assembles a Selector +// from a exploreRecursiveEdge selector node +func (pc ParseContext) ParseExploreRecursiveEdge(n ipld.Node) (Selector, error) { + if n.ReprKind() != ipld.ReprKind_Map { + return nil, fmt.Errorf("selector spec parse rejected: selector body must be a map") + } + s := ExploreRecursiveEdge{} + for _, parent := range pc.parentStack { + if parent.Link(s) { + return s, nil + } + } + return nil, fmt.Errorf("selector spec parse rejected: ExploreRecursiveEdge must be beneath ExploreRecursive") +} diff --git a/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreUnion.go b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreUnion.go new file mode 100644 index 0000000000..4261693f32 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/exploreUnion.go @@ -0,0 +1,95 @@ +package selector + +import ( + "fmt" + + ipld "github.com/ipld/go-ipld-prime" +) + +// ExploreUnion allows selection to continue with two or more distinct selectors +// while exploring the same tree of data. 
+// +// ExploreUnion can be used to apply a Matcher on one node (causing it to +// be considered part of a (possibly labelled) result set), while simultaneously +// continuing to explore deeper parts of the tree with another selector, +// for example. +type ExploreUnion struct { + Members []Selector +} + +// Interests for ExploreUnion is: +// - nil (aka all) if any member selector has nil interests +// - the union of values returned by all member selectors otherwise +func (s ExploreUnion) Interests() []ipld.PathSegment { + // Check for any high-cardinality selectors first; if so, shortcircuit. + // (n.b. we're assuming the 'Interests' method is cheap here.) + for _, m := range s.Members { + if m.Interests() == nil { + return nil + } + } + // Accumulate the whitelist of interesting path segments. + // TODO: Dedup? + v := []ipld.PathSegment{} + for _, m := range s.Members { + v = append(v, m.Interests()...) + } + return v +} + +// Explore for a Union selector calls explore for each member selector +// and returns: +// - a new union selector if more than one member returns a selector +// - if exactly one member returns a selector, that selector +// - nil if no members return a selector +func (s ExploreUnion) Explore(n ipld.Node, p ipld.PathSegment) Selector { + // TODO: memory efficient? 
+ nonNilResults := make([]Selector, 0, len(s.Members)) + for _, member := range s.Members { + resultSelector := member.Explore(n, p) + if resultSelector != nil { + nonNilResults = append(nonNilResults, resultSelector) + } + } + if len(nonNilResults) == 0 { + return nil + } + if len(nonNilResults) == 1 { + return nonNilResults[0] + } + return ExploreUnion{nonNilResults} +} + +// Decide returns true for a Union selector if any of the member selectors +// return true +func (s ExploreUnion) Decide(n ipld.Node) bool { + for _, m := range s.Members { + if m.Decide(n) { + return true + } + } + return false +} + +// ParseExploreUnion assembles a Selector +// from an ExploreUnion selector node +func (pc ParseContext) ParseExploreUnion(n ipld.Node) (Selector, error) { + if n.ReprKind() != ipld.ReprKind_List { + return nil, fmt.Errorf("selector spec parse rejected: explore union selector must be a list") + } + x := ExploreUnion{ + make([]Selector, 0, n.Length()), + } + for itr := n.ListIterator(); !itr.Done(); { + _, v, err := itr.Next() + if err != nil { + return nil, fmt.Errorf("error during selector spec parse: %s", err) + } + member, err := pc.ParseSelector(v) + if err != nil { + return nil, err + } + x.Members = append(x.Members, member) + } + return x, nil +} diff --git a/vendor/github.com/ipld/go-ipld-prime/traversal/selector/fieldKeys.go b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/fieldKeys.go new file mode 100644 index 0000000000..ba222d56b3 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/fieldKeys.go @@ -0,0 +1,25 @@ +package selector + +const ( + SelectorKey_Matcher = "." 
+ SelectorKey_ExploreAll = "a" + SelectorKey_ExploreFields = "f" + SelectorKey_ExploreIndex = "i" + SelectorKey_ExploreRange = "r" + SelectorKey_ExploreRecursive = "R" + SelectorKey_ExploreUnion = "|" + SelectorKey_ExploreConditional = "&" + SelectorKey_ExploreRecursiveEdge = "@" + SelectorKey_Next = ">" + SelectorKey_Fields = "f>" + SelectorKey_Index = "i" + SelectorKey_Start = "^" + SelectorKey_End = "$" + SelectorKey_Sequence = ":>" + SelectorKey_Limit = "l" + SelectorKey_LimitDepth = "depth" + SelectorKey_LimitNone = "none" + SelectorKey_StopAt = "!" + SelectorKey_Condition = "&" + // not filling conditional keys since it's not complete +) diff --git a/vendor/github.com/ipld/go-ipld-prime/traversal/selector/matcher.go b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/matcher.go new file mode 100644 index 0000000000..b2305ef863 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/matcher.go @@ -0,0 +1,46 @@ +package selector + +import ( + "fmt" + + ipld "github.com/ipld/go-ipld-prime" +) + +// Matcher marks a node to be included in the "result" set. +// (All nodes traversed by a selector are in the "covered" set (which is a.k.a. +// "the merkle proof"); the "result" set is a subset of the "covered" set.) +// +// In libraries using selectors, the "result" set is typically provided to +// some user-specified callback. +// +// A selector tree with only "explore*"-type selectors and no Matcher selectors +// is valid; it will just generate a "covered" set of nodes and no "result" set. 
+// TODO: From spec: implement conditions and labels +type Matcher struct{} + +// Interests are empty for a matcher (for now) because +// It is always just there to match, not explore further +func (s Matcher) Interests() []ipld.PathSegment { + return []ipld.PathSegment{} +} + +// Explore will return nil because a matcher is a terminal selector +func (s Matcher) Explore(n ipld.Node, p ipld.PathSegment) Selector { + return nil +} + +// Decide is always true for a match cause it's in the result set +// TODO: Implement boolean logic for conditionals +func (s Matcher) Decide(n ipld.Node) bool { + return true +} + +// ParseMatcher assembles a Selector +// from a matcher selector node +// TODO: Parse labels and conditions +func (pc ParseContext) ParseMatcher(n ipld.Node) (Selector, error) { + if n.ReprKind() != ipld.ReprKind_Map { + return nil, fmt.Errorf("selector spec parse rejected: selector body must be a map") + } + return Matcher{}, nil +} diff --git a/vendor/github.com/ipld/go-ipld-prime/traversal/selector/selector.go b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/selector.go new file mode 100644 index 0000000000..b410cefa3d --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/traversal/selector/selector.go @@ -0,0 +1,119 @@ +package selector + +import ( + "fmt" + + ipld "github.com/ipld/go-ipld-prime" +) + +// Selector is the programmatic representation of an IPLD Selector Node +// and can be applied to traverse a given IPLD DAG +type Selector interface { + Interests() []ipld.PathSegment // returns the segments we're likely interested in **or nil** if we're a high-cardinality or expression based matcher and need all segments proposed to us. + Explore(ipld.Node, ipld.PathSegment) Selector // explore one step -- iteration comes from outside (either whole node, or by following suggestions of Interests). returns nil if no interest. 
you have to traverse to the next node yourself (the selector doesn't do it for you because you might be considering multiple selection reasons at the same time). + Decide(ipld.Node) bool +} + +// ParsedParent is created whenever you are parsing a selector node that may have +// child selectors nodes that need to know it +type ParsedParent interface { + Link(s Selector) bool +} + +// ParseContext tracks the progress when parsing a selector +type ParseContext struct { + parentStack []ParsedParent +} + +// ParseSelector creates a Selector that can be traversed from an IPLD Selector node +func ParseSelector(n ipld.Node) (Selector, error) { + return ParseContext{}.ParseSelector(n) +} + +// ParseSelector creates a Selector from an IPLD Selector Node with the given context +func (pc ParseContext) ParseSelector(n ipld.Node) (Selector, error) { + if n.ReprKind() != ipld.ReprKind_Map { + return nil, fmt.Errorf("selector spec parse rejected: selector is a keyed union and thus must be a map") + } + if n.Length() != 1 { + return nil, fmt.Errorf("selector spec parse rejected: selector is a keyed union and thus must be single-entry map") + } + kn, v, _ := n.MapIterator().Next() + kstr, _ := kn.AsString() + // Switch over the single key to determine which selector body comes next. + // (This switch is where the keyed union discriminators concretely happen.) 
+ switch kstr { + case SelectorKey_ExploreFields: + return pc.ParseExploreFields(v) + case SelectorKey_ExploreAll: + return pc.ParseExploreAll(v) + case SelectorKey_ExploreIndex: + return pc.ParseExploreIndex(v) + case SelectorKey_ExploreRange: + return pc.ParseExploreRange(v) + case SelectorKey_ExploreUnion: + return pc.ParseExploreUnion(v) + case SelectorKey_ExploreRecursive: + return pc.ParseExploreRecursive(v) + case SelectorKey_ExploreRecursiveEdge: + return pc.ParseExploreRecursiveEdge(v) + case SelectorKey_Matcher: + return pc.ParseMatcher(v) + default: + return nil, fmt.Errorf("selector spec parse rejected: %q is not a known member of the selector union", kstr) + } +} + +// PushParent puts a parent onto the stack of parents for a parse context +func (pc ParseContext) PushParent(parent ParsedParent) ParseContext { + l := len(pc.parentStack) + parents := make([]ParsedParent, 0, l+1) + parents = append(parents, parent) + parents = append(parents, pc.parentStack...) + return ParseContext{parents} +} + +// SegmentIterator iterates either a list or a map, generating PathSegments +// instead of indexes or keys +type SegmentIterator interface { + Next() (pathSegment ipld.PathSegment, value ipld.Node, err error) + Done() bool +} + +// NewSegmentIterator generates a new iterator based on the node type +func NewSegmentIterator(n ipld.Node) SegmentIterator { + if n.ReprKind() == ipld.ReprKind_List { + return listSegmentIterator{n.ListIterator()} + } + return mapSegmentIterator{n.MapIterator()} +} + +type listSegmentIterator struct { + ipld.ListIterator +} + +func (lsi listSegmentIterator) Next() (pathSegment ipld.PathSegment, value ipld.Node, err error) { + i, v, err := lsi.ListIterator.Next() + return ipld.PathSegmentOfInt(i), v, err +} + +func (lsi listSegmentIterator) Done() bool { + return lsi.ListIterator.Done() +} + +type mapSegmentIterator struct { + ipld.MapIterator +} + +func (msi mapSegmentIterator) Next() (pathSegment ipld.PathSegment, value ipld.Node, err 
error) { + k, v, err := msi.MapIterator.Next() + if err != nil { + return ipld.PathSegment{}, v, err + } + kstr, _ := k.AsString() + return ipld.PathSegmentOfString(kstr), v, err +} + +func (msi mapSegmentIterator) Done() bool { + return msi.MapIterator.Done() +} diff --git a/vendor/github.com/ipld/go-ipld-prime/traversal/walk.go b/vendor/github.com/ipld/go-ipld-prime/traversal/walk.go new file mode 100644 index 0000000000..27ec217420 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/traversal/walk.go @@ -0,0 +1,229 @@ +package traversal + +import ( + "fmt" + + ipld "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/traversal/selector" +) + +// WalkMatching walks a graph of Nodes, deciding which to visit by applying a Selector, +// and calling the given VisitFn on those that the Selector deems a match. +// +// This function is a helper function which starts a new walk with default configuration. +// It cannot cross links automatically (since this requires configuration). +// Use the equivalent WalkMatching function on the Progress structure +// for more advanced and configurable walks. +func WalkMatching(n ipld.Node, s selector.Selector, fn VisitFn) error { + return Progress{}.WalkMatching(n, s, fn) +} + +// WalkAdv is identical to WalkMatching, except it is called for *all* nodes +// visited (not just matching nodes), together with the reason for the visit. +// An AdvVisitFn is used instead of a VisitFn, so that the reason can be provided. +// +// This function is a helper function which starts a new walk with default configuration. +// It cannot cross links automatically (since this requires configuration). +// Use the equivalent WalkAdv function on the Progress structure +// for more advanced and configurable walks. 
+func WalkAdv(n ipld.Node, s selector.Selector, fn AdvVisitFn) error { + return Progress{}.WalkAdv(n, s, fn) +} + +// WalkTransforming walks a graph of Nodes, deciding which to alter by applying a Selector, +// and calls the given TransformFn to decide what new node to replace the visited node with. +// A new Node tree will be returned (the original is unchanged). +// +// This function is a helper function which starts a new walk with default configuration. +// It cannot cross links automatically (since this requires configuration). +// Use the equivalent WalkTransforming function on the Progress structure +// for more advanced and configurable walks. +func WalkTransforming(n ipld.Node, s selector.Selector, fn TransformFn) (ipld.Node, error) { + return Progress{}.WalkTransforming(n, s, fn) +} + +// WalkMatching walks a graph of Nodes, deciding which to visit by applying a Selector, +// and calling the given VisitFn on those that the Selector deems a match. +// +// WalkMatching is a read-only traversal. +// See WalkTransforming if looking for a way to do "updates" to a tree of nodes. +// +// Provide configuration to this process using the Config field in the Progress object. +// +// This walk will automatically cross links, but requires some configuration +// with link loading functions to do so. +// +// Traversals are defined as visiting a (node,path) tuple. +// This is important to note because when walking DAGs with Links, +// it means you may visit the same node multiple times +// due to having reached it via a different path. +// (You can prevent this by using a LinkLoader function which memoizes a set of +// already-visited Links, and returns a SkipMe when encountering them again.) +// +// WalkMatching (and the other traversal functions) can be used again again inside the VisitFn! 
+// By using the traversal.Progress handed to the VisitFn, +// the Path recorded of the traversal so far will continue to be extended, +// and thus continued nested uses of Walk and Focus will see the fully contextualized Path. +// +func (prog Progress) WalkMatching(n ipld.Node, s selector.Selector, fn VisitFn) error { + prog.init() + return prog.walkAdv(n, s, func(prog Progress, n ipld.Node, tr VisitReason) error { + if tr != VisitReason_SelectionMatch { + return nil + } + return fn(prog, n) + }) +} + +// WalkAdv is identical to WalkMatching, except it is called for *all* nodes +// visited (not just matching nodes), together with the reason for the visit. +// An AdvVisitFn is used instead of a VisitFn, so that the reason can be provided. +// +func (prog Progress) WalkAdv(n ipld.Node, s selector.Selector, fn AdvVisitFn) error { + prog.init() + return prog.walkAdv(n, s, fn) +} + +func (prog Progress) walkAdv(n ipld.Node, s selector.Selector, fn AdvVisitFn) error { + if s.Decide(n) { + if err := fn(prog, n, VisitReason_SelectionMatch); err != nil { + return err + } + } else { + if err := fn(prog, n, VisitReason_SelectionCandidate); err != nil { + return err + } + } + nk := n.ReprKind() + switch nk { + case ipld.ReprKind_Map, ipld.ReprKind_List: // continue + default: + return nil + } + attn := s.Interests() + if attn == nil { + return prog.walkAdv_iterateAll(n, s, fn) + } + return prog.walkAdv_iterateSelective(n, attn, s, fn) + +} + +func (prog Progress) walkAdv_iterateAll(n ipld.Node, s selector.Selector, fn AdvVisitFn) error { + for itr := selector.NewSegmentIterator(n); !itr.Done(); { + ps, v, err := itr.Next() + if err != nil { + return err + } + sNext := s.Explore(n, ps) + if sNext != nil { + progNext := prog + progNext.Path = prog.Path.AppendSegment(ps) + if v.ReprKind() == ipld.ReprKind_Link { + lnk, _ := v.AsLink() + progNext.LastBlock.Path = progNext.Path + progNext.LastBlock.Link = lnk + v, err = progNext.loadLink(v, n) + if err != nil { + if _, ok := 
err.(SkipMe); ok { + return nil + } + return err + } + } + + err = progNext.walkAdv(v, sNext, fn) + if err != nil { + return err + } + } + } + return nil +} + +func (prog Progress) walkAdv_iterateSelective(n ipld.Node, attn []ipld.PathSegment, s selector.Selector, fn AdvVisitFn) error { + for _, ps := range attn { + v, err := n.LookupBySegment(ps) + if err != nil { + continue + } + sNext := s.Explore(n, ps) + if sNext != nil { + progNext := prog + progNext.Path = prog.Path.AppendSegment(ps) + if v.ReprKind() == ipld.ReprKind_Link { + lnk, _ := v.AsLink() + progNext.LastBlock.Path = progNext.Path + progNext.LastBlock.Link = lnk + v, err = progNext.loadLink(v, n) + if err != nil { + if _, ok := err.(SkipMe); ok { + return nil + } + return err + } + } + + err = progNext.walkAdv(v, sNext, fn) + if err != nil { + return err + } + } + } + return nil +} + +func (prog Progress) loadLink(v ipld.Node, parent ipld.Node) (ipld.Node, error) { + lnk, err := v.AsLink() + if err != nil { + return nil, err + } + // Assemble the LinkContext in case the Loader or NBChooser want it. + lnkCtx := ipld.LinkContext{ + LinkPath: prog.Path, + LinkNode: v, + ParentNode: parent, + } + // Pick what in-memory format we will build. + np, err := prog.Cfg.LinkTargetNodePrototypeChooser(lnk, lnkCtx) + if err != nil { + return nil, fmt.Errorf("error traversing node at %q: could not load link %q: %s", prog.Path, lnk, err) + } + nb := np.NewBuilder() + // Load link! + err = lnk.Load( + prog.Cfg.Ctx, + lnkCtx, + nb, + prog.Cfg.LinkLoader, + ) + if err != nil { + if _, ok := err.(SkipMe); ok { + return nil, err + } + return nil, fmt.Errorf("error traversing node at %q: could not load link %q: %s", prog.Path, lnk, err) + } + return nb.Build(), nil +} + +// WalkTransforming walks a graph of Nodes, deciding which to alter by applying a Selector, +// and calls the given TransformFn to decide what new node to replace the visited node with. +// A new Node tree will be returned (the original is unchanged). 
+// +// If the TransformFn returns the same Node which it was called with, +// then the transform is a no-op; if every visited node is a no-op, +// then the root node returned from the walk as a whole will also be +// the same as its starting Node (no new memory will be used). +// +// When a Node is replaced, no further recursion of this walk will occur on its contents. +// (You can certainly do a additional traversals, including transforms, +// from inside the TransformFn while building the replacement node.) +// +// The prototype (that is, implementation) of Node returned will be the same as the +// prototype of the Nodes at the same positions in the existing tree +// (literally, builders used to construct any new needed intermediate nodes +// are chosen by asking the existing nodes about their prototype). +// +// This feature is not yet implemented. +func (prog Progress) WalkTransforming(n ipld.Node, s selector.Selector, fn TransformFn) (ipld.Node, error) { + panic("TODO") +} diff --git a/vendor/github.com/ipld/go-ipld-prime/unit.go b/vendor/github.com/ipld/go-ipld-prime/unit.go new file mode 100644 index 0000000000..5632a45bb4 --- /dev/null +++ b/vendor/github.com/ipld/go-ipld-prime/unit.go @@ -0,0 +1,125 @@ +package ipld + +var Null Node = nullNode{} + +type nullNode struct{} + +func (nullNode) ReprKind() ReprKind { + return ReprKind_Null +} +func (nullNode) LookupByString(key string) (Node, error) { + return nil, ErrWrongKind{TypeName: "null", MethodName: "LookupByString", AppropriateKind: ReprKindSet_JustMap, ActualKind: ReprKind_Null} +} +func (nullNode) LookupByNode(key Node) (Node, error) { + return nil, ErrWrongKind{TypeName: "null", MethodName: "LookupByNode", AppropriateKind: ReprKindSet_JustMap, ActualKind: ReprKind_Null} +} +func (nullNode) LookupByIndex(idx int) (Node, error) { + return nil, ErrWrongKind{TypeName: "null", MethodName: "LookupByIndex", AppropriateKind: ReprKindSet_JustList, ActualKind: ReprKind_Null} +} +func (nullNode) 
LookupBySegment(seg PathSegment) (Node, error) { + return nil, ErrWrongKind{TypeName: "null", MethodName: "LookupBySegment", AppropriateKind: ReprKindSet_Recursive, ActualKind: ReprKind_Null} +} +func (nullNode) MapIterator() MapIterator { + return nil +} +func (nullNode) ListIterator() ListIterator { + return nil +} +func (nullNode) Length() int { + return -1 +} +func (nullNode) IsAbsent() bool { + return false +} +func (nullNode) IsNull() bool { + return true +} +func (nullNode) AsBool() (bool, error) { + return false, ErrWrongKind{TypeName: "null", MethodName: "AsBool", AppropriateKind: ReprKindSet_JustBool, ActualKind: ReprKind_Null} +} +func (nullNode) AsInt() (int, error) { + return 0, ErrWrongKind{TypeName: "null", MethodName: "AsInt", AppropriateKind: ReprKindSet_JustInt, ActualKind: ReprKind_Null} +} +func (nullNode) AsFloat() (float64, error) { + return 0, ErrWrongKind{TypeName: "null", MethodName: "AsFloat", AppropriateKind: ReprKindSet_JustFloat, ActualKind: ReprKind_Null} +} +func (nullNode) AsString() (string, error) { + return "", ErrWrongKind{TypeName: "null", MethodName: "AsString", AppropriateKind: ReprKindSet_JustString, ActualKind: ReprKind_Null} +} +func (nullNode) AsBytes() ([]byte, error) { + return nil, ErrWrongKind{TypeName: "null", MethodName: "AsBytes", AppropriateKind: ReprKindSet_JustBytes, ActualKind: ReprKind_Null} +} +func (nullNode) AsLink() (Link, error) { + return nil, ErrWrongKind{TypeName: "null", MethodName: "AsLink", AppropriateKind: ReprKindSet_JustLink, ActualKind: ReprKind_Null} +} +func (nullNode) Prototype() NodePrototype { + return nullPrototype{} +} + +type nullPrototype struct{} + +func (nullPrototype) NewBuilder() NodeBuilder { + panic("cannot build null nodes") // TODO: okay, fine, we could grind out a simple closing of the loop here. 
+} + +var Absent Node = absentNode{} + +type absentNode struct{} + +func (absentNode) ReprKind() ReprKind { + return ReprKind_Null +} +func (absentNode) LookupByString(key string) (Node, error) { + return nil, ErrWrongKind{TypeName: "absent", MethodName: "LookupByString", AppropriateKind: ReprKindSet_JustMap, ActualKind: ReprKind_Null} +} +func (absentNode) LookupByNode(key Node) (Node, error) { + return nil, ErrWrongKind{TypeName: "absent", MethodName: "LookupByNode", AppropriateKind: ReprKindSet_JustMap, ActualKind: ReprKind_Null} +} +func (absentNode) LookupByIndex(idx int) (Node, error) { + return nil, ErrWrongKind{TypeName: "absent", MethodName: "LookupByIndex", AppropriateKind: ReprKindSet_JustList, ActualKind: ReprKind_Null} +} +func (absentNode) LookupBySegment(seg PathSegment) (Node, error) { + return nil, ErrWrongKind{TypeName: "absent", MethodName: "LookupBySegment", AppropriateKind: ReprKindSet_Recursive, ActualKind: ReprKind_Null} +} +func (absentNode) MapIterator() MapIterator { + return nil +} +func (absentNode) ListIterator() ListIterator { + return nil +} +func (absentNode) Length() int { + return -1 +} +func (absentNode) IsAbsent() bool { + return true +} +func (absentNode) IsNull() bool { + return false +} +func (absentNode) AsBool() (bool, error) { + return false, ErrWrongKind{TypeName: "absent", MethodName: "AsBool", AppropriateKind: ReprKindSet_JustBool, ActualKind: ReprKind_Null} +} +func (absentNode) AsInt() (int, error) { + return 0, ErrWrongKind{TypeName: "absent", MethodName: "AsInt", AppropriateKind: ReprKindSet_JustInt, ActualKind: ReprKind_Null} +} +func (absentNode) AsFloat() (float64, error) { + return 0, ErrWrongKind{TypeName: "absent", MethodName: "AsFloat", AppropriateKind: ReprKindSet_JustFloat, ActualKind: ReprKind_Null} +} +func (absentNode) AsString() (string, error) { + return "", ErrWrongKind{TypeName: "absent", MethodName: "AsString", AppropriateKind: ReprKindSet_JustString, ActualKind: ReprKind_Null} +} +func (absentNode) 
AsBytes() ([]byte, error) { + return nil, ErrWrongKind{TypeName: "absent", MethodName: "AsBytes", AppropriateKind: ReprKindSet_JustBytes, ActualKind: ReprKind_Null} +} +func (absentNode) AsLink() (Link, error) { + return nil, ErrWrongKind{TypeName: "absent", MethodName: "AsLink", AppropriateKind: ReprKindSet_JustLink, ActualKind: ReprKind_Null} +} +func (absentNode) Prototype() NodePrototype { + return absentPrototype{} +} + +type absentPrototype struct{} + +func (absentPrototype) NewBuilder() NodeBuilder { + panic("cannot build absent nodes") // this definitely stays true. +} diff --git a/vendor/github.com/jbenet/goprocess/.travis.yml b/vendor/github.com/jbenet/goprocess/.travis.yml new file mode 100644 index 0000000000..77d0e7d7d6 --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/.travis.yml @@ -0,0 +1,9 @@ +sudo: false + +language: go + +go: + - 1.12 + +script: + - go test -race -v ./... diff --git a/vendor/github.com/jbenet/goprocess/LICENSE b/vendor/github.com/jbenet/goprocess/LICENSE new file mode 100644 index 0000000000..c7386b3c94 --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/jbenet/goprocess/README.md b/vendor/github.com/jbenet/goprocess/README.md new file mode 100644 index 0000000000..e2f12e16d6 --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/README.md @@ -0,0 +1,132 @@ +# goprocess - lifecycles in go + +[![travisbadge](https://travis-ci.org/jbenet/goprocess.svg)](https://travis-ci.org/jbenet/goprocess) + +(Based on https://github.com/jbenet/go-ctxgroup) + +- Godoc: https://godoc.org/github.com/jbenet/goprocess + +`goprocess` introduces a way to manage process lifecycles in go. It is +much like [go.net/context](https://godoc.org/code.google.com/p/go.net/context) +(it actually uses a Context), but it is more like a Context-WaitGroup hybrid. +`goprocess` is about being able to start and stop units of work, which may +receive `Close` signals from many clients. Think of it like a UNIX process +tree, but inside go. + +`goprocess` seeks to minimally affect your objects, so you can use it +with both embedding or composition. At the heart of `goprocess` is the +`Process` interface: + +```Go +// Process is the basic unit of work in goprocess. It defines a computation +// with a lifecycle: +// - running (before calling Close), +// - closing (after calling Close at least once), +// - closed (after Close returns, and all teardown has _completed_). +// +// More specifically, it fits this: +// +// p := WithTeardown(tf) // new process is created, it is now running. +// p.AddChild(q) // can register children **before** Closing. +// go p.Close() // blocks until done running teardown func. +// <-p.Closing() // would now return true. +// <-p.childrenDone() // wait on all children to be done +// p.teardown() // runs the user's teardown function tf. 
+// p.Close() // now returns, with error teardown returned. +// <-p.Closed() // would now return true. +// +// Processes can be arranged in a process "tree", where children are +// automatically Closed if their parents are closed. (Note, it is actually +// a Process DAG, children may have multiple parents). A process may also +// optionally wait for another to fully Close before beginning to Close. +// This makes it easy to ensure order of operations and proper sequential +// teardown of resurces. For example: +// +// p1 := goprocess.WithTeardown(func() error { +// fmt.Println("closing 1") +// }) +// p2 := goprocess.WithTeardown(func() error { +// fmt.Println("closing 2") +// }) +// p3 := goprocess.WithTeardown(func() error { +// fmt.Println("closing 3") +// }) +// +// p1.AddChild(p2) +// p2.AddChild(p3) +// +// +// go p1.Close() +// go p2.Close() +// go p3.Close() +// +// // Output: +// // closing 3 +// // closing 2 +// // closing 1 +// +// Process is modelled after the UNIX processes group idea, and heavily +// informed by sync.WaitGroup and go.net/context.Context. +// +// In the function documentation of this interface, `p` always refers to +// the self Process. +type Process interface { + + // WaitFor makes p wait for q before exiting. Thus, p will _always_ close + // _after_ q. Note well: a waiting cycle is deadlock. + // + // If q is already Closed, WaitFor calls p.Close() + // If p is already Closing or Closed, WaitFor panics. This is the same thing + // as calling Add(1) _after_ calling Done() on a wait group. Calling WaitFor + // on an already-closed process is a programming error likely due to bad + // synchronization + WaitFor(q Process) + + // AddChildNoWait registers child as a "child" of Process. As in UNIX, + // when parent is Closed, child is Closed -- child may Close beforehand. 
+ // This is the equivalent of calling: + // + // go func(parent, child Process) { + // <-parent.Closing() + // child.Close() + // }(p, q) + // + // Note: the naming of functions is `AddChildNoWait` and `AddChild` (instead + // of `AddChild` and `AddChildWaitFor`) because: + // - it is the more common operation, + // - explicitness is helpful in the less common case (no waiting), and + // - usual "child" semantics imply parent Processes should wait for children. + AddChildNoWait(q Process) + + // AddChild is the equivalent of calling: + // parent.AddChildNoWait(q) + // parent.WaitFor(q) + AddChild(q Process) + + // Go creates a new process, adds it as a child, and spawns the ProcessFunc f + // in its own goroutine. It is equivalent to: + // + // GoChild(p, f) + // + // It is useful to construct simple asynchronous workers, children of p. + Go(f ProcessFunc) Process + + // Close ends the process. Close blocks until the process has completely + // shut down, and any teardown has run _exactly once_. The returned error + // is available indefinitely: calling Close twice returns the same error. + // If the process has already been closed, Close returns immediately. + Close() error + + // Closing is a signal to wait upon. The returned channel is closed + // _after_ Close has been called at least once, but teardown may or may + // not be done yet. The primary use case of Closing is for children who + // need to know when a parent is shutting down, and therefore also shut + // down. + Closing() <-chan struct{} + + // Closed is a signal to wait upon. The returned channel is closed + // _after_ Close has completed; teardown has finished. The primary use case + // of Closed is waiting for a Process to Close without _causing_ the Close. 
+ Closed() <-chan struct{} +} +``` diff --git a/vendor/github.com/jbenet/goprocess/background.go b/vendor/github.com/jbenet/goprocess/background.go new file mode 100644 index 0000000000..d658157083 --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/background.go @@ -0,0 +1,33 @@ +package goprocess + +// Background returns the "bgProcess" Process: a statically allocated +// process that can _never_ close. It also never enters Closing() state. +// Calling Background().Close() will hang indefinitely. +func Background() Process { + return background +} + +var background = new(bgProcess) + +type bgProcess struct{} + +func (*bgProcess) WaitFor(q Process) {} +func (*bgProcess) AddChildNoWait(q Process) {} +func (*bgProcess) AddChild(q Process) {} +func (*bgProcess) Close() error { select {} } +func (*bgProcess) CloseAfterChildren() error { select {} } +func (*bgProcess) Closing() <-chan struct{} { return nil } +func (*bgProcess) Closed() <-chan struct{} { return nil } +func (*bgProcess) Err() error { select {} } + +func (*bgProcess) SetTeardown(tf TeardownFunc) { + panic("can't set teardown on bgProcess process") +} +func (*bgProcess) Go(f ProcessFunc) Process { + child := newProcess(nil) + go func() { + f(child) + child.Close() + }() + return child +} diff --git a/vendor/github.com/jbenet/goprocess/context/context.go b/vendor/github.com/jbenet/goprocess/context/context.go new file mode 100644 index 0000000000..54d2d13c29 --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/context/context.go @@ -0,0 +1,117 @@ +package goprocessctx + +import ( + "context" + + goprocess "github.com/jbenet/goprocess" +) + +// WithContext constructs and returns a Process that respects +// given context. 
It is the equivalent of: +// +// func ProcessWithContext(ctx context.Context) goprocess.Process { +// p := goprocess.WithParent(goprocess.Background()) +// CloseAfterContext(p, ctx) +// return p +// } +// +func WithContext(ctx context.Context) goprocess.Process { + p := goprocess.WithParent(goprocess.Background()) + CloseAfterContext(p, ctx) + return p +} + +// WithContextAndTeardown is a helper function to set teardown at initiation +// of WithContext +func WithContextAndTeardown(ctx context.Context, tf goprocess.TeardownFunc) goprocess.Process { + p := goprocess.WithTeardown(tf) + CloseAfterContext(p, ctx) + return p +} + +// WaitForContext makes p WaitFor ctx. When Closing, p waits for +// ctx.Done(), before being Closed(). It is simply: +// +// p.WaitFor(goprocess.WithContext(ctx)) +// +func WaitForContext(ctx context.Context, p goprocess.Process) { + p.WaitFor(WithContext(ctx)) +} + +// CloseAfterContext schedules the process to close after the given +// context is done. It is the equivalent of: +// +// func CloseAfterContext(p goprocess.Process, ctx context.Context) { +// go func() { +// <-ctx.Done() +// p.Close() +// }() +// } +// +func CloseAfterContext(p goprocess.Process, ctx context.Context) { + if p == nil { + panic("nil Process") + } + if ctx == nil { + panic("nil Context") + } + + // Avoid a goroutine for both context.Background() and goprocess.Background(). + if ctx.Done() == nil || p.Closed() == nil { + return + } + + go func() { + select { + case <-ctx.Done(): + p.Close() + case <-p.Closed(): + } + }() +} + +// WithProcessClosing returns a context.Context derived from ctx that +// is cancelled as p is Closing (after: <-p.Closing()). 
It is simply: +// +// func WithProcessClosing(ctx context.Context, p goprocess.Process) context.Context { +// ctx, cancel := context.WithCancel(ctx) +// go func() { +// <-p.Closing() +// cancel() +// }() +// return ctx +// } +// +func WithProcessClosing(ctx context.Context, p goprocess.Process) context.Context { + ctx, cancel := context.WithCancel(ctx) + p.AddChildNoWait(goprocess.WithTeardown(func() error { + cancel() + return nil + })) + return ctx +} + +// WithProcessClosed returns a context.Context that is cancelled +// after Process p is Closed. It is the equivalent of: +// +// func WithProcessClosed(ctx context.Context, p goprocess.Process) context.Context { +// ctx, cancel := context.WithCancel(ctx) +// go func() { +// <-p.Closed() +// cancel() +// }() +// return ctx +// } +// +func WithProcessClosed(ctx context.Context, p goprocess.Process) context.Context { + ctx, cancel := context.WithCancel(ctx) + p.AddChildNoWait(goprocess.WithTeardown(func() error { + select { + case <-p.Closed(): + case <-ctx.Done(): + } + cancel() + return nil + })) + return ctx +} diff --git a/vendor/github.com/jbenet/goprocess/context/context_test.go b/vendor/github.com/jbenet/goprocess/context/context_test.go new file mode 100644 index 0000000000..23e6f4762f --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/context/context_test.go @@ -0,0 +1,193 @@ +package goprocessctx + +import ( + "context" + "testing" + "time" + + "github.com/jbenet/goprocess" +) + +func testClosing(t *testing.T, p goprocess.Process, cancel context.CancelFunc) { + select { + case <-p.Closing(): + t.Fatal("closed") + case <-p.Closed(): + t.Fatal("closed") + case <-time.After(time.Second): + } + + cancel() + + select { + case <-p.Closed(): + case <-time.After(time.Second): + t.Fatal("should have closed") + } + +} + +func TestWithContext(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + testClosing(t, WithContext(ctx), cancel) +} + +func TestWithAndTeardown(t 
*testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + done := false + p := WithContextAndTeardown(ctx, func() error { + done = true + return nil + }) + + select { + case <-p.Closing(): + t.Fatal("closed") + case <-p.Closed(): + t.Fatal("closed") + case <-time.After(time.Second): + } + + if done { + t.Fatal("closed early") + } + + cancel() + + select { + case <-p.Closed(): + case <-time.After(time.Second): + t.Fatal("should have closed") + } + + if !done { + t.Fatal("failed to close") + } +} + +func TestWaitForContext(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + done := make(chan struct{}) + p := goprocess.WithTeardown(func() error { + close(done) + return nil + }) + + WaitForContext(ctx, p) + + go func() { + p.Close() + }() + + select { + case <-p.Closing(): + case <-time.After(time.Second): + t.Fatal("should have started closing") + } + + select { + case <-p.Closed(): + t.Fatal("should not have closed") + case <-time.After(time.Second): + } + + cancel() + + select { + case <-p.Closed(): + case <-time.After(time.Second): + t.Fatal("should have closed") + } +} + +func TestCloseAfterContext(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + p := goprocess.WithTeardown(func() error { + return nil + }) + + CloseAfterContext(p, ctx) + testClosing(t, p, cancel) +} + +func TestWithProcessClosing(t *testing.T) { + t.Parallel() + + ctx := context.WithValue(context.Background(), "foo", "bar") + + p := goprocess.WithTeardown(func() error { return nil }) + + ctx = WithProcessClosing(ctx, p) + if ctx.Value("foo") != "bar" { + t.Fatal("context value not preserved") + } + + select { + case <-ctx.Done(): + t.Fatal("should not have been canceled") + case <-time.After(time.Second): + } + + p.Close() + + select { + case <-ctx.Done(): + case <-time.After(time.Second): + t.Fatal("should have been 
canceled") + } +} + +func TestWithProcessClosed(t *testing.T) { + t.Parallel() + + ctx := context.WithValue(context.Background(), "foo", "bar") + + closeBlock := make(chan struct{}) + p := goprocess.WithTeardown(func() error { <-closeBlock; return nil }) + + ctx = WithProcessClosed(ctx, p) + if ctx.Value("foo") != "bar" { + t.Fatal("context value not preserved") + } + + select { + case <-ctx.Done(): + t.Fatal("should not have been canceled") + case <-time.After(time.Second): + } + + closeWait := make(chan struct{}) + go func() { + defer close(closeWait) + p.Close() + }() + + select { + case <-ctx.Done(): + t.Fatal("should not have been canceled") + case <-time.After(time.Second): + } + + close(closeBlock) + <-closeWait + + select { + case <-ctx.Done(): + case <-time.After(time.Second): + t.Fatal("should have been canceled") + } +} diff --git a/vendor/github.com/jbenet/goprocess/context/derive.go b/vendor/github.com/jbenet/goprocess/context/derive.go new file mode 100644 index 0000000000..92e4d27236 --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/context/derive.go @@ -0,0 +1,19 @@ +package goprocessctx + +import ( + "context" + + goprocess "github.com/jbenet/goprocess" +) + +// OnClosingContext derives a context from a given goprocess that will +// be 'Done' when the process is closing +func OnClosingContext(p goprocess.Process) context.Context { + return WithProcessClosing(context.Background(), p) +} + +// OnClosedContext derives a context from a given goprocess that will +// be 'Done' when the process is closed +func OnClosedContext(p goprocess.Process) context.Context { + return WithProcessClosed(context.Background(), p) +} diff --git a/vendor/github.com/jbenet/goprocess/example_test.go b/vendor/github.com/jbenet/goprocess/example_test.go new file mode 100644 index 0000000000..75ce640c7b --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/example_test.go @@ -0,0 +1,37 @@ +package goprocess_test + +import ( + "fmt" + "time" + + 
"github.com/jbenet/goprocess" +) + +func ExampleGo() { + p := goprocess.Go(func(p goprocess.Process) { + ticker := time.Tick(200 * time.Millisecond) + for { + select { + case <-ticker: + fmt.Println("tick") + case <-p.Closing(): + fmt.Println("closing") + return + } + } + }) + + <-time.After(1100 * time.Millisecond) + p.Close() + fmt.Println("closed") + <-time.After(100 * time.Millisecond) + + // Output: + // tick + // tick + // tick + // tick + // tick + // closing + // closed +} diff --git a/vendor/github.com/jbenet/goprocess/go.mod b/vendor/github.com/jbenet/goprocess/go.mod new file mode 100644 index 0000000000..e22f17f29e --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/go.mod @@ -0,0 +1,5 @@ +module github.com/jbenet/goprocess + +go 1.12 + +require github.com/jbenet/go-cienv v0.1.0 diff --git a/vendor/github.com/jbenet/goprocess/go.sum b/vendor/github.com/jbenet/goprocess/go.sum new file mode 100644 index 0000000000..82fea79cd6 --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/go.sum @@ -0,0 +1,2 @@ +github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= diff --git a/vendor/github.com/jbenet/goprocess/goprocess.go b/vendor/github.com/jbenet/goprocess/goprocess.go new file mode 100644 index 0000000000..48b76e2ea5 --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/goprocess.go @@ -0,0 +1,263 @@ +// Package goprocess introduces a Process abstraction that allows simple +// organization, and orchestration of work. It is much like a WaitGroup, +// and much like a context.Context, but also ensures safe **exactly-once**, +// and well-ordered teardown semantics. +package goprocess + +import ( + "os" + "os/signal" +) + +// Process is the basic unit of work in goprocess. 
It defines a computation +// with a lifecycle: +// - running (before calling Close), +// - closing (after calling Close at least once), +// - closed (after Close returns, and all teardown has _completed_). +// +// More specifically, it fits this: +// +// p := WithTeardown(tf) // new process is created, it is now running. +// p.AddChild(q) // can register children **before** Closed(). +// go p.Close() // blocks until done running teardown func. +// <-p.Closing() // would now return true. +// <-p.childrenDone() // wait on all children to be done +// p.teardown() // runs the user's teardown function tf. +// p.Close() // now returns, with error teardown returned. +// <-p.Closed() // would now return true. +// +// Processes can be arranged in a process "tree", where children are +// automatically Closed if their parents are closed. (Note, it is actually +// a Process DAG, children may have multiple parents). A process may also +// optionally wait for another to fully Close before beginning to Close. +// This makes it easy to ensure order of operations and proper sequential +// teardown of resurces. For example: +// +// p1 := goprocess.WithTeardown(func() error { +// fmt.Println("closing 1") +// }) +// p2 := goprocess.WithTeardown(func() error { +// fmt.Println("closing 2") +// }) +// p3 := goprocess.WithTeardown(func() error { +// fmt.Println("closing 3") +// }) +// +// p1.AddChild(p2) +// p2.AddChild(p3) +// +// +// go p1.Close() +// go p2.Close() +// go p3.Close() +// +// // Output: +// // closing 3 +// // closing 2 +// // closing 1 +// +// Process is modelled after the UNIX processes group idea, and heavily +// informed by sync.WaitGroup and go.net/context.Context. +// +// In the function documentation of this interface, `p` always refers to +// the self Process. +type Process interface { + + // WaitFor makes p wait for q before exiting. Thus, p will _always_ close + // _after_ q. Note well: a waiting cycle is deadlock. 
+ // + // If p is already Closed, WaitFor panics. This is the same thing as + // calling Add(1) _after_ calling Done() on a wait group. Calling + // WaitFor on an already-closed process is a programming error likely + // due to bad synchronization + WaitFor(q Process) + + // AddChildNoWait registers child as a "child" of Process. As in UNIX, + // when parent is Closed, child is Closed -- child may Close beforehand. + // This is the equivalent of calling: + // + // go func(parent, child Process) { + // <-parent.Closing() + // child.Close() + // }(p, q) + // + // Note: the naming of functions is `AddChildNoWait` and `AddChild` (instead + // of `AddChild` and `AddChildWaitFor`) because: + // - it is the more common operation, + // - explicitness is helpful in the less common case (no waiting), and + // - usual "child" semantics imply parent Processes should wait for children. + AddChildNoWait(q Process) + + // AddChild is the equivalent of calling: + // parent.AddChildNoWait(q) + // parent.WaitFor(q) + // + // It will _panic_ if the parent is already closed. + AddChild(q Process) + + // Go is much like `go`, as it runs a function in a newly spawned goroutine. + // The neat part of Process.Go is that the Process object you call it on will: + // * construct a child Process, and call AddChild(child) on it + // * spawn a goroutine, and call the given function + // * Close the child when the function exits. + // This way, you can rest assured each goroutine you spawn has its very own + // Process context, and that it will be closed when the function exits. + // It is the function's responsibility to respect the Closing of its Process, + // namely it should exit (return) when <-Closing() is ready. It is basically: + // + // func (p Process) Go(f ProcessFunc) Process { + // child := WithParent(p) + // go func () { + // f(child) + // child.Close() + // }() + // } + // + // It is useful to construct simple asynchronous workers, children of p. 
+ Go(f ProcessFunc) Process + + // SetTeardown sets the process's teardown to tf. + SetTeardown(tf TeardownFunc) + + // Close ends the process. Close blocks until the process has completely + // shut down, and any teardown has run _exactly once_. The returned error + // is available indefinitely: calling Close twice returns the same error. + // If the process has already been closed, Close returns immediately. + Close() error + + // CloseAfterChildren calls Close _after_ its children have Closed + // normally (i.e. it _does not_ attempt to close them). + CloseAfterChildren() error + + // Closing is a signal to wait upon. The returned channel is closed + // _after_ Close has been called at least once, but teardown may or may + // not be done yet. The primary use case of Closing is for children who + // need to know when a parent is shutting down, and therefore also shut + // down. + Closing() <-chan struct{} + + // Closed is a signal to wait upon. The returned channel is closed + // _after_ Close has completed; teardown has finished. The primary use case + // of Closed is waiting for a Process to Close without _causing_ the Close. + Closed() <-chan struct{} + + // Err waits until the process is closed, and then returns any error that + // occurred during shutdown. + Err() error +} + +// TeardownFunc is a function used to cleanup state at the end of the +// lifecycle of a Process. +type TeardownFunc func() error + +// ProcessFunc is a function that takes a process. Its main use case is goprocess.Go, +// which spawns a ProcessFunc in its own goroutine, and returns a corresponding +// Process object. +type ProcessFunc func(proc Process) + +var nilProcessFunc = func(Process) {} + +// Go is much like `go`: it runs a function in a newly spawned goroutine. The neat +// part of Go is that it provides Process object to communicate between the +// function and the outside world. Thus, callers can easily WaitFor, or Close the +// function. 
It is the function's responsibility to respect the Closing of its Process, +// namely it should exit (return) when <-Closing() is ready. It is simply: +// +// func Go(f ProcessFunc) Process { +// p := WithParent(Background()) +// p.Go(f) +// return p +// } +// +// Note that a naive implementation of Go like the following would not work: +// +// func Go(f ProcessFunc) Process { +// return Background().Go(f) +// } +// +// This is because having the process you +func Go(f ProcessFunc) Process { + // return GoChild(Background(), f) + + // we use two processes, one for communication, and + // one for ensuring we wait on the function (unclosable from the outside). + p := newProcess(nil) + waitFor := newProcess(nil) + p.WaitFor(waitFor) // prevent p from closing + go func() { + f(p) + waitFor.Close() // allow p to close. + p.Close() // ensure p closes. + }() + return p +} + +// GoChild is like Go, but it registers the returned Process as a child of parent, +// **before** spawning the goroutine, which ensures proper synchronization with parent. +// It is somewhat like +// +// func GoChild(parent Process, f ProcessFunc) Process { +// p := WithParent(parent) +// p.Go(f) +// return p +// } +// +// And it is similar to the classic WaitGroup use case: +// +// func WaitGroupGo(wg sync.WaitGroup, child func()) { +// wg.Add(1) +// go func() { +// child() +// wg.Done() +// }() +// } +// +func GoChild(parent Process, f ProcessFunc) Process { + p := WithParent(parent) + p.Go(f) + return p +} + +// Spawn is an alias of `Go`. In many contexts, Spawn is a +// well-known Process launching word, which fits our use case. +var Spawn = Go + +// SpawnChild is an alias of `GoChild`. In many contexts, Spawn is a +// well-known Process launching word, which fits our use case. +var SpawnChild = GoChild + +// WithTeardown constructs and returns a Process with a TeardownFunc. 
+// TeardownFunc tf will be called **exactly-once** when Process is +// Closing, after all Children have fully closed, and before p is Closed. +// In fact, Process p will not be Closed until tf runs and exits. +// See lifecycle in Process doc. +func WithTeardown(tf TeardownFunc) Process { + if tf == nil { + panic("nil tf TeardownFunc") + } + return newProcess(tf) +} + +// WithParent constructs and returns a Process with a given parent. +func WithParent(parent Process) Process { + if parent == nil { + panic("nil parent Process") + } + q := newProcess(nil) + parent.AddChild(q) + return q +} + +// WithSignals returns a Process that will Close() when any given signal fires. +// This is useful to bind Process trees to syscall.SIGTERM, SIGKILL, etc. +func WithSignals(sig ...os.Signal) Process { + p := WithParent(Background()) + c := make(chan os.Signal, 1) + signal.Notify(c, sig...) + go func() { + <-c + signal.Stop(c) + p.Close() + }() + return p +} diff --git a/vendor/github.com/jbenet/goprocess/goprocess_test.go b/vendor/github.com/jbenet/goprocess/goprocess_test.go new file mode 100644 index 0000000000..23de6984c3 --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/goprocess_test.go @@ -0,0 +1,788 @@ +package goprocess + +import ( + "fmt" + "runtime" + "syscall" + "testing" + "time" +) + +type tree struct { + Process + c []tree +} + +func setupHierarchy(p Process) tree { + t := func(n Process, ts ...tree) tree { + return tree{n, ts} + } + + a := WithParent(p) + b1 := WithParent(a) + b2 := WithParent(a) + c1 := WithParent(b1) + c2 := WithParent(b1) + c3 := WithParent(b2) + c4 := WithParent(b2) + + return t(a, t(b1, t(c1), t(c2)), t(b2, t(c3), t(c4))) +} + +func TestClosingClosed(t *testing.T) { + + bWait := make(chan struct{}) + a := WithParent(Background()) + a.Go(func(proc Process) { + <-bWait + }) + + Q := make(chan string, 3) + + go func() { + <-a.Closing() + Q <- "closing" + bWait <- struct{}{} + }() + + go func() { + <-a.Closed() + Q <- "closed" + }() + + go 
func() { + a.Close() + Q <- "closed" + }() + + if q := <-Q; q != "closing" { + t.Error("order incorrect. closing not first") + } + if q := <-Q; q != "closed" { + t.Error("order incorrect. closing not first") + } + if q := <-Q; q != "closed" { + t.Error("order incorrect. closing not first") + } +} + +func TestChildFunc(t *testing.T) { + a := WithParent(Background()) + + wait1 := make(chan struct{}) + wait2 := make(chan struct{}) + wait3 := make(chan struct{}) + wait4 := make(chan struct{}) + + a.Go(func(process Process) { + wait1 <- struct{}{} + <-wait2 + wait3 <- struct{}{} + }) + + go func() { + a.Close() + wait4 <- struct{}{} + }() + + <-wait1 + select { + case <-wait3: + t.Error("should not be closed yet") + case <-wait4: + t.Error("should not be closed yet") + case <-a.Closed(): + t.Error("should not be closed yet") + default: + } + + wait2 <- struct{}{} + + select { + case <-wait3: + case <-time.After(time.Second): + t.Error("should be closed now") + } + + select { + case <-wait4: + case <-time.After(time.Second): + t.Error("should be closed now") + } +} + +func TestTeardownCalledOnce(t *testing.T) { + a := setupHierarchy(Background()) + + onlyOnce := func() func() error { + count := 0 + return func() error { + count++ + if count > 1 { + t.Error("called", count, "times") + } + return nil + } + } + + a.SetTeardown(onlyOnce()) + a.c[0].SetTeardown(onlyOnce()) + a.c[0].c[0].SetTeardown(onlyOnce()) + a.c[0].c[1].SetTeardown(onlyOnce()) + a.c[1].SetTeardown(onlyOnce()) + a.c[1].c[0].SetTeardown(onlyOnce()) + a.c[1].c[1].SetTeardown(onlyOnce()) + + a.c[0].c[0].Close() + a.c[0].c[0].Close() + a.c[0].c[0].Close() + a.c[0].c[0].Close() + a.c[0].Close() + a.c[0].Close() + a.c[0].Close() + a.c[0].Close() + a.Close() + a.Close() + a.Close() + a.Close() + a.c[1].Close() + a.c[1].Close() + a.c[1].Close() + a.c[1].Close() +} + +func TestOnClosedAll(t *testing.T) { + + Q := make(chan string, 10) + p := WithParent(Background()) + a := setupHierarchy(p) + + go onClosedStr(Q, 
"0", a.c[0]) + go onClosedStr(Q, "10", a.c[1].c[0]) + go onClosedStr(Q, "", a) + go onClosedStr(Q, "00", a.c[0].c[0]) + go onClosedStr(Q, "1", a.c[1]) + go onClosedStr(Q, "01", a.c[0].c[1]) + go onClosedStr(Q, "11", a.c[1].c[1]) + + go p.Close() + + testStrs(t, Q, "00", "01", "10", "11", "0", "1", "") + testStrs(t, Q, "00", "01", "10", "11", "0", "1", "") + testStrs(t, Q, "00", "01", "10", "11", "0", "1", "") + testStrs(t, Q, "00", "01", "10", "11", "0", "1", "") + testStrs(t, Q, "00", "01", "10", "11", "0", "1", "") + testStrs(t, Q, "00", "01", "10", "11", "0", "1", "") +} + +func TestOnClosedLeaves(t *testing.T) { + + Q := make(chan string, 10) + p := WithParent(Background()) + a := setupHierarchy(p) + + go onClosedStr(Q, "0", a.c[0]) + go onClosedStr(Q, "10", a.c[1].c[0]) + go onClosedStr(Q, "", a) + go onClosedStr(Q, "00", a.c[0].c[0]) + go onClosedStr(Q, "1", a.c[1]) + go onClosedStr(Q, "01", a.c[0].c[1]) + go onClosedStr(Q, "11", a.c[1].c[1]) + + go a.c[0].Close() + testStrs(t, Q, "00", "01", "0") + testStrs(t, Q, "00", "01", "0") + testStrs(t, Q, "00", "01", "0") + + go a.c[1].Close() + testStrs(t, Q, "10", "11", "1") + testStrs(t, Q, "10", "11", "1") + testStrs(t, Q, "10", "11", "1") + + go p.Close() + testStrs(t, Q, "") +} + +func TestWaitFor(t *testing.T) { + + Q := make(chan string, 5) + a := WithParent(Background()) + b := WithParent(Background()) + c := WithParent(Background()) + d := WithParent(Background()) + e := WithParent(Background()) + + go onClosedStr(Q, "a", a) + go onClosedStr(Q, "b", b) + go onClosedStr(Q, "c", c) + go onClosedStr(Q, "d", d) + go onClosedStr(Q, "e", e) + + testNone(t, Q) + a.WaitFor(b) + a.WaitFor(c) + b.WaitFor(d) + e.WaitFor(d) + testNone(t, Q) + + go a.Close() // should do nothing. 
+ testNone(t, Q) + + go e.Close() + testNone(t, Q) + + d.Close() + testStrs(t, Q, "d", "e") + testStrs(t, Q, "d", "e") + + c.Close() + testStrs(t, Q, "c") + + b.Close() + testStrs(t, Q, "a", "b") + testStrs(t, Q, "a", "b") +} + +func TestAddChildNoWait(t *testing.T) { + + Q := make(chan string, 5) + a := WithParent(Background()) + b := WithParent(Background()) + c := WithParent(Background()) + d := WithParent(Background()) + e := WithParent(Background()) + + go onClosedStr(Q, "a", a) + go onClosedStr(Q, "b", b) + go onClosedStr(Q, "c", c) + go onClosedStr(Q, "d", d) + go onClosedStr(Q, "e", e) + + testNone(t, Q) + a.AddChildNoWait(b) + a.AddChildNoWait(c) + b.AddChildNoWait(d) + e.AddChildNoWait(d) + testNone(t, Q) + + b.Close() + testStrs(t, Q, "b", "d") + testStrs(t, Q, "b", "d") + + a.Close() + testStrs(t, Q, "a", "c") + testStrs(t, Q, "a", "c") + + e.Close() + testStrs(t, Q, "e") +} + +func TestAddChild(t *testing.T) { + + a := WithParent(Background()) + b := WithParent(Background()) + c := WithParent(Background()) + d := WithParent(Background()) + e := WithParent(Background()) + Q := make(chan string, 5) + + go onClosedStr(Q, "a", a) + go onClosedStr(Q, "b", b) + go onClosedStr(Q, "c", c) + go onClosedStr(Q, "d", d) + go onClosedStr(Q, "e", e) + + testNone(t, Q) + a.AddChild(b) + a.AddChild(c) + b.AddChild(d) + e.AddChild(d) + testNone(t, Q) + + go b.Close() + d.Close() + testStrs(t, Q, "b", "d") + testStrs(t, Q, "b", "d") + + go a.Close() + c.Close() + testStrs(t, Q, "a", "c") + testStrs(t, Q, "a", "c") + + e.Close() + testStrs(t, Q, "e") +} + +func TestGoChildrenClose(t *testing.T) { + + var a, b, c, d, e Process + var ready = make(chan struct{}) + var bWait = make(chan struct{}) + var cWait = make(chan struct{}) + var dWait = make(chan struct{}) + var eWait = make(chan struct{}) + + a = WithParent(Background()) + a.Go(func(p Process) { + b = p + b.Go(func(p Process) { + c = p + ready <- struct{}{} + <-cWait + }) + ready <- struct{}{} + <-bWait + }) + 
a.Go(func(p Process) { + d = p + d.Go(func(p Process) { + e = p + ready <- struct{}{} + <-eWait + }) + ready <- struct{}{} + <-dWait + }) + + <-ready + <-ready + <-ready + <-ready + + Q := make(chan string, 5) + + go onClosedStr(Q, "a", a) + go onClosedStr(Q, "b", b) + go onClosedStr(Q, "c", c) + go onClosedStr(Q, "d", d) + go onClosedStr(Q, "e", e) + + testNone(t, Q) + go a.Close() + testNone(t, Q) + + bWait <- struct{}{} // relase b + go b.Close() + testNone(t, Q) + + cWait <- struct{}{} // relase c + <-c.Closed() + <-b.Closed() + testStrs(t, Q, "b", "c") + testStrs(t, Q, "b", "c") + + eWait <- struct{}{} // release e + <-e.Closed() + testStrs(t, Q, "e") + + dWait <- struct{}{} // releasse d + <-d.Closed() + <-a.Closed() + testStrs(t, Q, "a", "d") + testStrs(t, Q, "a", "d") +} + +func TestCloseAfterChildren(t *testing.T) { + + var a, b, c, d, e Process + + var ready = make(chan struct{}) + + a = WithParent(Background()) + a.Go(func(p Process) { + b = p + b.Go(func(p Process) { + c = p + ready <- struct{}{} + <-p.Closing() // wait till we're told to close (parents mustnt) + }) + ready <- struct{}{} + // <-p.Closing() // will CloseAfterChildren + }) + a.Go(func(p Process) { + d = p + d.Go(func(p Process) { + e = p + ready <- struct{}{} + <-p.Closing() // wait till we're told to close (parents mustnt) + }) + ready <- struct{}{} + <-p.Closing() + }) + + <-ready + <-ready + <-ready + <-ready + + Q := make(chan string, 5) + + go onClosedStr(Q, "a", a) + go onClosedStr(Q, "b", b) + go onClosedStr(Q, "c", c) + go onClosedStr(Q, "d", d) + go onClosedStr(Q, "e", e) + + aDone := make(chan struct{}) + bDone := make(chan struct{}) + + t.Log("test none when waiting on a") + testNone(t, Q) + go func() { + a.CloseAfterChildren() + aDone <- struct{}{} + }() + testNone(t, Q) + + t.Log("test none when waiting on b") + go func() { + b.CloseAfterChildren() + bDone <- struct{}{} + }() + testNone(t, Q) + + c.Close() + <-bDone + <-b.Closed() + testStrs(t, Q, "b", "c") + testStrs(t, Q, 
"b", "c") + + // should be fine. + b.CloseAfterChildren() + + e.Close() + testStrs(t, Q, "e") + + d.Close() + <-aDone + <-a.Closed() + testStrs(t, Q, "a", "d") + testStrs(t, Q, "a", "d") +} + +func TestWaitAfterClose(t *testing.T) { + a := WithParent(Background()) + a.Close() + + (func() { + defer func() { + if recover() == nil { + t.Error("process have paniced") + } + }() + + a.Go(func(p Process) { + t.Error("process should not have run") + }) + t.Error("process have paniced") + })() + + child := WithParent(Background()) + defer child.Close() + (func() { + defer func() { + if recover() == nil { + t.Error("process have paniced") + } + }() + + a.AddChild(child) + })() + (func() { + defer func() { + if recover() == nil { + t.Error("process have paniced") + } + }() + + a.WaitFor(child) + t.Error("process have paniced") + })() + + // Child shouldn't have been closed + testNotClosing(t, child) + + // Closing again shouldn't change anything. + a.Close() +} + +func TestCloseWait(t *testing.T) { + unblockWait := make(chan struct{}) + unblockNoWait := make(chan struct{}) + a := WithParent(Background()) + a.Go(func(p Process) { + unblockWait <- struct{}{} + }) + go a.Close() + + <-a.Closing() + + a.WaitFor(Go(func(p Process) { + unblockWait <- struct{}{} + })) + + a.AddChild(Go(func(p Process) { + unblockWait <- struct{}{} + })) + + noWaitProc := Go(func(p Process) { + unblockNoWait <- struct{}{} + }) + + a.AddChildNoWait(noWaitProc) + testClosing(t, noWaitProc) + + unblockTicker := time.NewTicker(50 * time.Millisecond) + defer unblockTicker.Stop() + for i := 0; i < 3; i++ { + select { + case <-a.Closed(): + t.Fatal("expected process to block close") + case <-unblockTicker.C: + } + <-unblockWait + } + + testClosed(t, a) + <-unblockNoWait + testClosed(t, noWaitProc) +} + +func TestErr(t *testing.T) { + err := Go(func(p Process) {}).Err() + if err != nil { + t.Error(err) + } + testErr := fmt.Errorf("foobar") + p := WithTeardown(func() error { + return testErr + }) + done := 
make(chan struct{}) + defer func() { <-done }() + go func() { + defer close(done) + if err := p.Err(); err != testErr { + t.Errorf("expected err %q, got %q", testErr, err) + } + }() + select { + case <-time.After(time.Millisecond * 50): + case <-done: + t.Error("err shouldn't return till process is closed") + } + p.Close() +} + +func TestGoClosing(t *testing.T) { + + var ready = make(chan struct{}) + a := WithParent(Background()) + a.Go(func(p Process) { + + // this should be fine. + a.Go(func(p Process) { + ready <- struct{}{} + }) + + // set a to close. should not fully close until after this func returns. + go a.Close() + + // wait until a is marked as closing + <-a.Closing() + + // this should also be fine. + a.Go(func(p Process) { + + select { + case <-p.Closing(): + // p should be marked as closing + default: + t.Error("not marked closing when it should be.") + } + + ready <- struct{}{} + + // parent shouldn't close first. + select { + case <-a.Closed(): + t.Error("should not be closed") + case <-time.After(100 * time.Millisecond): + } + }) + + ready <- struct{}{} + }) + + <-ready + <-ready + <-ready +} + +func TestBackground(t *testing.T) { + // test it hangs indefinitely: + b := Background() + go b.Close() + + select { + case <-time.After(50 * time.Millisecond): + case <-b.Closing(): + t.Error("b.Closing() closed :(") + } + + done := make(chan struct{}) + proc := b.Go(func(p Process) { + <-done + }) + select { + case <-time.After(50 * time.Millisecond): + case <-proc.Closing(): + t.Error("proc closed") + } + close(done) + testClosed(t, proc) + + proc2 := WithTeardown(func() error { + return nil + }) + proc2.WaitFor(b) + + go proc.Close() + + testClosing(t, proc) + + select { + case <-time.After(50 * time.Millisecond): + case <-proc2.Closed(): + t.Error("proc2 closed") + } +} + +func TestWithSignals(t *testing.T) { + p := WithSignals(syscall.SIGABRT) + testNotClosed(t, p) + + syscall.Kill(syscall.Getpid(), syscall.SIGABRT) + testClosed(t, p) +} + +func 
TestMemoryLeak(t *testing.T) { + iters := 100 + fanout := 10 + P := newProcess(nil) + var memories []float32 + + measure := func(str string) float32 { + s := new(runtime.MemStats) + runtime.ReadMemStats(s) + //fmt.Printf("%d ", s.HeapObjects) + //fmt.Printf("%d ", len(P.children)) + //fmt.Printf("%d ", runtime.NumGoroutine()) + //fmt.Printf("%s: %dk\n", str, s.HeapAlloc/1000) + return float32(s.HeapAlloc) / 1000 + } + + spawn := func() []Process { + var ps []Process + // Spawn processes + for i := 0; i < fanout; i++ { + p := WithParent(P) + ps = append(ps, p) + + for i := 0; i < fanout; i++ { + p2 := WithParent(p) + ps = append(ps, p2) + + for i := 0; i < fanout; i++ { + p3 := WithParent(p2) + ps = append(ps, p3) + } + } + } + return ps + } + + // Read initial memory stats + measure("initial") + for i := 0; i < iters; i++ { + ps := spawn() + //measure("alloc") // read after alloc + + // Close all processes + for _, p := range ps { + p.Close() + <-p.Closed() + } + ps = nil + + //measure("dealloc") // read after dealloc, but before gc + + // wait until all/most goroutines finish + <-time.After(time.Millisecond) + + // Run GC + runtime.GC() + memories = append(memories, measure("gc")) // read after gc + } + + memoryInit := memories[10] + percentGrowth := 100 * (memories[len(memories)-1] - memoryInit) / memoryInit + fmt.Printf("Memory growth after %d iteration with each %d processes: %.2f%% after %dk\n", iters, fanout*fanout*fanout, percentGrowth, int(memoryInit)) + +} + +func testClosing(t *testing.T, p Process) { + select { + case <-p.Closing(): + case <-time.After(50 * time.Millisecond): + t.Fatal("should be closing") + } +} + +func testNotClosing(t *testing.T, p Process) { + select { + case <-p.Closing(): + t.Fatal("should not be closing") + case <-p.Closed(): + t.Fatal("should not be closed") + default: + } +} + +func testClosed(t *testing.T, p Process) { + select { + case <-p.Closed(): + case <-time.After(50 * time.Millisecond): + t.Fatal("should be closed") + } 
+} + +func testNotClosed(t *testing.T, p Process) { + select { + case <-p.Closed(): + t.Fatal("should not be closed") + case <-time.After(50 * time.Millisecond): + } +} + +func testNone(t *testing.T, c <-chan string) { + select { + case out := <-c: + t.Fatal("none should be closed", out) + default: + } +} + +func testStrs(t *testing.T, Q <-chan string, ss ...string) { + s1 := <-Q + for _, s2 := range ss { + if s1 == s2 { + return + } + } + t.Error("context not in group:", s1, ss) +} + +func onClosedStr(Q chan<- string, s string, p Process) { + <-p.Closed() + Q <- s +} diff --git a/vendor/github.com/jbenet/goprocess/impl-mutex.go b/vendor/github.com/jbenet/goprocess/impl-mutex.go new file mode 100644 index 0000000000..535e609acc --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/impl-mutex.go @@ -0,0 +1,299 @@ +package goprocess + +import ( + "sync" +) + +// process implements Process +type process struct { + children map[*processLink]struct{} // process to close with us + waitfors map[*processLink]struct{} // process to only wait for + waiters []*processLink // processes that wait for us. for gc. + + teardown TeardownFunc // called to run the teardown logic. + closing chan struct{} // closed once close starts. + closed chan struct{} // closed once close is done. + closeErr error // error to return to clients of Close() + + sync.Mutex +} + +// newProcess constructs and returns a Process. +// It will call tf TeardownFunc exactly once: +// **after** all children have fully Closed, +// **after** entering <-Closing(), and +// **before** <-Closed(). 
+func newProcess(tf TeardownFunc) *process { + return &process{ + teardown: tf, + closed: make(chan struct{}), + closing: make(chan struct{}), + waitfors: make(map[*processLink]struct{}), + children: make(map[*processLink]struct{}), + } +} + +func (p *process) WaitFor(q Process) { + if q == nil { + panic("waiting for nil process") + } + + p.Lock() + defer p.Unlock() + + select { + case <-p.Closed(): + panic("Process cannot wait after being closed") + default: + } + + pl := newProcessLink(p, q) + if p.waitfors == nil { + // This may be nil when we're closing. In close, we'll keep + // reading this map till it stays nil. + p.waitfors = make(map[*processLink]struct{}, 1) + } + p.waitfors[pl] = struct{}{} + go pl.AddToChild() +} + +func (p *process) AddChildNoWait(child Process) { + if child == nil { + panic("adding nil child process") + } + + p.Lock() + defer p.Unlock() + + select { + case <-p.Closing(): + // Either closed or closing, close child immediately. This is + // correct because we aren't asked to _wait_ on this child. + go child.Close() + // Wait for the child to start closing so the child is in the + // "correct" state after this function finishes (see #17). + <-child.Closing() + return + default: + } + + pl := newProcessLink(p, child) + p.children[pl] = struct{}{} + go pl.AddToChild() +} + +func (p *process) AddChild(child Process) { + if child == nil { + panic("adding nil child process") + } + + p.Lock() + defer p.Unlock() + + pl := newProcessLink(p, child) + + select { + case <-p.Closed(): + // AddChild must not be called on a dead process. Maybe that's + // too strict? + panic("Process cannot add children after being closed") + default: + } + + select { + case <-p.Closing(): + // Already closing, close child in background. + go child.Close() + // Wait for the child to start closing so the child is in the + // "correct" state after this function finishes (see #17). + <-child.Closing() + default: + // Only add the child when not closing. 
When closing, just add + // it to the "waitfors" list. + p.children[pl] = struct{}{} + } + + if p.waitfors == nil { + // This may be be nil when we're closing. In close, we'll keep + // reading this map till it stays nil. + p.waitfors = make(map[*processLink]struct{}, 1) + } + p.waitfors[pl] = struct{}{} + go pl.AddToChild() +} + +func (p *process) Go(f ProcessFunc) Process { + child := newProcess(nil) + waitFor := newProcess(nil) + child.WaitFor(waitFor) // prevent child from closing + + // add child last, to prevent a closing parent from + // closing all of them prematurely, before running the func. + p.AddChild(child) + go func() { + f(child) + waitFor.Close() // allow child to close. + child.CloseAfterChildren() // close to tear down. + }() + return child +} + +// SetTeardown to assign a teardown function +func (p *process) SetTeardown(tf TeardownFunc) { + if tf == nil { + panic("cannot set nil TeardownFunc") + } + + p.Lock() + if p.teardown != nil { + panic("cannot SetTeardown twice") + } + + p.teardown = tf + select { + case <-p.Closed(): + // Call the teardown function, but don't set the error. We can't + // change that after we shut down. + tf() + default: + } + p.Unlock() +} + +// Close is the external close function. +// it's a wrapper around internalClose that waits on Closed() +func (p *process) Close() error { + p.Lock() + + // if already closing, or closed, get out. (but wait!) + select { + case <-p.Closing(): + p.Unlock() + <-p.Closed() + return p.closeErr + default: + } + + p.doClose() + p.Unlock() + return p.closeErr +} + +func (p *process) Closing() <-chan struct{} { + return p.closing +} + +func (p *process) Closed() <-chan struct{} { + return p.closed +} + +func (p *process) Err() error { + <-p.Closed() + return p.closeErr +} + +// the _actual_ close process. +func (p *process) doClose() { + // this function is only be called once (protected by p.Lock()). + // and it will panic (on closing channels) otherwise. 
+ + close(p.closing) // signal that we're shutting down (Closing) + + // We won't add any children after we start closing so we can do this + // once. + for plc, _ := range p.children { + child := plc.Child() + if child != nil { // check because child may already have been removed. + go child.Close() // force all children to shut down + } + + // safe to call multiple times per link + plc.ParentClear() + } + p.children = nil // clear them. release memory. + + // We may repeatedly continue to add waiters while we wait to close so + // we have to do this in a loop. + for len(p.waitfors) > 0 { + // we must be careful not to iterate over waitfors directly, as it may + // change under our feet. + wf := p.waitfors + p.waitfors = nil // clear them. release memory. + for w, _ := range wf { + // Here, we wait UNLOCKED, so that waitfors who are in the middle of + // adding a child to us can finish. we will immediately close the child. + p.Unlock() + <-w.ChildClosed() // wait till all waitfors are fully closed (before teardown) + p.Lock() + + // safe to call multiple times per link + w.ParentClear() + } + } + + if p.teardown != nil { + p.closeErr = p.teardown() // actually run the close logic (ok safe to teardown) + } + close(p.closed) // signal that we're shut down (Closed) + + // go remove all the parents from the process links. optimization. + go func(waiters []*processLink) { + for _, pl := range waiters { + pl.ClearChild() + pr, ok := pl.Parent().(*process) + if !ok { + // parent has already been called to close + continue + } + pr.Lock() + delete(pr.waitfors, pl) + delete(pr.children, pl) + pr.Unlock() + } + }(p.waiters) // pass in so + p.waiters = nil // clear them. release memory. +} + +// We will only wait on the children we have now. +// We will not wait on children added subsequently. +// this may change in the future. +func (p *process) CloseAfterChildren() error { + p.Lock() + select { + case <-p.Closed(): + p.Unlock() + return p.Close() // get error. 
safe, after p.Closed()
+	default:
+	}
+	p.Unlock()
+
+	// here only from one goroutine.
+
+	nextToWaitFor := func() Process {
+	p.Lock()
+	defer p.Unlock()
+	for e, _ := range p.waitfors {
+	c := e.Child()
+	if c == nil {
+	continue
+	}
+
+	select {
+	case <-c.Closed():
+	default:
+	return c
+	}
+	}
+	return nil
+	}
+
+	// wait until all processes we're waiting for are closed.
+	// the semantics here are simple: we will _only_ close
+	// if there are no processes currently waiting for.
+	for next := nextToWaitFor(); next != nil; next = nextToWaitFor() {
+	<-next.Closed()
+	}
+
+	// YAY! we're done. close
+	return p.Close()
+}
diff --git a/vendor/github.com/jbenet/goprocess/link.go b/vendor/github.com/jbenet/goprocess/link.go
new file mode 100644
index 0000000000..f46d81f68d
--- /dev/null
+++ b/vendor/github.com/jbenet/goprocess/link.go
@@ -0,0 +1,128 @@
+package goprocess
+
+import (
+	"sync"
+)
+
+// closedCh is an already-closed channel. used to return
+// in cases where we already know a channel is closed.
+var closedCh chan struct{}
+
+func init() {
+	closedCh = make(chan struct{})
+	close(closedCh)
+}
+
+// a processLink is an internal bookkeeping datastructure.
+// it's used to form a relationship between two processes.
+// It is mostly for keeping memory usage down (letting
+// children close and be garbage-collected).
+type processLink struct {
+	// guards all fields.
+	// DO NOT HOLD while holding process locks.
+	// it may be slow, and could deadlock if not careful.
+	sync.Mutex
+	parent Process
+	child  Process
+}
+
+func newProcessLink(p, c Process) *processLink {
+	return &processLink{
+	parent: p,
+	child:  c,
+	}
+}
+
+// ChildClosing returns whether the child is closing
+func (pl *processLink) ChildClosing() <-chan struct{} {
+	// grab a hold of it, and unlock, as .Closing may block.
+	pl.Lock()
+	child := pl.child
+	pl.Unlock()
+
+	if child == nil { // already closed? memory optimization.
+ return closedCh + } + return child.Closing() +} + +func (pl *processLink) ChildClosed() <-chan struct{} { + // grab a hold of it, and unlock, as .Closed may block. + pl.Lock() + child := pl.child + pl.Unlock() + + if child == nil { // already closed? memory optimization. + return closedCh + } + return child.Closed() +} + +func (pl *processLink) ChildClose() { + // grab a hold of it, and unlock, as .Closed may block. + pl.Lock() + child := pl.child + pl.Unlock() + + if child != nil { // already closed? memory optimization. + child.Close() + } +} + +func (pl *processLink) ClearChild() { + pl.Lock() + pl.child = nil + pl.Unlock() +} + +func (pl *processLink) ParentClear() { + pl.Lock() + pl.parent = nil + pl.Unlock() +} + +func (pl *processLink) Child() Process { + pl.Lock() + defer pl.Unlock() + return pl.child +} + +func (pl *processLink) Parent() Process { + pl.Lock() + defer pl.Unlock() + return pl.parent +} + +func (pl *processLink) AddToChild() { + cp := pl.Child() + + // is it a *process ? if not... panic. + var c *process + switch cp := cp.(type) { + case *process: + c = cp + case *bgProcess: + // Background process never closes so we don't need to do + // anything. + return + default: + panic("goprocess does not yet support other process impls.") + } + + // first, is it Closed? + c.Lock() + select { + case <-c.Closed(): + c.Unlock() + + // already closed. must not add. + // we must clear it, though. do so without the lock. 
+ pl.ClearChild() + return + + default: + // put the process link into q's waiters + c.waiters = append(c.waiters, pl) + c.Unlock() + } +} diff --git a/vendor/github.com/jbenet/goprocess/package.json b/vendor/github.com/jbenet/goprocess/package.json new file mode 100644 index 0000000000..0748533776 --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/package.json @@ -0,0 +1,14 @@ +{ + "author": "whyrusleeping", + "bugs": { + "url": "https://github.com/jbenet/goprocess" + }, + "gx": { + "dvcsimport": "github.com/jbenet/goprocess" + }, + "gxVersion": "0.8.0", + "language": "go", + "license": "", + "name": "goprocess", + "version": "1.0.0" +} diff --git a/vendor/github.com/jbenet/goprocess/periodic/README.md b/vendor/github.com/jbenet/goprocess/periodic/README.md new file mode 100644 index 0000000000..7a2c55db1c --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/periodic/README.md @@ -0,0 +1,4 @@ +# goprocess/periodic - periodic process creation + +- goprocess: https://github.com/jbenet/goprocess +- Godoc: https://godoc.org/github.com/jbenet/goprocess/periodic diff --git a/vendor/github.com/jbenet/goprocess/periodic/examples_test.go b/vendor/github.com/jbenet/goprocess/periodic/examples_test.go new file mode 100644 index 0000000000..4c73d76a25 --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/periodic/examples_test.go @@ -0,0 +1,85 @@ +package periodicproc_test + +import ( + "fmt" + "time" + + goprocess "github.com/jbenet/goprocess" + periodicproc "github.com/jbenet/goprocess/periodic" +) + +func ExampleEvery() { + tock := make(chan struct{}) + + i := 0 + p := periodicproc.Every(time.Second, func(proc goprocess.Process) { + tock <- struct{}{} + fmt.Printf("hello %d\n", i) + i++ + }) + + <-tock + <-tock + <-tock + p.Close() + + // Output: + // hello 0 + // hello 1 + // hello 2 +} + +func ExampleTick() { + p := periodicproc.Tick(time.Second, func(proc goprocess.Process) { + fmt.Println("tick") + }) + + <-time.After(3*time.Second + 500*time.Millisecond) + 
p.Close() + + // Output: + // tick + // tick + // tick +} + +func ExampleTickGo() { + + // with TickGo, execution is not rate limited, + // there can be many in-flight simultaneously + + wait := make(chan struct{}) + p := periodicproc.TickGo(time.Second, func(proc goprocess.Process) { + fmt.Println("tick") + <-wait + }) + + <-time.After(3*time.Second + 500*time.Millisecond) + + wait <- struct{}{} + wait <- struct{}{} + wait <- struct{}{} + p.Close() // blocks us until all children are closed. + + // Output: + // tick + // tick + // tick +} + +func ExampleOnSignal() { + sig := make(chan struct{}) + p := periodicproc.OnSignal(sig, func(proc goprocess.Process) { + fmt.Println("fire!") + }) + + sig <- struct{}{} + sig <- struct{}{} + sig <- struct{}{} + p.Close() + + // Output: + // fire! + // fire! + // fire! +} diff --git a/vendor/github.com/jbenet/goprocess/periodic/periodic.go b/vendor/github.com/jbenet/goprocess/periodic/periodic.go new file mode 100644 index 0000000000..823c43dcf4 --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/periodic/periodic.go @@ -0,0 +1,232 @@ +// Package periodic is part of github.com/jbenet/goprocess. +// It provides a simple periodic processor that calls a function +// periodically based on some options. 
+// +// For example: +// +// // use a time.Duration +// p := periodicproc.Every(time.Second, func(proc goprocess.Process) { +// fmt.Printf("the time is %s and all is well", time.Now()) +// }) +// +// <-time.After(5*time.Second) +// p.Close() +// +// // use a time.Time channel (like time.Ticker) +// p := periodicproc.Tick(time.Tick(time.Second), func(proc goprocess.Process) { +// fmt.Printf("the time is %s and all is well", time.Now()) +// }) +// +// <-time.After(5*time.Second) +// p.Close() +// +// // or arbitrary signals +// signal := make(chan struct{}) +// p := periodicproc.OnSignal(signal, func(proc goprocess.Process) { +// fmt.Printf("the time is %s and all is well", time.Now()) +// }) +// +// signal<- struct{}{} +// signal<- struct{}{} +// <-time.After(5 * time.Second) +// signal<- struct{}{} +// p.Close() +// +package periodicproc + +import ( + "time" + + gp "github.com/jbenet/goprocess" +) + +// Every calls the given ProcessFunc at periodic intervals. Internally, it uses +// <-time.After(interval), so it will have the behavior of waiting _at least_ +// interval in between calls. If you'd prefer the time.Ticker behavior, use +// periodicproc.Tick instead. +// This is sequentially rate limited, only one call will be in-flight at a time. +func Every(interval time.Duration, procfunc gp.ProcessFunc) gp.Process { + return gp.Go(func(proc gp.Process) { + for { + select { + case <-time.After(interval): + select { + case <-proc.Go(procfunc).Closed(): // spin it out as a child, and wait till it's done. + case <-proc.Closing(): // we're told to close + return + } + case <-proc.Closing(): // we're told to close + return + } + } + }) +} + +// EveryGo calls the given ProcessFunc at periodic intervals. Internally, it uses +// <-time.After(interval) +// This is not rate limited, multiple calls could be in-flight at the same time. 
+func EveryGo(interval time.Duration, procfunc gp.ProcessFunc) gp.Process { + return gp.Go(func(proc gp.Process) { + for { + select { + case <-time.After(interval): + proc.Go(procfunc) + case <-proc.Closing(): // we're told to close + return + } + } + }) +} + +// Tick constructs a ticker with interval, and calls the given ProcessFunc every +// time the ticker fires. +// This is sequentially rate limited, only one call will be in-flight at a time. +// +// p := periodicproc.Tick(time.Second, func(proc goprocess.Process) { +// fmt.Println("fire!") +// }) +// +// <-time.After(3 * time.Second) +// p.Close() +// +// // Output: +// // fire! +// // fire! +// // fire! +func Tick(interval time.Duration, procfunc gp.ProcessFunc) gp.Process { + return gp.Go(func(proc gp.Process) { + ticker := time.NewTicker(interval) + callOnTicker(ticker.C, procfunc)(proc) + ticker.Stop() + }) +} + +// TickGo constructs a ticker with interval, and calls the given ProcessFunc every +// time the ticker fires. +// This is not rate limited, multiple calls could be in-flight at the same time. +// +// p := periodicproc.TickGo(time.Second, func(proc goprocess.Process) { +// fmt.Println("fire!") +// <-time.After(10 * time.Second) // will not block sequential execution +// }) +// +// <-time.After(3 * time.Second) +// p.Close() +// +// // Output: +// // fire! +// // fire! +// // fire! +func TickGo(interval time.Duration, procfunc gp.ProcessFunc) gp.Process { + return gp.Go(func(proc gp.Process) { + ticker := time.NewTicker(interval) + goCallOnTicker(ticker.C, procfunc)(proc) + ticker.Stop() + }) +} + +// Ticker calls the given ProcessFunc every time the ticker fires. +// This is sequentially rate limited, only one call will be in-flight at a time. +func Ticker(ticker <-chan time.Time, procfunc gp.ProcessFunc) gp.Process { + return gp.Go(callOnTicker(ticker, procfunc)) +} + +// TickerGo calls the given ProcessFunc every time the ticker fires. 
+// This is not rate limited, multiple calls could be in-flight at the same time. +func TickerGo(ticker <-chan time.Time, procfunc gp.ProcessFunc) gp.Process { + return gp.Go(goCallOnTicker(ticker, procfunc)) +} + +func callOnTicker(ticker <-chan time.Time, pf gp.ProcessFunc) gp.ProcessFunc { + return func(proc gp.Process) { + for { + select { + case <-ticker: + select { + case <-proc.Go(pf).Closed(): // spin it out as a child, and wait till it's done. + case <-proc.Closing(): // we're told to close + return + } + case <-proc.Closing(): // we're told to close + return + } + } + } +} + +func goCallOnTicker(ticker <-chan time.Time, pf gp.ProcessFunc) gp.ProcessFunc { + return func(proc gp.Process) { + for { + select { + case <-ticker: + proc.Go(pf) + case <-proc.Closing(): // we're told to close + return + } + } + } +} + +// OnSignal calls the given ProcessFunc every time the signal fires, and waits for it to exit. +// This is sequentially rate limited, only one call will be in-flight at a time. +// +// sig := make(chan struct{}) +// p := periodicproc.OnSignal(sig, func(proc goprocess.Process) { +// fmt.Println("fire!") +// <-time.After(time.Second) // delays sequential execution by 1 second +// }) +// +// sig<- struct{} +// sig<- struct{} +// sig<- struct{} +// +// // Output: +// // fire! +// // fire! +// // fire! +func OnSignal(sig <-chan struct{}, procfunc gp.ProcessFunc) gp.Process { + return gp.Go(func(proc gp.Process) { + for { + select { + case <-sig: + select { + case <-proc.Go(procfunc).Closed(): // spin it out as a child, and wait till it's done. + case <-proc.Closing(): // we're told to close + return + } + case <-proc.Closing(): // we're told to close + return + } + } + }) +} + +// OnSignalGo calls the given ProcessFunc every time the signal fires. +// This is not rate limited, multiple calls could be in-flight at the same time. 
+// +// sig := make(chan struct{}) +// p := periodicproc.OnSignalGo(sig, func(proc goprocess.Process) { +// fmt.Println("fire!") +// <-time.After(time.Second) // wont block execution +// }) +// +// sig<- struct{} +// sig<- struct{} +// sig<- struct{} +// +// // Output: +// // fire! +// // fire! +// // fire! +func OnSignalGo(sig <-chan struct{}, procfunc gp.ProcessFunc) gp.Process { + return gp.Go(func(proc gp.Process) { + for { + select { + case <-sig: + proc.Go(procfunc) + case <-proc.Closing(): // we're told to close + return + } + } + }) +} diff --git a/vendor/github.com/jbenet/goprocess/periodic/periodic_test.go b/vendor/github.com/jbenet/goprocess/periodic/periodic_test.go new file mode 100644 index 0000000000..c5b43b88b7 --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/periodic/periodic_test.go @@ -0,0 +1,260 @@ +package periodicproc + +import ( + "testing" + "time" + + ci "github.com/jbenet/go-cienv" + gp "github.com/jbenet/goprocess" +) + +var ( + grace = time.Millisecond * 5 + interval = time.Millisecond * 10 + timeout = time.Second * 5 +) + +func init() { + if ci.IsRunning() { + grace = time.Millisecond * 500 + interval = time.Millisecond * 1000 + timeout = time.Second * 15 + } +} + +func between(min, diff, max time.Duration) bool { + return min <= diff && diff <= max +} + +func testBetween(t *testing.T, min, diff, max time.Duration) { + if !between(min, diff, max) { + t.Error("time diff incorrect:", min, diff, max) + } +} + +type intervalFunc func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) + +func testSeq(t *testing.T, toTest intervalFunc) { + t.Parallel() + + last := time.Now() + times := make(chan time.Time, 10) + p := toTest(times, nil) + + for i := 0; i < 5; i++ { + next := <-times + testBetween(t, interval-grace, next.Sub(last), interval+grace) + last = next + } + + go p.Close() + select { + case <-p.Closed(): + case <-time.After(timeout): + t.Error("proc failed to close") + } +} + +func testSeqWait(t *testing.T, toTest 
intervalFunc) { + t.Parallel() + + last := time.Now() + times := make(chan time.Time, 10) + wait := make(chan struct{}) + p := toTest(times, wait) + + for i := 0; i < 5; i++ { + next := <-times + testBetween(t, interval-grace, next.Sub(last), interval+grace) + + <-time.After(interval * 2) // make it wait. + last = time.Now() // make it now (sequential) + wait <- struct{}{} // release it. + } + + go p.Close() + + select { + case <-p.Closed(): + case <-time.After(timeout): + t.Error("proc failed to close") + } +} + +func testSeqNoWait(t *testing.T, toTest intervalFunc) { + t.Parallel() + + last := time.Now() + times := make(chan time.Time, 10) + wait := make(chan struct{}) + p := toTest(times, wait) + + for i := 0; i < 5; i++ { + next := <-times + testBetween(t, 0, next.Sub(last), interval+grace) // min of 0 + + <-time.After(interval * 2) // make it wait. + last = time.Now() // make it now (sequential) + wait <- struct{}{} // release it. + } + + go p.Close() + +end: + select { + case wait <- struct{}{}: // drain any extras. + goto end + case <-p.Closed(): + case <-time.After(timeout): + t.Error("proc failed to close") + } +} + +func testParallel(t *testing.T, toTest intervalFunc) { + t.Parallel() + + last := time.Now() + times := make(chan time.Time, 10) + wait := make(chan struct{}) + p := toTest(times, wait) + + for i := 0; i < 5; i++ { + next := <-times + testBetween(t, interval-grace, next.Sub(last), interval+grace) + last = next + + <-time.After(interval * 2) // make it wait. + wait <- struct{}{} // release it. + } + + go p.Close() + +end: + select { + case wait <- struct{}{}: // drain any extras. 
+ goto end + case <-p.Closed(): + case <-time.After(timeout): + t.Error("proc failed to close") + } +} + +func TestEverySeq(t *testing.T) { + testSeq(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) { + return Every(interval, func(proc gp.Process) { + times <- time.Now() + }) + }) +} + +func TestEverySeqWait(t *testing.T) { + testSeqWait(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) { + return Every(interval, func(proc gp.Process) { + times <- time.Now() + select { + case <-wait: + case <-proc.Closing(): + } + }) + }) +} + +func TestEveryGoSeq(t *testing.T) { + testSeq(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) { + return EveryGo(interval, func(proc gp.Process) { + times <- time.Now() + }) + }) +} + +func TestEveryGoSeqParallel(t *testing.T) { + testParallel(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) { + return EveryGo(interval, func(proc gp.Process) { + times <- time.Now() + select { + case <-wait: + case <-proc.Closing(): + } + }) + }) +} + +func TestTickSeq(t *testing.T) { + testSeq(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) { + return Tick(interval, func(proc gp.Process) { + times <- time.Now() + }) + }) +} + +func TestTickSeqNoWait(t *testing.T) { + testSeqNoWait(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) { + return Tick(interval, func(proc gp.Process) { + times <- time.Now() + select { + case <-wait: + case <-proc.Closing(): + } + }) + }) +} + +func TestTickGoSeq(t *testing.T) { + testSeq(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) { + return TickGo(interval, func(proc gp.Process) { + times <- time.Now() + }) + }) +} + +func TestTickGoSeqParallel(t *testing.T) { + testParallel(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) { + return TickGo(interval, func(proc gp.Process) { + times <- time.Now() + select { + case <-wait: + case <-proc.Closing(): 
+ } + }) + }) +} + +func TestTickerSeq(t *testing.T) { + testSeq(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) { + return Ticker(time.Tick(interval), func(proc gp.Process) { + times <- time.Now() + }) + }) +} + +func TestTickerSeqNoWait(t *testing.T) { + testSeqNoWait(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) { + return Ticker(time.Tick(interval), func(proc gp.Process) { + times <- time.Now() + select { + case <-wait: + case <-proc.Closing(): + } + }) + }) +} + +func TestTickerGoSeq(t *testing.T) { + testSeq(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) { + return TickerGo(time.Tick(interval), func(proc gp.Process) { + times <- time.Now() + }) + }) +} + +func TestTickerGoParallel(t *testing.T) { + testParallel(t, func(times chan<- time.Time, wait <-chan struct{}) (proc gp.Process) { + return TickerGo(time.Tick(interval), func(proc gp.Process) { + times <- time.Now() + select { + case <-wait: + case <-proc.Closing(): + } + }) + }) +} diff --git a/vendor/github.com/jbenet/goprocess/ratelimit/README.md b/vendor/github.com/jbenet/goprocess/ratelimit/README.md new file mode 100644 index 0000000000..3c91185e4e --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/ratelimit/README.md @@ -0,0 +1,4 @@ +# goprocess/ratelimit - ratelimit children creation + +- goprocess: https://github.com/jbenet/goprocess +- Godoc: https://godoc.org/github.com/jbenet/goprocess/ratelimit diff --git a/vendor/github.com/jbenet/goprocess/ratelimit/ratelimit.go b/vendor/github.com/jbenet/goprocess/ratelimit/ratelimit.go new file mode 100644 index 0000000000..1838d05374 --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/ratelimit/ratelimit.go @@ -0,0 +1,68 @@ +// Package ratelimit is part of github.com/jbenet/goprocess. +// It provides a simple process that ratelimits child creation. +// This is done internally with a channel/semaphore. 
+// So the call `RateLimiter.LimitedGo` may block until another +// child is Closed(). +package ratelimit + +import ( + process "github.com/jbenet/goprocess" +) + +// RateLimiter limits the spawning of children. It does so +// with an internal semaphore. Note that Go will continue +// to be the unlimited process.Process.Go, and ONLY the +// added function `RateLimiter.LimitedGo` will honor the +// limit. This is to improve readability and avoid confusion +// for the reader, particularly if code changes over time. +type RateLimiter struct { + process.Process + + limiter chan struct{} +} + +func NewRateLimiter(parent process.Process, limit int) *RateLimiter { + proc := process.WithParent(parent) + return &RateLimiter{Process: proc, limiter: LimitChan(limit)} +} + +// LimitedGo creates a new process, adds it as a child, and spawns the +// ProcessFunc f in its own goroutine, but may block according to the +// internal rate limit. It is equivalent to: +// +// func(f process.ProcessFunc) { +// <-limitch +// p.Go(func (child process.Process) { +// f(child) +// f.Close() // make sure its children close too! +// limitch<- struct{}{} +// }) +/// } +// +// It is useful to construct simple asynchronous workers, children of p, +// and rate limit their creation, to avoid spinning up too many, too fast. +// This is great for providing backpressure to producers. +func (rl *RateLimiter) LimitedGo(f process.ProcessFunc) { + + <-rl.limiter + p := rl.Go(f) + + // this <-closed() is here because the child may have spawned + // children of its own, and our rate limiter should capture that. + go func() { + <-p.Closed() + rl.limiter <- struct{}{} + }() +} + +// LimitChan returns a rate-limiting channel. it is the usual, simple, +// golang-idiomatic rate-limiting semaphore. This function merely +// initializes it with certain buffer size, and sends that many values, +// so it is ready to be used. 
+func LimitChan(limit int) chan struct{} { + limitch := make(chan struct{}, limit) + for i := 0; i < limit; i++ { + limitch <- struct{}{} + } + return limitch +} diff --git a/vendor/github.com/jbenet/goprocess/ratelimit/ratelimit_test.go b/vendor/github.com/jbenet/goprocess/ratelimit/ratelimit_test.go new file mode 100644 index 0000000000..519a4abb0d --- /dev/null +++ b/vendor/github.com/jbenet/goprocess/ratelimit/ratelimit_test.go @@ -0,0 +1,98 @@ +package ratelimit + +import ( + "testing" + "time" + + process "github.com/jbenet/goprocess" +) + +func TestRateLimitLimitedGoBlocks(t *testing.T) { + numChildren := 6 + + t.Logf("create a rate limiter with limit of %d", numChildren/2) + rl := NewRateLimiter(process.Background(), numChildren/2) + + doneSpawning := make(chan struct{}) + childClosing := make(chan struct{}) + + t.Log("spawn 6 children with LimitedGo.") + go func() { + for i := 0; i < numChildren; i++ { + rl.LimitedGo(func(child process.Process) { + // hang until we drain childClosing + childClosing <- struct{}{} + }) + t.Logf("spawned %d", i) + } + close(doneSpawning) + }() + + t.Log("should have blocked.") + select { + case <-doneSpawning: + t.Error("did not block") + case <-time.After(time.Millisecond): // for scheduler + t.Log("blocked") + } + + t.Logf("drain %d children so they close", numChildren/2) + for i := 0; i < numChildren/2; i++ { + t.Logf("closing %d", i) + <-childClosing // consume child cloing + t.Logf("closed %d", i) + } + + t.Log("should be done spawning.") + select { + case <-doneSpawning: + case <-time.After(100 * time.Millisecond): // for scheduler + t.Error("still blocked...") + } + + t.Logf("drain %d children so they close", numChildren/2) + for i := 0; i < numChildren/2; i++ { + <-childClosing + t.Logf("closed %d", i) + } + + rl.Close() // ensure everyone's closed. 
+} + +func TestRateLimitGoDoesntBlock(t *testing.T) { + numChildren := 6 + + t.Logf("create a rate limiter with limit of %d", numChildren/2) + rl := NewRateLimiter(process.Background(), numChildren/2) + + doneSpawning := make(chan struct{}) + childClosing := make(chan struct{}) + + t.Log("spawn 6 children with usual Process.Go.") + go func() { + for i := 0; i < numChildren; i++ { + rl.Go(func(child process.Process) { + // hang until we drain childClosing + childClosing <- struct{}{} + }) + t.Logf("spawned %d", i) + } + close(doneSpawning) + }() + + t.Log("should not have blocked.") + select { + case <-doneSpawning: + t.Log("did not block") + case <-time.After(100 * time.Millisecond): // for scheduler + t.Error("process.Go blocked. it should not.") + } + + t.Log("drain children so they close") + for i := 0; i < numChildren; i++ { + <-childClosing + t.Logf("closed %d", i) + } + + rl.Close() // ensure everyone's closed. +} diff --git a/vendor/github.com/libp2p/go-buffer-pool/.travis.yml b/vendor/github.com/libp2p/go-buffer-pool/.travis.yml new file mode 100644 index 0000000000..5163d693fc --- /dev/null +++ b/vendor/github.com/libp2p/go-buffer-pool/.travis.yml @@ -0,0 +1,30 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/pkg/mod + - $HOME/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/libp2p/go-buffer-pool/LICENSE b/vendor/github.com/libp2p/go-buffer-pool/LICENSE new file mode 100644 index 0000000000..c7386b3c94 --- /dev/null +++ b/vendor/github.com/libp2p/go-buffer-pool/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and 
associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/libp2p/go-buffer-pool/LICENSE-BSD b/vendor/github.com/libp2p/go-buffer-pool/LICENSE-BSD new file mode 100644 index 0000000000..97ece7897d --- /dev/null +++ b/vendor/github.com/libp2p/go-buffer-pool/LICENSE-BSD @@ -0,0 +1,29 @@ +### Applies to buffer.go and buffer_test.go ### + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/libp2p/go-buffer-pool/README.md b/vendor/github.com/libp2p/go-buffer-pool/README.md new file mode 100644 index 0000000000..830cb56268 --- /dev/null +++ b/vendor/github.com/libp2p/go-buffer-pool/README.md @@ -0,0 +1,53 @@ +go-buffer-pool +================== + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai) +[![](https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square)](https://libp2p.io/) +[![](https://img.shields.io/badge/freenode-%23libp2p-yellow.svg?style=flat-square)](https://webchat.freenode.net/?channels=%23libp2p) +[![codecov](https://codecov.io/gh/libp2p/go-buffer-pool/branch/master/graph/badge.svg)](https://codecov.io/gh/libp2p/go-buffer-pool) +[![Travis CI](https://travis-ci.org/libp2p/go-buffer-pool.svg?branch=master)](https://travis-ci.org/libp2p/go-buffer-pool) +[![Discourse posts](https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg)](https://discuss.libp2p.io) + +> A variable size buffer pool for go. 
+ +## Table of Contents + +- [Use Case](#use-case) + - [Advantages over GC](#advantages-over-gc) + - [Disadvantages over GC:](#disadvantages-over-gc) +- [Contribute](#contribute) +- [License](#license) + +## Use Case + +Use this when you need to repeatedly allocate and free a bunch of temporary buffers of approximately the same size. + +### Advantages over GC + +* Reduces Memory Usage: + * We don't have to wait for a GC to run before we can reuse memory. This is essential if you're repeatedly allocating large short-lived buffers. + +* Reduces CPU usage: + * It takes some load off of the GC (due to buffer reuse). + * We don't have to zero buffers (fewer wasteful memory writes). + +### Disadvantages over GC: + +* Can leak memory contents. Unlike the go GC, we *don't* zero memory. +* All buffers have a capacity of a power of 2. This is fine if you either (a) actually need buffers with this size or (b) expect these buffers to be temporary. +* Requires that buffers be returned explicitly. This can lead to race conditions and memory corruption if the buffer is released while it's still in use. + +## Contribute + +PRs are welcome! + +Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + +## License + +MIT © Protocol Labs +BSD © The Go Authors + +--- + +The last gx published version of this module was: 0.1.3: QmQDvJoB6aJWN3sjr3xsgXqKCXf4jU5zdMXpDMsBkYVNqa diff --git a/vendor/github.com/libp2p/go-buffer-pool/buffer.go b/vendor/github.com/libp2p/go-buffer-pool/buffer.go new file mode 100644 index 0000000000..2e4645a97d --- /dev/null +++ b/vendor/github.com/libp2p/go-buffer-pool/buffer.go @@ -0,0 +1,302 @@ +// This is a derivitive work of Go's bytes.Buffer implementation. +// +// Originally copyright 2009 The Go Authors. All rights reserved. +// +// Modifications copyright 2018 Steven Allen. All rights reserved. 
+// +// Use of this source code is governed by both a BSD-style and an MIT-style +// license that can be found in the LICENSE_BSD and LICENSE files. + +package pool + +import ( + "io" +) + +// Buffer is a buffer like bytes.Buffer that: +// +// 1. Uses a buffer pool. +// 2. Frees memory on read. +// +// If you only have a few buffers and read/write at a steady rate, *don't* use +// this package, it'll be slower. +// +// However: +// +// 1. If you frequently create/destroy buffers, this implementation will be +// significantly nicer to the allocator. +// 2. If you have many buffers with bursty traffic, this implementation will use +// significantly less memory. +type Buffer struct { + // Pool is the buffer pool to use. If nil, this Buffer will use the + // global buffer pool. + Pool *BufferPool + + buf []byte + rOff int + + // Preallocated slice for samll reads/writes. + // This is *really* important for performance and only costs 8 words. + bootstrap [64]byte +} + +// NewBuffer constructs a new buffer initialized to `buf`. +// Unlike `bytes.Buffer`, we *copy* the buffer but don't reuse it (to ensure +// that we *only* use buffers from the pool). +func NewBuffer(buf []byte) *Buffer { + b := new(Buffer) + if len(buf) > 0 { + b.buf = b.getBuf(len(buf)) + copy(b.buf, buf) + } + return b +} + +// NewBufferString is identical to NewBuffer *except* that it allows one to +// initialize the buffer from a string (without having to allocate an +// intermediate bytes slice). +func NewBufferString(buf string) *Buffer { + b := new(Buffer) + if len(buf) > 0 { + b.buf = b.getBuf(len(buf)) + copy(b.buf, buf) + } + return b +} + +func (b *Buffer) grow(n int) int { + wOff := len(b.buf) + bCap := cap(b.buf) + + if bCap >= wOff+n { + b.buf = b.buf[:wOff+n] + return wOff + } + + bSize := b.Len() + + minCap := 2*bSize + n + + // Slide if cap >= minCap. + // Reallocate otherwise. + if bCap >= minCap { + copy(b.buf, b.buf[b.rOff:]) + } else { + // Needs new buffer. 
+ newBuf := b.getBuf(minCap) + copy(newBuf, b.buf[b.rOff:]) + b.returnBuf() + b.buf = newBuf + } + + b.rOff = 0 + b.buf = b.buf[:bSize+n] + return bSize +} + +func (b *Buffer) getPool() *BufferPool { + if b.Pool == nil { + return GlobalPool + } + return b.Pool +} + +func (b *Buffer) returnBuf() { + if cap(b.buf) > len(b.bootstrap) { + b.getPool().Put(b.buf) + } + b.buf = nil +} + +func (b *Buffer) getBuf(n int) []byte { + if n <= len(b.bootstrap) { + return b.bootstrap[:n] + } + return b.getPool().Get(n) +} + +// Len returns the number of bytes that can be read from this buffer. +func (b *Buffer) Len() int { + return len(b.buf) - b.rOff +} + +// Cap returns the current capacity of the buffer. +// +// Note: Buffer *may* re-allocate when writing (or growing by) `n` bytes even if +// `Cap() < Len() + n` to avoid excessive copying. +func (b *Buffer) Cap() int { + return cap(b.buf) +} + +// Bytes returns the slice of bytes currently buffered in the Buffer. +// +// The buffer returned by Bytes is valid until the next call grow, truncate, +// read, or write. Really, just don't touch the Buffer until you're done with +// the return value of this function. +func (b *Buffer) Bytes() []byte { + return b.buf[b.rOff:] +} + +// String returns the string representation of the buffer. +// +// It returns `` the buffer is a nil pointer. +func (b *Buffer) String() string { + if b == nil { + return "" + } + return string(b.buf[b.rOff:]) +} + +// WriteString writes a string to the buffer. +// +// This function is identical to Write except that it allows one to write a +// string directly without allocating an intermediate byte slice. +func (b *Buffer) WriteString(buf string) (int, error) { + wOff := b.grow(len(buf)) + return copy(b.buf[wOff:], buf), nil +} + +// Truncate truncates the Buffer. +// +// Panics if `n > b.Len()`. +// +// This function may free memory by shrinking the internal buffer. 
+func (b *Buffer) Truncate(n int) { + if n < 0 || n > b.Len() { + panic("truncation out of range") + } + b.buf = b.buf[:b.rOff+n] + b.shrink() +} + +// Reset is equivalent to Truncate(0). +func (b *Buffer) Reset() { + b.returnBuf() + b.rOff = 0 +} + +// ReadByte reads a single byte from the Buffer. +func (b *Buffer) ReadByte() (byte, error) { + if b.rOff >= len(b.buf) { + return 0, io.EOF + } + c := b.buf[b.rOff] + b.rOff++ + return c, nil +} + +// WriteByte writes a single byte to the Buffer. +func (b *Buffer) WriteByte(c byte) error { + wOff := b.grow(1) + b.buf[wOff] = c + return nil +} + +// Grow grows the internal buffer such that `n` bytes can be written without +// reallocating. +func (b *Buffer) Grow(n int) { + wOff := b.grow(n) + b.buf = b.buf[:wOff] +} + +// Next is an alternative to `Read` that returns a byte slice instead of taking +// one. +// +// The returned byte slice is valid until the next read, write, grow, or +// truncate. +func (b *Buffer) Next(n int) []byte { + m := b.Len() + if m < n { + n = m + } + data := b.buf[b.rOff : b.rOff+n] + b.rOff += n + return data +} + +// Write writes the byte slice to the buffer. +func (b *Buffer) Write(buf []byte) (int, error) { + wOff := b.grow(len(buf)) + return copy(b.buf[wOff:], buf), nil +} + +// WriteTo copies from the buffer into the given writer until the buffer is +// empty. +func (b *Buffer) WriteTo(w io.Writer) (int64, error) { + if b.rOff < len(b.buf) { + n, err := w.Write(b.buf[b.rOff:]) + b.rOff += n + if b.rOff > len(b.buf) { + panic("invalid write count") + } + b.shrink() + return int64(n), err + } + return 0, nil +} + +// MinRead is the minimum slice size passed to a Read call by +// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond +// what is required to hold the contents of r, ReadFrom will not grow the +// underlying buffer. +const MinRead = 512 + +// ReadFrom reads from the given reader into the buffer. 
+func (b *Buffer) ReadFrom(r io.Reader) (int64, error) { + n := int64(0) + for { + wOff := b.grow(MinRead) + // Use *entire* buffer. + b.buf = b.buf[:cap(b.buf)] + + read, err := r.Read(b.buf[wOff:]) + b.buf = b.buf[:wOff+read] + n += int64(read) + switch err { + case nil: + case io.EOF: + err = nil + fallthrough + default: + b.shrink() + return n, err + } + } +} + +// Read reads at most `len(buf)` bytes from the internal buffer into the given +// buffer. +func (b *Buffer) Read(buf []byte) (int, error) { + if len(buf) == 0 { + return 0, nil + } + if b.rOff >= len(b.buf) { + return 0, io.EOF + } + n := copy(buf, b.buf[b.rOff:]) + b.rOff += n + b.shrink() + return n, nil +} + +func (b *Buffer) shrink() { + c := b.Cap() + // Either nil or bootstrap. + if c <= len(b.bootstrap) { + return + } + + l := b.Len() + if l == 0 { + // Shortcut if empty. + b.returnBuf() + b.rOff = 0 + } else if l*8 < c { + // Only shrink when capacity > 8x length. Avoids shrinking too aggressively. + newBuf := b.getBuf(l) + copy(newBuf, b.buf[b.rOff:]) + b.returnBuf() + b.rOff = 0 + b.buf = newBuf[:l] + } +} diff --git a/vendor/github.com/libp2p/go-buffer-pool/codecov.yml b/vendor/github.com/libp2p/go-buffer-pool/codecov.yml new file mode 100644 index 0000000000..5f88a9ea27 --- /dev/null +++ b/vendor/github.com/libp2p/go-buffer-pool/codecov.yml @@ -0,0 +1,3 @@ +coverage: + range: "50...100" +comment: off diff --git a/vendor/github.com/libp2p/go-buffer-pool/go.mod b/vendor/github.com/libp2p/go-buffer-pool/go.mod new file mode 100644 index 0000000000..02b140dddb --- /dev/null +++ b/vendor/github.com/libp2p/go-buffer-pool/go.mod @@ -0,0 +1,3 @@ +module github.com/libp2p/go-buffer-pool + +go 1.12 diff --git a/vendor/github.com/libp2p/go-buffer-pool/pool.go b/vendor/github.com/libp2p/go-buffer-pool/pool.go new file mode 100644 index 0000000000..d812840aaa --- /dev/null +++ b/vendor/github.com/libp2p/go-buffer-pool/pool.go @@ -0,0 +1,115 @@ +// Package pool provides a sync.Pool equivalent that 
buckets incoming +// requests to one of 32 sub-pools, one for each power of 2, 0-32. +// +// import (pool "github.com/libp2p/go-buffer-pool") +// var p pool.BufferPool +// +// small := make([]byte, 1024) +// large := make([]byte, 4194304) +// p.Put(small) +// p.Put(large) +// +// small2 := p.Get(1024) +// large2 := p.Get(4194304) +// fmt.Println("small2 len:", len(small2)) +// fmt.Println("large2 len:", len(large2)) +// +// // Output: +// // small2 len: 1024 +// // large2 len: 4194304 +// +package pool + +import ( + "math" + "math/bits" + "sync" +) + +// GlobalPool is a static Pool for reusing byteslices of various sizes. +var GlobalPool = new(BufferPool) + +// MaxLength is the maximum length of an element that can be added to the Pool. +const MaxLength = math.MaxInt32 + +// BufferPool is a pool to handle cases of reusing elements of varying sizes. It +// maintains 32 internal pools, for each power of 2 in 0-32. +// +// You should generally just call the package level Get and Put methods or use +// the GlobalPool BufferPool instead of constructing your own. +// +// You MUST NOT copy Pool after using. +type BufferPool struct { + pools [32]sync.Pool // a list of singlePools + ptrs sync.Pool +} + +type bufp struct { + buf []byte +} + +// Get retrieves a buffer of the appropriate length from the buffer pool or +// allocates a new one. Get may choose to ignore the pool and treat it as empty. +// Callers should not assume any relation between values passed to Put and the +// values returned by Get. +// +// If no suitable buffer exists in the pool, Get creates one. 
+func (p *BufferPool) Get(length int) []byte { + if length == 0 { + return nil + } + if length > MaxLength { + return make([]byte, length) + } + idx := nextLogBase2(uint32(length)) + if ptr := p.pools[idx].Get(); ptr != nil { + bp := ptr.(*bufp) + buf := bp.buf[:uint32(length)] + bp.buf = nil + p.ptrs.Put(ptr) + return buf + } + return make([]byte, 1< MaxLength { + return // drop it + } + idx := prevLogBase2(uint32(capacity)) + var bp *bufp + if ptr := p.ptrs.Get(); ptr != nil { + bp = ptr.(*bufp) + } else { + bp = new(bufp) + } + bp.buf = buf + p.pools[idx].Put(bp) +} + +// Get retrieves a buffer of the appropriate length from the global buffer pool +// (or allocates a new one). +func Get(length int) []byte { + return GlobalPool.Get(length) +} + +// Put returns a buffer to the global buffer pool. +func Put(slice []byte) { + GlobalPool.Put(slice) +} + +// Log of base two, round up (for v > 0). +func nextLogBase2(v uint32) uint32 { + return uint32(bits.Len32(v - 1)) +} + +// Log of base two, round down (for v > 0) +func prevLogBase2(num uint32) uint32 { + next := nextLogBase2(num) + if num == (1 << uint32(next)) { + return next + } + return next - 1 +} diff --git a/vendor/github.com/libp2p/go-buffer-pool/writer.go b/vendor/github.com/libp2p/go-buffer-pool/writer.go new file mode 100644 index 0000000000..cea83f9237 --- /dev/null +++ b/vendor/github.com/libp2p/go-buffer-pool/writer.go @@ -0,0 +1,119 @@ +package pool + +import ( + "bufio" + "io" + "sync" +) + +const WriterBufferSize = 4096 + +var bufioWriterPool = sync.Pool{ + New: func() interface{} { + return bufio.NewWriterSize(nil, WriterBufferSize) + }, +} + +// Writer is a buffered writer that returns its internal buffer in a pool when +// not in use. +type Writer struct { + W io.Writer + bufw *bufio.Writer +} + +func (w *Writer) ensureBuffer() { + if w.bufw == nil { + w.bufw = bufioWriterPool.Get().(*bufio.Writer) + w.bufw.Reset(w.W) + } +} + +// Write writes the given byte slice to the underlying connection. 
+// +// Note: Write won't return the write buffer to the pool even if it ends up +// being empty after the write. You must call Flush() to do that. +func (w *Writer) Write(b []byte) (int, error) { + if w.bufw == nil { + if len(b) >= WriterBufferSize { + return w.W.Write(b) + } + w.bufw = bufioWriterPool.Get().(*bufio.Writer) + w.bufw.Reset(w.W) + } + return w.bufw.Write(b) +} + +// Size returns the size of the underlying buffer. +func (w *Writer) Size() int { + return WriterBufferSize +} + +// Available returns the amount buffer space available. +func (w *Writer) Available() int { + if w.bufw != nil { + return w.bufw.Available() + } + return WriterBufferSize +} + +// Buffered returns the amount of data buffered. +func (w *Writer) Buffered() int { + if w.bufw != nil { + return w.bufw.Buffered() + } + return 0 +} + +// WriteByte writes a single byte. +func (w *Writer) WriteByte(b byte) error { + w.ensureBuffer() + return w.bufw.WriteByte(b) +} + +// WriteRune writes a single rune, returning the number of bytes written. +func (w *Writer) WriteRune(r rune) (int, error) { + w.ensureBuffer() + return w.bufw.WriteRune(r) +} + +// WriteString writes a string, returning the number of bytes written. +func (w *Writer) WriteString(s string) (int, error) { + w.ensureBuffer() + return w.bufw.WriteString(s) +} + +// Flush flushes the write buffer, if any, and returns it to the pool. +func (w *Writer) Flush() error { + if w.bufw == nil { + return nil + } + if err := w.bufw.Flush(); err != nil { + return err + } + w.bufw.Reset(nil) + bufioWriterPool.Put(w.bufw) + w.bufw = nil + return nil +} + +// Close flushes the underlying writer and closes it if it implements the +// io.Closer interface. +// +// Note: Close() closes the writer even if Flush() fails to avoid leaking system +// resources. If you want to make sure Flush() succeeds, call it first. +func (w *Writer) Close() error { + var ( + ferr, cerr error + ) + ferr = w.Flush() + + // always close even if flush fails. 
+ if closer, ok := w.W.(io.Closer); ok { + cerr = closer.Close() + } + + if ferr != nil { + return ferr + } + return cerr +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/LICENSE b/vendor/github.com/libp2p/go-libp2p-core/LICENSE new file mode 100644 index 0000000000..770d1744d1 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/LICENSE @@ -0,0 +1,4 @@ +Dual-licensed under MIT and ASLv2, by way of the [Permissive License Stack](https://protocol.ai/blog/announcing-the-permissive-license-stack/). + +Apache-2.0: https://www.apache.org/licenses/license-2.0 +MIT: https://www.opensource.org/licenses/mit diff --git a/vendor/github.com/libp2p/go-libp2p-core/LICENSE-APACHE b/vendor/github.com/libp2p/go-libp2p-core/LICENSE-APACHE new file mode 100644 index 0000000000..546514363d --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/LICENSE-APACHE @@ -0,0 +1,13 @@ +Copyright 2019. Protocol Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/libp2p/go-libp2p-core/LICENSE-MIT b/vendor/github.com/libp2p/go-libp2p-core/LICENSE-MIT new file mode 100644 index 0000000000..ea532a8305 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/LICENSE-MIT @@ -0,0 +1,19 @@ +Copyright 2019. Protocol Labs, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/ecdsa.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/ecdsa.go new file mode 100644 index 0000000000..3b7a425a5d --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/crypto/ecdsa.go @@ -0,0 +1,177 @@ +package crypto + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/asn1" + "errors" + "io" + "math/big" + + pb "github.com/libp2p/go-libp2p-core/crypto/pb" + + sha256 "github.com/minio/sha256-simd" +) + +// ECDSAPrivateKey is an implementation of an ECDSA private key +type ECDSAPrivateKey struct { + priv *ecdsa.PrivateKey +} + +// ECDSAPublicKey is an implementation of an ECDSA public key +type ECDSAPublicKey struct { + pub *ecdsa.PublicKey +} + +// ECDSASig holds the r and s values of an ECDSA signature +type ECDSASig struct { + R, S *big.Int +} + +var ( + // ErrNotECDSAPubKey is returned when the public key passed is not an ecdsa public key + ErrNotECDSAPubKey = errors.New("not an ecdsa public key") + // ErrNilSig is returned when the signature is nil + ErrNilSig = errors.New("sig is nil") + // ErrNilPrivateKey is returned when a nil private key is provided + ErrNilPrivateKey = errors.New("private key is nil") + // ErrNilPublicKey is returned when a nil public key is provided + ErrNilPublicKey = errors.New("public key is nil") + // ECDSACurve is the default ecdsa curve used + ECDSACurve = elliptic.P256() +) + +// GenerateECDSAKeyPair generates a new ecdsa private and public key +func GenerateECDSAKeyPair(src io.Reader) (PrivKey, PubKey, error) { + return GenerateECDSAKeyPairWithCurve(ECDSACurve, src) +} + +// GenerateECDSAKeyPairWithCurve generates a new ecdsa private and public key with a speicified curve +func GenerateECDSAKeyPairWithCurve(curve elliptic.Curve, src io.Reader) (PrivKey, PubKey, error) { + priv, err := ecdsa.GenerateKey(curve, src) + if err != nil { + return nil, nil, err + } + + return &ECDSAPrivateKey{priv}, 
&ECDSAPublicKey{&priv.PublicKey}, nil +} + +// ECDSAKeyPairFromKey generates a new ecdsa private and public key from an input private key +func ECDSAKeyPairFromKey(priv *ecdsa.PrivateKey) (PrivKey, PubKey, error) { + if priv == nil { + return nil, nil, ErrNilPrivateKey + } + + return &ECDSAPrivateKey{priv}, &ECDSAPublicKey{&priv.PublicKey}, nil +} + +// MarshalECDSAPrivateKey returns x509 bytes from a private key +func MarshalECDSAPrivateKey(ePriv ECDSAPrivateKey) ([]byte, error) { + return x509.MarshalECPrivateKey(ePriv.priv) +} + +// MarshalECDSAPublicKey returns x509 bytes from a public key +func MarshalECDSAPublicKey(ePub ECDSAPublicKey) ([]byte, error) { + return x509.MarshalPKIXPublicKey(ePub.pub) +} + +// UnmarshalECDSAPrivateKey returns a private key from x509 bytes +func UnmarshalECDSAPrivateKey(data []byte) (PrivKey, error) { + priv, err := x509.ParseECPrivateKey(data) + if err != nil { + return nil, err + } + + return &ECDSAPrivateKey{priv}, nil +} + +// UnmarshalECDSAPublicKey returns the public key from x509 bytes +func UnmarshalECDSAPublicKey(data []byte) (PubKey, error) { + pubIfc, err := x509.ParsePKIXPublicKey(data) + if err != nil { + return nil, err + } + + pub, ok := pubIfc.(*ecdsa.PublicKey) + if !ok { + return nil, ErrNotECDSAPubKey + } + + return &ECDSAPublicKey{pub}, nil +} + +// Bytes returns the private key as protobuf bytes +func (ePriv *ECDSAPrivateKey) Bytes() ([]byte, error) { + return MarshalPrivateKey(ePriv) +} + +// Type returns the key type +func (ePriv *ECDSAPrivateKey) Type() pb.KeyType { + return pb.KeyType_ECDSA +} + +// Raw returns x509 bytes from a private key +func (ePriv *ECDSAPrivateKey) Raw() ([]byte, error) { + return x509.MarshalECPrivateKey(ePriv.priv) +} + +// Equals compares two private keys +func (ePriv *ECDSAPrivateKey) Equals(o Key) bool { + return basicEquals(ePriv, o) +} + +// Sign returns the signature of the input data +func (ePriv *ECDSAPrivateKey) Sign(data []byte) ([]byte, error) { + hash := 
sha256.Sum256(data) + r, s, err := ecdsa.Sign(rand.Reader, ePriv.priv, hash[:]) + if err != nil { + return nil, err + } + + return asn1.Marshal(ECDSASig{ + R: r, + S: s, + }) +} + +// GetPublic returns a public key +func (ePriv *ECDSAPrivateKey) GetPublic() PubKey { + return &ECDSAPublicKey{&ePriv.priv.PublicKey} +} + +// Bytes returns the public key as protobuf bytes +func (ePub *ECDSAPublicKey) Bytes() ([]byte, error) { + return MarshalPublicKey(ePub) +} + +// Type returns the key type +func (ePub *ECDSAPublicKey) Type() pb.KeyType { + return pb.KeyType_ECDSA +} + +// Raw returns x509 bytes from a public key +func (ePub *ECDSAPublicKey) Raw() ([]byte, error) { + return x509.MarshalPKIXPublicKey(ePub.pub) +} + +// Equals compares to public keys +func (ePub *ECDSAPublicKey) Equals(o Key) bool { + return basicEquals(ePub, o) +} + +// Verify compares data to a signature +func (ePub *ECDSAPublicKey) Verify(data, sigBytes []byte) (bool, error) { + sig := new(ECDSASig) + if _, err := asn1.Unmarshal(sigBytes, sig); err != nil { + return false, err + } + if sig == nil { + return false, ErrNilSig + } + + hash := sha256.Sum256(data) + + return ecdsa.Verify(ePub.pub, hash[:], sig.R, sig.S), nil +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/ed25519.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/ed25519.go new file mode 100644 index 0000000000..1707e7509f --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/crypto/ed25519.go @@ -0,0 +1,155 @@ +package crypto + +import ( + "bytes" + "crypto/ed25519" + "crypto/subtle" + "errors" + "fmt" + "io" + + pb "github.com/libp2p/go-libp2p-core/crypto/pb" +) + +// Ed25519PrivateKey is an ed25519 private key. +type Ed25519PrivateKey struct { + k ed25519.PrivateKey +} + +// Ed25519PublicKey is an ed25519 public key. +type Ed25519PublicKey struct { + k ed25519.PublicKey +} + +// GenerateEd25519Key generates a new ed25519 private and public key pair. 
+func GenerateEd25519Key(src io.Reader) (PrivKey, PubKey, error) { + pub, priv, err := ed25519.GenerateKey(src) + if err != nil { + return nil, nil, err + } + + return &Ed25519PrivateKey{ + k: priv, + }, + &Ed25519PublicKey{ + k: pub, + }, + nil +} + +// Type of the private key (Ed25519). +func (k *Ed25519PrivateKey) Type() pb.KeyType { + return pb.KeyType_Ed25519 +} + +// Bytes marshals an ed25519 private key to protobuf bytes. +func (k *Ed25519PrivateKey) Bytes() ([]byte, error) { + return MarshalPrivateKey(k) +} + +// Raw private key bytes. +func (k *Ed25519PrivateKey) Raw() ([]byte, error) { + // The Ed25519 private key contains two 32-bytes curve points, the private + // key and the public key. + // It makes it more efficient to get the public key without re-computing an + // elliptic curve multiplication. + buf := make([]byte, len(k.k)) + copy(buf, k.k) + + return buf, nil +} + +func (k *Ed25519PrivateKey) pubKeyBytes() []byte { + return k.k[ed25519.PrivateKeySize-ed25519.PublicKeySize:] +} + +// Equals compares two ed25519 private keys. +func (k *Ed25519PrivateKey) Equals(o Key) bool { + edk, ok := o.(*Ed25519PrivateKey) + if !ok { + return basicEquals(k, o) + } + + return subtle.ConstantTimeCompare(k.k, edk.k) == 1 +} + +// GetPublic returns an ed25519 public key from a private key. +func (k *Ed25519PrivateKey) GetPublic() PubKey { + return &Ed25519PublicKey{k: k.pubKeyBytes()} +} + +// Sign returns a signature from an input message. +func (k *Ed25519PrivateKey) Sign(msg []byte) ([]byte, error) { + return ed25519.Sign(k.k, msg), nil +} + +// Type of the public key (Ed25519). +func (k *Ed25519PublicKey) Type() pb.KeyType { + return pb.KeyType_Ed25519 +} + +// Bytes returns a ed25519 public key as protobuf bytes. +func (k *Ed25519PublicKey) Bytes() ([]byte, error) { + return MarshalPublicKey(k) +} + +// Raw public key bytes. +func (k *Ed25519PublicKey) Raw() ([]byte, error) { + return k.k, nil +} + +// Equals compares two ed25519 public keys. 
+func (k *Ed25519PublicKey) Equals(o Key) bool { + edk, ok := o.(*Ed25519PublicKey) + if !ok { + return basicEquals(k, o) + } + + return bytes.Equal(k.k, edk.k) +} + +// Verify checks a signature agains the input data. +func (k *Ed25519PublicKey) Verify(data []byte, sig []byte) (bool, error) { + return ed25519.Verify(k.k, data, sig), nil +} + +// UnmarshalEd25519PublicKey returns a public key from input bytes. +func UnmarshalEd25519PublicKey(data []byte) (PubKey, error) { + if len(data) != 32 { + return nil, errors.New("expect ed25519 public key data size to be 32") + } + + return &Ed25519PublicKey{ + k: ed25519.PublicKey(data), + }, nil +} + +// UnmarshalEd25519PrivateKey returns a private key from input bytes. +func UnmarshalEd25519PrivateKey(data []byte) (PrivKey, error) { + switch len(data) { + case ed25519.PrivateKeySize + ed25519.PublicKeySize: + // Remove the redundant public key. See issue #36. + redundantPk := data[ed25519.PrivateKeySize:] + pk := data[ed25519.PrivateKeySize-ed25519.PublicKeySize : ed25519.PrivateKeySize] + if subtle.ConstantTimeCompare(pk, redundantPk) == 0 { + return nil, errors.New("expected redundant ed25519 public key to be redundant") + } + + // No point in storing the extra data. + newKey := make([]byte, ed25519.PrivateKeySize) + copy(newKey, data[:ed25519.PrivateKeySize]) + data = newKey + case ed25519.PrivateKeySize: + default: + return nil, fmt.Errorf( + "expected ed25519 data size to be %d or %d, got %d", + ed25519.PrivateKeySize, + ed25519.PrivateKeySize+ed25519.PublicKeySize, + len(data), + ) + } + + return &Ed25519PrivateKey{ + k: ed25519.PrivateKey(data), + }, nil +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/key.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/key.go new file mode 100644 index 0000000000..090338810f --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/crypto/key.go @@ -0,0 +1,399 @@ +// Package crypto implements various cryptographic utilities used by libp2p. 
+// This includes a Public and Private key interface and key implementations +// for supported key algorithms. +package crypto + +import ( + "crypto/elliptic" + "crypto/hmac" + "crypto/rand" + "crypto/sha1" + "crypto/sha512" + "crypto/subtle" + "encoding/base64" + "errors" + "fmt" + "hash" + "io" + + pb "github.com/libp2p/go-libp2p-core/crypto/pb" + + "github.com/gogo/protobuf/proto" + sha256 "github.com/minio/sha256-simd" +) + +const ( + // RSA is an enum for the supported RSA key type + RSA = iota + // Ed25519 is an enum for the supported Ed25519 key type + Ed25519 + // Secp256k1 is an enum for the supported Secp256k1 key type + Secp256k1 + // ECDSA is an enum for the supported ECDSA key type + ECDSA +) + +var ( + // ErrBadKeyType is returned when a key is not supported + ErrBadKeyType = errors.New("invalid or unsupported key type") + // KeyTypes is a list of supported keys + KeyTypes = []int{ + RSA, + Ed25519, + Secp256k1, + ECDSA, + } +) + +// PubKeyUnmarshaller is a func that creates a PubKey from a given slice of bytes +type PubKeyUnmarshaller func(data []byte) (PubKey, error) + +// PrivKeyUnmarshaller is a func that creates a PrivKey from a given slice of bytes +type PrivKeyUnmarshaller func(data []byte) (PrivKey, error) + +// PubKeyUnmarshallers is a map of unmarshallers by key type +var PubKeyUnmarshallers = map[pb.KeyType]PubKeyUnmarshaller{ + pb.KeyType_RSA: UnmarshalRsaPublicKey, + pb.KeyType_Ed25519: UnmarshalEd25519PublicKey, + pb.KeyType_Secp256k1: UnmarshalSecp256k1PublicKey, + pb.KeyType_ECDSA: UnmarshalECDSAPublicKey, +} + +// PrivKeyUnmarshallers is a map of unmarshallers by key type +var PrivKeyUnmarshallers = map[pb.KeyType]PrivKeyUnmarshaller{ + pb.KeyType_RSA: UnmarshalRsaPrivateKey, + pb.KeyType_Ed25519: UnmarshalEd25519PrivateKey, + pb.KeyType_Secp256k1: UnmarshalSecp256k1PrivateKey, + pb.KeyType_ECDSA: UnmarshalECDSAPrivateKey, +} + +// Key represents a crypto key that can be compared to another key +type Key interface { + // Bytes returns 
a serialized, storeable representation of this key + // DEPRECATED in favor of Marshal / Unmarshal + Bytes() ([]byte, error) + + // Equals checks whether two PubKeys are the same + Equals(Key) bool + + // Raw returns the raw bytes of the key (not wrapped in the + // libp2p-crypto protobuf). + // + // This function is the inverse of {Priv,Pub}KeyUnmarshaler. + Raw() ([]byte, error) + + // Type returns the protobof key type. + Type() pb.KeyType +} + +// PrivKey represents a private key that can be used to generate a public key and sign data +type PrivKey interface { + Key + + // Cryptographically sign the given bytes + Sign([]byte) ([]byte, error) + + // Return a public key paired with this private key + GetPublic() PubKey +} + +// PubKey is a public key that can be used to verifiy data signed with the corresponding private key +type PubKey interface { + Key + + // Verify that 'sig' is the signed hash of 'data' + Verify(data []byte, sig []byte) (bool, error) +} + +// GenSharedKey generates the shared key from a given private key +type GenSharedKey func([]byte) ([]byte, error) + +// GenerateKeyPair generates a private and public key +func GenerateKeyPair(typ, bits int) (PrivKey, PubKey, error) { + return GenerateKeyPairWithReader(typ, bits, rand.Reader) +} + +// GenerateKeyPairWithReader returns a keypair of the given type and bitsize +func GenerateKeyPairWithReader(typ, bits int, src io.Reader) (PrivKey, PubKey, error) { + switch typ { + case RSA: + return GenerateRSAKeyPair(bits, src) + case Ed25519: + return GenerateEd25519Key(src) + case Secp256k1: + return GenerateSecp256k1Key(src) + case ECDSA: + return GenerateECDSAKeyPair(src) + default: + return nil, nil, ErrBadKeyType + } +} + +// GenerateEKeyPair returns an ephemeral public key and returns a function that will compute +// the shared secret key. Used in the identify module. +// +// Focuses only on ECDH now, but can be made more general in the future. 
+func GenerateEKeyPair(curveName string) ([]byte, GenSharedKey, error) { + var curve elliptic.Curve + + switch curveName { + case "P-256": + curve = elliptic.P256() + case "P-384": + curve = elliptic.P384() + case "P-521": + curve = elliptic.P521() + default: + return nil, nil, fmt.Errorf("unknown curve name") + } + + priv, x, y, err := elliptic.GenerateKey(curve, rand.Reader) + if err != nil { + return nil, nil, err + } + + pubKey := elliptic.Marshal(curve, x, y) + + done := func(theirPub []byte) ([]byte, error) { + // Verify and unpack node's public key. + x, y := elliptic.Unmarshal(curve, theirPub) + if x == nil { + return nil, fmt.Errorf("malformed public key: %d %v", len(theirPub), theirPub) + } + + if !curve.IsOnCurve(x, y) { + return nil, errors.New("invalid public key") + } + + // Generate shared secret. + secret, _ := curve.ScalarMult(x, y, priv) + + return secret.Bytes(), nil + } + + return pubKey, done, nil +} + +// StretchedKeys ... +type StretchedKeys struct { + IV []byte + MacKey []byte + CipherKey []byte +} + +// PENDING DEPRECATION: KeyStretcher() will be deprecated with secio; for new +// code, please use PBKDF2 (golang.org/x/crypto/pbkdf2) instead. +// KeyStretcher returns a set of keys for each party by stretching the shared key. +// (myIV, theirIV, myCipherKey, theirCipherKey, myMACKey, theirMACKey). 
+// This function accepts the following cipher types: +// - AES-128 +// - AES-256 +// The function will panic upon receiving an unknown cipherType +func KeyStretcher(cipherType string, hashType string, secret []byte) (StretchedKeys, StretchedKeys) { + var cipherKeySize int + var ivSize int + switch cipherType { + case "AES-128": + ivSize = 16 + cipherKeySize = 16 + case "AES-256": + ivSize = 16 + cipherKeySize = 32 + default: + panic("Unrecognized cipher, programmer error?") + } + + hmacKeySize := 20 + + seed := []byte("key expansion") + + result := make([]byte, 2*(ivSize+cipherKeySize+hmacKeySize)) + + var h func() hash.Hash + + switch hashType { + case "SHA1": + h = sha1.New + case "SHA256": + h = sha256.New + case "SHA512": + h = sha512.New + default: + panic("Unrecognized hash function, programmer error?") + } + + m := hmac.New(h, secret) + // note: guaranteed to never return an error + m.Write(seed) + + a := m.Sum(nil) + + j := 0 + for j < len(result) { + m.Reset() + + // note: guaranteed to never return an error. + m.Write(a) + m.Write(seed) + + b := m.Sum(nil) + + todo := len(b) + + if j+todo > len(result) { + todo = len(result) - j + } + + copy(result[j:j+todo], b) + + j += todo + + m.Reset() + + // note: guaranteed to never return an error. 
+ m.Write(a) + + a = m.Sum(nil) + } + + half := len(result) / 2 + r1 := result[:half] + r2 := result[half:] + + var k1 StretchedKeys + var k2 StretchedKeys + + k1.IV = r1[0:ivSize] + k1.CipherKey = r1[ivSize : ivSize+cipherKeySize] + k1.MacKey = r1[ivSize+cipherKeySize:] + + k2.IV = r2[0:ivSize] + k2.CipherKey = r2[ivSize : ivSize+cipherKeySize] + k2.MacKey = r2[ivSize+cipherKeySize:] + + return k1, k2 +} + +// UnmarshalPublicKey converts a protobuf serialized public key into its +// representative object +func UnmarshalPublicKey(data []byte) (PubKey, error) { + pmes := new(pb.PublicKey) + err := proto.Unmarshal(data, pmes) + if err != nil { + return nil, err + } + + return PublicKeyFromProto(pmes) +} + +// PublicKeyFromProto converts an unserialized protobuf PublicKey message +// into its representative object. +func PublicKeyFromProto(pmes *pb.PublicKey) (PubKey, error) { + um, ok := PubKeyUnmarshallers[pmes.GetType()] + if !ok { + return nil, ErrBadKeyType + } + + data := pmes.GetData() + + pk, err := um(data) + if err != nil { + return nil, err + } + + switch tpk := pk.(type) { + case *RsaPublicKey: + tpk.cached, _ = pmes.Marshal() + } + + return pk, nil +} + +// MarshalPublicKey converts a public key object into a protobuf serialized +// public key +func MarshalPublicKey(k PubKey) ([]byte, error) { + pbmes, err := PublicKeyToProto(k) + if err != nil { + return nil, err + } + + return proto.Marshal(pbmes) +} + +// PublicKeyToProto converts a public key object into an unserialized +// protobuf PublicKey message. 
+func PublicKeyToProto(k PubKey) (*pb.PublicKey, error) { + pbmes := new(pb.PublicKey) + pbmes.Type = k.Type() + data, err := k.Raw() + if err != nil { + return nil, err + } + pbmes.Data = data + return pbmes, nil +} + +// UnmarshalPrivateKey converts a protobuf serialized private key into its +// representative object +func UnmarshalPrivateKey(data []byte) (PrivKey, error) { + pmes := new(pb.PrivateKey) + err := proto.Unmarshal(data, pmes) + if err != nil { + return nil, err + } + + um, ok := PrivKeyUnmarshallers[pmes.GetType()] + if !ok { + return nil, ErrBadKeyType + } + + return um(pmes.GetData()) +} + +// MarshalPrivateKey converts a key object into its protobuf serialized form. +func MarshalPrivateKey(k PrivKey) ([]byte, error) { + pbmes := new(pb.PrivateKey) + pbmes.Type = k.Type() + data, err := k.Raw() + if err != nil { + return nil, err + } + + pbmes.Data = data + return proto.Marshal(pbmes) +} + +// ConfigDecodeKey decodes from b64 (for config file) to a byte array that can be unmarshalled. +func ConfigDecodeKey(b string) ([]byte, error) { + return base64.StdEncoding.DecodeString(b) +} + +// ConfigEncodeKey encodes a marshalled key to b64 (for config file). +func ConfigEncodeKey(b []byte) string { + return base64.StdEncoding.EncodeToString(b) +} + +// KeyEqual checks whether two Keys are equivalent (have identical byte representations). 
+func KeyEqual(k1, k2 Key) bool { + if k1 == k2 { + return true + } + + return k1.Equals(k2) +} + +func basicEquals(k1, k2 Key) bool { + if k1.Type() != k2.Type() { + return false + } + + a, err := k1.Raw() + if err != nil { + return false + } + b, err := k2.Raw() + if err != nil { + return false + } + return subtle.ConstantTimeCompare(a, b) == 1 +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/key_not_openssl.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/key_not_openssl.go new file mode 100644 index 0000000000..1499feaab7 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/crypto/key_not_openssl.go @@ -0,0 +1,80 @@ +// +build !openssl + +package crypto + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + + btcec "github.com/btcsuite/btcd/btcec" +) + +// KeyPairFromStdKey wraps standard library (and secp256k1) private keys in libp2p/go-libp2p-core/crypto keys +func KeyPairFromStdKey(priv crypto.PrivateKey) (PrivKey, PubKey, error) { + if priv == nil { + return nil, nil, ErrNilPrivateKey + } + + switch p := priv.(type) { + case *rsa.PrivateKey: + return &RsaPrivateKey{*p}, &RsaPublicKey{k: p.PublicKey}, nil + + case *ecdsa.PrivateKey: + return &ECDSAPrivateKey{p}, &ECDSAPublicKey{&p.PublicKey}, nil + + case *ed25519.PrivateKey: + pubIfc := p.Public() + pub, _ := pubIfc.(ed25519.PublicKey) + return &Ed25519PrivateKey{*p}, &Ed25519PublicKey{pub}, nil + + case *btcec.PrivateKey: + sPriv := Secp256k1PrivateKey(*p) + sPub := Secp256k1PublicKey(*p.PubKey()) + return &sPriv, &sPub, nil + + default: + return nil, nil, ErrBadKeyType + } +} + +// PrivKeyToStdKey converts libp2p/go-libp2p-core/crypto private keys to standard library (and secp256k1) private keys +func PrivKeyToStdKey(priv PrivKey) (crypto.PrivateKey, error) { + if priv == nil { + return nil, ErrNilPrivateKey + } + + switch p := priv.(type) { + case *RsaPrivateKey: + return &p.sk, nil + case *ECDSAPrivateKey: + return p.priv, nil + case *Ed25519PrivateKey: + 
return &p.k, nil + case *Secp256k1PrivateKey: + return p, nil + default: + return nil, ErrBadKeyType + } +} + +// PubKeyToStdKey converts libp2p/go-libp2p-core/crypto private keys to standard library (and secp256k1) public keys +func PubKeyToStdKey(pub PubKey) (crypto.PublicKey, error) { + if pub == nil { + return nil, ErrNilPublicKey + } + + switch p := pub.(type) { + case *RsaPublicKey: + return &p.k, nil + case *ECDSAPublicKey: + return p.pub, nil + case *Ed25519PublicKey: + return p.k, nil + case *Secp256k1PublicKey: + return p, nil + default: + return nil, ErrBadKeyType + } +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/key_openssl.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/key_openssl.go new file mode 100644 index 0000000000..8d7810ce6c --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/crypto/key_openssl.go @@ -0,0 +1,94 @@ +// +build openssl + +package crypto + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + + btcec "github.com/btcsuite/btcd/btcec" + openssl "github.com/libp2p/go-openssl" +) + +// KeyPairFromStdKey wraps standard library (and secp256k1) private keys in libp2p/go-libp2p-core/crypto keys +func KeyPairFromStdKey(priv crypto.PrivateKey) (PrivKey, PubKey, error) { + if priv == nil { + return nil, nil, ErrNilPrivateKey + } + + switch p := priv.(type) { + case *rsa.PrivateKey: + pk, err := openssl.LoadPrivateKeyFromDER(x509.MarshalPKCS1PrivateKey(p)) + if err != nil { + return nil, nil, err + } + + return &opensslPrivateKey{pk}, &opensslPublicKey{key: pk}, nil + + case *ecdsa.PrivateKey: + return &ECDSAPrivateKey{p}, &ECDSAPublicKey{&p.PublicKey}, nil + + case *ed25519.PrivateKey: + pubIfc := p.Public() + pub, _ := pubIfc.(ed25519.PublicKey) + return &Ed25519PrivateKey{*p}, &Ed25519PublicKey{pub}, nil + + case *btcec.PrivateKey: + sPriv := Secp256k1PrivateKey(*p) + sPub := Secp256k1PublicKey(*p.PubKey()) + return &sPriv, &sPub, nil + + default: + return nil, nil, 
ErrBadKeyType + } +} + +// PrivKeyToStdKey converts libp2p/go-libp2p-core/crypto private keys to standard library (and secp256k1) private keys +func PrivKeyToStdKey(priv PrivKey) (crypto.PrivateKey, error) { + if priv == nil { + return nil, ErrNilPrivateKey + } + switch p := priv.(type) { + case *opensslPrivateKey: + raw, err := p.Raw() + if err != nil { + return nil, err + } + return x509.ParsePKCS1PrivateKey(raw) + case *ECDSAPrivateKey: + return p.priv, nil + case *Ed25519PrivateKey: + return &p.k, nil + case *Secp256k1PrivateKey: + return p, nil + default: + return nil, ErrBadKeyType + } +} + +// PubKeyToStdKey converts libp2p/go-libp2p-core/crypto private keys to standard library (and secp256k1) public keys +func PubKeyToStdKey(pub PubKey) (crypto.PublicKey, error) { + if pub == nil { + return nil, ErrNilPublicKey + } + + switch p := pub.(type) { + case *opensslPublicKey: + raw, err := p.Raw() + if err != nil { + return nil, err + } + return x509.ParsePKIXPublicKey(raw) + case *ECDSAPublicKey: + return p.pub, nil + case *Ed25519PublicKey: + return p.k, nil + case *Secp256k1PublicKey: + return p, nil + default: + return nil, ErrBadKeyType + } +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/openssl_common.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/openssl_common.go new file mode 100644 index 0000000000..88807cafd7 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/crypto/openssl_common.go @@ -0,0 +1,119 @@ +// +build openssl + +package crypto + +import ( + "sync" + + pb "github.com/libp2p/go-libp2p-core/crypto/pb" + + openssl "github.com/libp2p/go-openssl" +) + +// define these as separate types so we can add more key types later and reuse +// code. 
+ +type opensslPublicKey struct { + key openssl.PublicKey + + cacheLk sync.Mutex + cached []byte +} + +type opensslPrivateKey struct { + key openssl.PrivateKey +} + +func unmarshalOpensslPrivateKey(b []byte) (opensslPrivateKey, error) { + sk, err := openssl.LoadPrivateKeyFromDER(b) + if err != nil { + return opensslPrivateKey{}, err + } + return opensslPrivateKey{sk}, nil +} + +func unmarshalOpensslPublicKey(b []byte) (opensslPublicKey, error) { + sk, err := openssl.LoadPublicKeyFromDER(b) + if err != nil { + return opensslPublicKey{}, err + } + return opensslPublicKey{key: sk, cached: b}, nil +} + +// Verify compares a signature against input data +func (pk *opensslPublicKey) Verify(data, sig []byte) (bool, error) { + err := pk.key.VerifyPKCS1v15(openssl.SHA256_Method, data, sig) + return err == nil, err +} + +func (pk *opensslPublicKey) Type() pb.KeyType { + switch pk.key.KeyType() { + case openssl.KeyTypeRSA: + return pb.KeyType_RSA + default: + return -1 + } +} + +// Bytes returns protobuf bytes of a public key +func (pk *opensslPublicKey) Bytes() ([]byte, error) { + pk.cacheLk.Lock() + var err error + if pk.cached == nil { + pk.cached, err = MarshalPublicKey(pk) + } + pk.cacheLk.Unlock() + return pk.cached, err +} + +func (pk *opensslPublicKey) Raw() ([]byte, error) { + return pk.key.MarshalPKIXPublicKeyDER() +} + +// Equals checks whether this key is equal to another +func (pk *opensslPublicKey) Equals(k Key) bool { + k0, ok := k.(*RsaPublicKey) + if !ok { + return basicEquals(pk, k) + } + + return pk.key.Equal(k0.opensslPublicKey.key) +} + +// Sign returns a signature of the input data +func (sk *opensslPrivateKey) Sign(message []byte) ([]byte, error) { + return sk.key.SignPKCS1v15(openssl.SHA256_Method, message) +} + +// GetPublic returns a public key +func (sk *opensslPrivateKey) GetPublic() PubKey { + return &opensslPublicKey{key: sk.key} +} + +func (sk *opensslPrivateKey) Type() pb.KeyType { + switch sk.key.KeyType() { + case openssl.KeyTypeRSA: + return 
pb.KeyType_RSA + default: + return -1 + } +} + +// Bytes returns protobuf bytes from a private key +func (sk *opensslPrivateKey) Bytes() ([]byte, error) { + return MarshalPrivateKey(sk) +} + +func (sk *opensslPrivateKey) Raw() ([]byte, error) { + return sk.key.MarshalPKCS1PrivateKeyDER() +} + +// Equals checks whether this key is equal to another +func (sk *opensslPrivateKey) Equals(k Key) bool { + k0, ok := k.(*RsaPrivateKey) + if !ok { + return basicEquals(sk, k) + } + + return sk.key.Equal(k0.opensslPrivateKey.key) +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/pb/Makefile b/vendor/github.com/libp2p/go-libp2p-core/crypto/pb/Makefile new file mode 100644 index 0000000000..8af2dd8177 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/crypto/pb/Makefile @@ -0,0 +1,11 @@ +PB = $(wildcard *.proto) +GO = $(PB:.proto=.pb.go) + +all: $(GO) + +%.pb.go: %.proto + protoc --proto_path=$(PWD)/../..:. --gogofaster_out=. $< + +clean: + rm -f *.pb.go + rm -f *.go diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/pb/crypto.pb.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/pb/crypto.pb.go new file mode 100644 index 0000000000..072fad9c93 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/crypto/pb/crypto.pb.go @@ -0,0 +1,625 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: crypto.proto + +package crypto_pb + +import ( + fmt "fmt" + github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type KeyType int32 + +const ( + KeyType_RSA KeyType = 0 + KeyType_Ed25519 KeyType = 1 + KeyType_Secp256k1 KeyType = 2 + KeyType_ECDSA KeyType = 3 +) + +var KeyType_name = map[int32]string{ + 0: "RSA", + 1: "Ed25519", + 2: "Secp256k1", + 3: "ECDSA", +} + +var KeyType_value = map[string]int32{ + "RSA": 0, + "Ed25519": 1, + "Secp256k1": 2, + "ECDSA": 3, +} + +func (x KeyType) Enum() *KeyType { + p := new(KeyType) + *p = x + return p +} + +func (x KeyType) String() string { + return proto.EnumName(KeyType_name, int32(x)) +} + +func (x *KeyType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(KeyType_value, data, "KeyType") + if err != nil { + return err + } + *x = KeyType(value) + return nil +} + +func (KeyType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_527278fb02d03321, []int{0} +} + +type PublicKey struct { + Type KeyType `protobuf:"varint,1,req,name=Type,enum=crypto.pb.KeyType" json:"Type"` + Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data"` +} + +func (m *PublicKey) Reset() { *m = PublicKey{} } +func (m *PublicKey) String() string { return proto.CompactTextString(m) } +func (*PublicKey) ProtoMessage() {} +func (*PublicKey) Descriptor() ([]byte, []int) { + return fileDescriptor_527278fb02d03321, []int{0} +} +func (m *PublicKey) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PublicKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PublicKey.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PublicKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_PublicKey.Merge(m, src) +} +func (m *PublicKey) XXX_Size() int { + return m.Size() +} +func (m *PublicKey) XXX_DiscardUnknown() { + xxx_messageInfo_PublicKey.DiscardUnknown(m) +} + +var 
xxx_messageInfo_PublicKey proto.InternalMessageInfo + +func (m *PublicKey) GetType() KeyType { + if m != nil { + return m.Type + } + return KeyType_RSA +} + +func (m *PublicKey) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type PrivateKey struct { + Type KeyType `protobuf:"varint,1,req,name=Type,enum=crypto.pb.KeyType" json:"Type"` + Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data"` +} + +func (m *PrivateKey) Reset() { *m = PrivateKey{} } +func (m *PrivateKey) String() string { return proto.CompactTextString(m) } +func (*PrivateKey) ProtoMessage() {} +func (*PrivateKey) Descriptor() ([]byte, []int) { + return fileDescriptor_527278fb02d03321, []int{1} +} +func (m *PrivateKey) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PrivateKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PrivateKey.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PrivateKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrivateKey.Merge(m, src) +} +func (m *PrivateKey) XXX_Size() int { + return m.Size() +} +func (m *PrivateKey) XXX_DiscardUnknown() { + xxx_messageInfo_PrivateKey.DiscardUnknown(m) +} + +var xxx_messageInfo_PrivateKey proto.InternalMessageInfo + +func (m *PrivateKey) GetType() KeyType { + if m != nil { + return m.Type + } + return KeyType_RSA +} + +func (m *PrivateKey) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func init() { + proto.RegisterEnum("crypto.pb.KeyType", KeyType_name, KeyType_value) + proto.RegisterType((*PublicKey)(nil), "crypto.pb.PublicKey") + proto.RegisterType((*PrivateKey)(nil), "crypto.pb.PrivateKey") +} + +func init() { proto.RegisterFile("crypto.proto", fileDescriptor_527278fb02d03321) } + +var fileDescriptor_527278fb02d03321 = []byte{ + // 203 bytes of a gzipped FileDescriptorProto + 
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x49, 0x2e, 0xaa, 0x2c, + 0x28, 0xc9, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x84, 0xf1, 0x92, 0x94, 0x82, 0xb9, + 0x38, 0x03, 0x4a, 0x93, 0x72, 0x32, 0x93, 0xbd, 0x53, 0x2b, 0x85, 0x74, 0xb8, 0x58, 0x42, 0x2a, + 0x0b, 0x52, 0x25, 0x18, 0x15, 0x98, 0x34, 0xf8, 0x8c, 0x84, 0xf4, 0xe0, 0xca, 0xf4, 0xbc, 0x53, + 0x2b, 0x41, 0x32, 0x4e, 0x2c, 0x27, 0xee, 0xc9, 0x33, 0x04, 0x81, 0x55, 0x09, 0x49, 0x70, 0xb1, + 0xb8, 0x24, 0x96, 0x24, 0x4a, 0x30, 0x29, 0x30, 0x69, 0xf0, 0xc0, 0x64, 0x40, 0x22, 0x4a, 0x21, + 0x5c, 0x5c, 0x01, 0x45, 0x99, 0x65, 0x89, 0x25, 0xa9, 0x54, 0x34, 0x55, 0xcb, 0x92, 0x8b, 0x1d, + 0xaa, 0x41, 0x88, 0x9d, 0x8b, 0x39, 0x28, 0xd8, 0x51, 0x80, 0x41, 0x88, 0x9b, 0x8b, 0xdd, 0x35, + 0xc5, 0xc8, 0xd4, 0xd4, 0xd0, 0x52, 0x80, 0x51, 0x88, 0x97, 0x8b, 0x33, 0x38, 0x35, 0xb9, 0xc0, + 0xc8, 0xd4, 0x2c, 0xdb, 0x50, 0x80, 0x49, 0x88, 0x93, 0x8b, 0xd5, 0xd5, 0xd9, 0x25, 0xd8, 0x51, + 0x80, 0xd9, 0x49, 0xe2, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, + 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0x00, 0x01, 0x00, + 0x00, 0xff, 0xff, 0x13, 0xbe, 0xd4, 0xff, 0x19, 0x01, 0x00, 0x00, +} + +func (m *PublicKey) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PublicKey) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PublicKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Data != nil { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintCrypto(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + i = encodeVarintCrypto(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *PrivateKey) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PrivateKey) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PrivateKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Data != nil { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintCrypto(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + i = encodeVarintCrypto(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func encodeVarintCrypto(dAtA []byte, offset int, v uint64) int { + offset -= sovCrypto(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PublicKey) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovCrypto(uint64(m.Type)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovCrypto(uint64(l)) + } + return n +} + +func (m *PrivateKey) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovCrypto(uint64(m.Type)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovCrypto(uint64(l)) + } + return n +} + +func sovCrypto(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozCrypto(x uint64) (n int) { + return sovCrypto(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PublicKey) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCrypto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire 
& 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PublicKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PublicKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCrypto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= KeyType(b&0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCrypto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCrypto + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCrypto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + default: + iNdEx = preIndex + skippy, err := skipCrypto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCrypto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCrypto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("Type") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("Data") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PrivateKey) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCrypto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PrivateKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PrivateKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCrypto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= KeyType(b&0x7F) << shift + if b < 0x80 { + break + } + } + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowCrypto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCrypto + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCrypto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + default: + iNdEx = preIndex + skippy, err := skipCrypto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCrypto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCrypto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("Type") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("Data") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCrypto(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCrypto + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCrypto + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCrypto + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthCrypto + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupCrypto + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthCrypto + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthCrypto = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCrypto = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupCrypto = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/pb/crypto.proto b/vendor/github.com/libp2p/go-libp2p-core/crypto/pb/crypto.proto new file mode 100644 index 0000000000..182b0d4847 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/crypto/pb/crypto.proto @@ -0,0 +1,22 @@ +syntax = "proto2"; + +package crypto.pb; + +option go_package = "github.com/libp2p/go-libp2p-core/crypto/pb"; + +enum KeyType { + RSA = 0; + Ed25519 = 1; + Secp256k1 = 2; + ECDSA = 3; +} + +message PublicKey { + required KeyType Type = 1; + required bytes Data = 2; +} + +message PrivateKey { + required KeyType Type = 1; + required bytes Data = 2; +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_common.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_common.go new file mode 100644 index 0000000000..c7e305439a --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_common.go @@ -0,0 +1,25 @@ +package crypto + +import ( + "fmt" + "os" +) + +// WeakRsaKeyEnv is an environment variable which, when set, lowers the +// minimum required bits of RSA keys to 512. This should be used exclusively in +// test situations. 
+const WeakRsaKeyEnv = "LIBP2P_ALLOW_WEAK_RSA_KEYS" + +var MinRsaKeyBits = 2048 + +// ErrRsaKeyTooSmall is returned when trying to generate or parse an RSA key +// that's smaller than MinRsaKeyBits bits. In test +var ErrRsaKeyTooSmall error + +func init() { + if _, ok := os.LookupEnv(WeakRsaKeyEnv); ok { + MinRsaKeyBits = 512 + } + + ErrRsaKeyTooSmall = fmt.Errorf("rsa keys must be >= %d bits to be useful", MinRsaKeyBits) +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_go.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_go.go new file mode 100644 index 0000000000..f28a327b83 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_go.go @@ -0,0 +1,152 @@ +// +build !openssl + +package crypto + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "errors" + "io" + "sync" + + pb "github.com/libp2p/go-libp2p-core/crypto/pb" + + "github.com/minio/sha256-simd" +) + +// RsaPrivateKey is an rsa private key +type RsaPrivateKey struct { + sk rsa.PrivateKey +} + +// RsaPublicKey is an rsa public key +type RsaPublicKey struct { + k rsa.PublicKey + + cacheLk sync.Mutex + cached []byte +} + +// GenerateRSAKeyPair generates a new rsa private and public key +func GenerateRSAKeyPair(bits int, src io.Reader) (PrivKey, PubKey, error) { + if bits < MinRsaKeyBits { + return nil, nil, ErrRsaKeyTooSmall + } + priv, err := rsa.GenerateKey(src, bits) + if err != nil { + return nil, nil, err + } + pk := priv.PublicKey + return &RsaPrivateKey{sk: *priv}, &RsaPublicKey{k: pk}, nil +} + +// Verify compares a signature against input data +func (pk *RsaPublicKey) Verify(data, sig []byte) (bool, error) { + hashed := sha256.Sum256(data) + err := rsa.VerifyPKCS1v15(&pk.k, crypto.SHA256, hashed[:], sig) + if err != nil { + return false, err + } + return true, nil +} + +func (pk *RsaPublicKey) Type() pb.KeyType { + return pb.KeyType_RSA +} + +// Bytes returns protobuf bytes of a public key +func (pk *RsaPublicKey) Bytes() ([]byte, error) { + 
pk.cacheLk.Lock() + var err error + if pk.cached == nil { + pk.cached, err = MarshalPublicKey(pk) + } + pk.cacheLk.Unlock() + return pk.cached, err +} + +func (pk *RsaPublicKey) Raw() ([]byte, error) { + return x509.MarshalPKIXPublicKey(&pk.k) +} + +// Equals checks whether this key is equal to another +func (pk *RsaPublicKey) Equals(k Key) bool { + // make sure this is an rsa public key + other, ok := (k).(*RsaPublicKey) + if !ok { + return basicEquals(pk, k) + } + + return pk.k.N.Cmp(other.k.N) == 0 && pk.k.E == other.k.E +} + +// Sign returns a signature of the input data +func (sk *RsaPrivateKey) Sign(message []byte) ([]byte, error) { + hashed := sha256.Sum256(message) + return rsa.SignPKCS1v15(rand.Reader, &sk.sk, crypto.SHA256, hashed[:]) +} + +// GetPublic returns a public key +func (sk *RsaPrivateKey) GetPublic() PubKey { + return &RsaPublicKey{k: sk.sk.PublicKey} +} + +func (sk *RsaPrivateKey) Type() pb.KeyType { + return pb.KeyType_RSA +} + +// Bytes returns protobuf bytes from a private key +func (sk *RsaPrivateKey) Bytes() ([]byte, error) { + return MarshalPrivateKey(sk) +} + +func (sk *RsaPrivateKey) Raw() ([]byte, error) { + b := x509.MarshalPKCS1PrivateKey(&sk.sk) + return b, nil +} + +// Equals checks whether this key is equal to another +func (sk *RsaPrivateKey) Equals(k Key) bool { + // make sure this is an rsa public key + other, ok := (k).(*RsaPrivateKey) + if !ok { + return basicEquals(sk, k) + } + + a := sk.sk + b := other.sk + + // Don't care about constant time. We're only comparing the public half. 
+ return a.PublicKey.N.Cmp(b.PublicKey.N) == 0 && a.PublicKey.E == b.PublicKey.E +} + +// UnmarshalRsaPrivateKey returns a private key from the input x509 bytes +func UnmarshalRsaPrivateKey(b []byte) (PrivKey, error) { + sk, err := x509.ParsePKCS1PrivateKey(b) + if err != nil { + return nil, err + } + if sk.N.BitLen() < MinRsaKeyBits { + return nil, ErrRsaKeyTooSmall + } + return &RsaPrivateKey{sk: *sk}, nil +} + +// UnmarshalRsaPublicKey returns a public key from the input x509 bytes +func UnmarshalRsaPublicKey(b []byte) (PubKey, error) { + pub, err := x509.ParsePKIXPublicKey(b) + if err != nil { + return nil, err + } + pk, ok := pub.(*rsa.PublicKey) + if !ok { + return nil, errors.New("not actually an rsa public key") + } + if pk.N.BitLen() < MinRsaKeyBits { + return nil, ErrRsaKeyTooSmall + } + + return &RsaPublicKey{k: *pk}, nil +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_openssl.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_openssl.go new file mode 100644 index 0000000000..8e7fb74315 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/crypto/rsa_openssl.go @@ -0,0 +1,68 @@ +// +build openssl + +package crypto + +import ( + "errors" + "io" + + openssl "github.com/libp2p/go-openssl" +) + +// RsaPrivateKey is an rsa private key +type RsaPrivateKey struct { + opensslPrivateKey +} + +// RsaPublicKey is an rsa public key +type RsaPublicKey struct { + opensslPublicKey +} + +// GenerateRSAKeyPair generates a new rsa private and public key +func GenerateRSAKeyPair(bits int, _ io.Reader) (PrivKey, PubKey, error) { + if bits < MinRsaKeyBits { + return nil, nil, ErrRsaKeyTooSmall + } + + key, err := openssl.GenerateRSAKey(bits) + if err != nil { + return nil, nil, err + } + return &RsaPrivateKey{opensslPrivateKey{key}}, &RsaPublicKey{opensslPublicKey{key: key}}, nil +} + +// GetPublic returns a public key +func (sk *RsaPrivateKey) GetPublic() PubKey { + return &RsaPublicKey{opensslPublicKey{key: sk.opensslPrivateKey.key}} +} + +// 
UnmarshalRsaPrivateKey returns a private key from the input x509 bytes +func UnmarshalRsaPrivateKey(b []byte) (PrivKey, error) { + key, err := unmarshalOpensslPrivateKey(b) + if err != nil { + return nil, err + } + if 8*key.key.Size() < MinRsaKeyBits { + return nil, ErrRsaKeyTooSmall + } + if key.Type() != RSA { + return nil, errors.New("not actually an rsa public key") + } + return &RsaPrivateKey{key}, nil +} + +// UnmarshalRsaPublicKey returns a public key from the input x509 bytes +func UnmarshalRsaPublicKey(b []byte) (PubKey, error) { + key, err := unmarshalOpensslPublicKey(b) + if err != nil { + return nil, err + } + if 8*key.key.Size() < MinRsaKeyBits { + return nil, ErrRsaKeyTooSmall + } + if key.Type() != RSA { + return nil, errors.New("not actually an rsa public key") + } + return &RsaPublicKey{key}, nil +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/crypto/secp256k1.go b/vendor/github.com/libp2p/go-libp2p-core/crypto/secp256k1.go new file mode 100644 index 0000000000..6e98ea6bf7 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/crypto/secp256k1.go @@ -0,0 +1,125 @@ +package crypto + +import ( + "fmt" + "io" + + pb "github.com/libp2p/go-libp2p-core/crypto/pb" + + btcec "github.com/btcsuite/btcd/btcec" + sha256 "github.com/minio/sha256-simd" +) + +// Secp256k1PrivateKey is an Secp256k1 private key +type Secp256k1PrivateKey btcec.PrivateKey + +// Secp256k1PublicKey is an Secp256k1 public key +type Secp256k1PublicKey btcec.PublicKey + +// GenerateSecp256k1Key generates a new Secp256k1 private and public key pair +func GenerateSecp256k1Key(src io.Reader) (PrivKey, PubKey, error) { + privk, err := btcec.NewPrivateKey(btcec.S256()) + if err != nil { + return nil, nil, err + } + + k := (*Secp256k1PrivateKey)(privk) + return k, k.GetPublic(), nil +} + +// UnmarshalSecp256k1PrivateKey returns a private key from bytes +func UnmarshalSecp256k1PrivateKey(data []byte) (PrivKey, error) { + if len(data) != btcec.PrivKeyBytesLen { + return nil, 
fmt.Errorf("expected secp256k1 data size to be %d", btcec.PrivKeyBytesLen) + } + + privk, _ := btcec.PrivKeyFromBytes(btcec.S256(), data) + return (*Secp256k1PrivateKey)(privk), nil +} + +// UnmarshalSecp256k1PublicKey returns a public key from bytes +func UnmarshalSecp256k1PublicKey(data []byte) (PubKey, error) { + k, err := btcec.ParsePubKey(data, btcec.S256()) + if err != nil { + return nil, err + } + + return (*Secp256k1PublicKey)(k), nil +} + +// Bytes returns protobuf bytes from a private key +func (k *Secp256k1PrivateKey) Bytes() ([]byte, error) { + return MarshalPrivateKey(k) +} + +// Type returns the private key type +func (k *Secp256k1PrivateKey) Type() pb.KeyType { + return pb.KeyType_Secp256k1 +} + +// Raw returns the bytes of the key +func (k *Secp256k1PrivateKey) Raw() ([]byte, error) { + return (*btcec.PrivateKey)(k).Serialize(), nil +} + +// Equals compares two private keys +func (k *Secp256k1PrivateKey) Equals(o Key) bool { + sk, ok := o.(*Secp256k1PrivateKey) + if !ok { + return basicEquals(k, o) + } + + return k.GetPublic().Equals(sk.GetPublic()) +} + +// Sign returns a signature from input data +func (k *Secp256k1PrivateKey) Sign(data []byte) ([]byte, error) { + hash := sha256.Sum256(data) + sig, err := (*btcec.PrivateKey)(k).Sign(hash[:]) + if err != nil { + return nil, err + } + + return sig.Serialize(), nil +} + +// GetPublic returns a public key +func (k *Secp256k1PrivateKey) GetPublic() PubKey { + return (*Secp256k1PublicKey)((*btcec.PrivateKey)(k).PubKey()) +} + +// Bytes returns protobuf bytes from a public key +func (k *Secp256k1PublicKey) Bytes() ([]byte, error) { + return MarshalPublicKey(k) +} + +// Type returns the public key type +func (k *Secp256k1PublicKey) Type() pb.KeyType { + return pb.KeyType_Secp256k1 +} + +// Raw returns the bytes of the key +func (k *Secp256k1PublicKey) Raw() ([]byte, error) { + return (*btcec.PublicKey)(k).SerializeCompressed(), nil +} + +// Equals compares two public keys +func (k *Secp256k1PublicKey) 
Equals(o Key) bool { + sk, ok := o.(*Secp256k1PublicKey) + if !ok { + return basicEquals(k, o) + } + + return (*btcec.PublicKey)(k).IsEqual((*btcec.PublicKey)(sk)) +} + +// Verify compares a signature against the input data +func (k *Secp256k1PublicKey) Verify(data []byte, sigStr []byte) (bool, error) { + sig, err := btcec.ParseDERSignature(sigStr, btcec.S256()) + if err != nil { + return false, err + } + + hash := sha256.Sum256(data) + return sig.Verify(hash[:], (*btcec.PublicKey)(k)), nil +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/peer/addrinfo.go b/vendor/github.com/libp2p/go-libp2p-core/peer/addrinfo.go new file mode 100644 index 0000000000..5cbfe8c3ae --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/peer/addrinfo.go @@ -0,0 +1,98 @@ +package peer + +import ( + "fmt" + + ma "github.com/multiformats/go-multiaddr" +) + +// AddrInfo is a small struct used to pass around a peer with +// a set of addresses (and later, keys?). +type AddrInfo struct { + ID ID + Addrs []ma.Multiaddr +} + +var _ fmt.Stringer = AddrInfo{} + +func (pi AddrInfo) String() string { + return fmt.Sprintf("{%v: %v}", pi.ID, pi.Addrs) +} + +var ErrInvalidAddr = fmt.Errorf("invalid p2p multiaddr") + +// AddrInfosFromP2pAddrs converts a set of Multiaddrs to a set of AddrInfos. +func AddrInfosFromP2pAddrs(maddrs ...ma.Multiaddr) ([]AddrInfo, error) { + m := make(map[ID][]ma.Multiaddr) + for _, maddr := range maddrs { + transport, id := SplitAddr(maddr) + if id == "" { + return nil, ErrInvalidAddr + } + if transport == nil { + if _, ok := m[id]; !ok { + m[id] = nil + } + } else { + m[id] = append(m[id], transport) + } + } + ais := make([]AddrInfo, 0, len(m)) + for id, maddrs := range m { + ais = append(ais, AddrInfo{ID: id, Addrs: maddrs}) + } + return ais, nil +} + +// SplitAddr splits a p2p Multiaddr into a transport multiaddr and a peer ID. +// +// * Returns a nil transport if the address only contains a /p2p part. 
+// * Returns a empty peer ID if the address doesn't contain a /p2p part. +func SplitAddr(m ma.Multiaddr) (transport ma.Multiaddr, id ID) { + if m == nil { + return nil, "" + } + + transport, p2ppart := ma.SplitLast(m) + if p2ppart == nil || p2ppart.Protocol().Code != ma.P_P2P { + return m, "" + } + id = ID(p2ppart.RawValue()) // already validated by the multiaddr library. + return transport, id +} + +// AddrInfoFromP2pAddr converts a Multiaddr to an AddrInfo. +func AddrInfoFromP2pAddr(m ma.Multiaddr) (*AddrInfo, error) { + transport, id := SplitAddr(m) + if id == "" { + return nil, ErrInvalidAddr + } + info := &AddrInfo{ID: id} + if transport != nil { + info.Addrs = []ma.Multiaddr{transport} + } + return info, nil +} + +// AddrInfoToP2pAddrs converts an AddrInfo to a list of Multiaddrs. +func AddrInfoToP2pAddrs(pi *AddrInfo) ([]ma.Multiaddr, error) { + var addrs []ma.Multiaddr + p2ppart, err := ma.NewComponent("p2p", IDB58Encode(pi.ID)) + if err != nil { + return nil, err + } + if len(pi.Addrs) == 0 { + return []ma.Multiaddr{p2ppart}, nil + } + for _, addr := range pi.Addrs { + addrs = append(addrs, addr.Encapsulate(p2ppart)) + } + return addrs, nil +} + +func (pi *AddrInfo) Loggable() map[string]interface{} { + return map[string]interface{}{ + "peerID": pi.ID.Pretty(), + "addrs": pi.Addrs, + } +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/peer/addrinfo_serde.go b/vendor/github.com/libp2p/go-libp2p-core/peer/addrinfo_serde.go new file mode 100644 index 0000000000..1df24e2b73 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/peer/addrinfo_serde.go @@ -0,0 +1,38 @@ +package peer + +import ( + "encoding/json" + + ma "github.com/multiformats/go-multiaddr" +) + +func (pi AddrInfo) MarshalJSON() ([]byte, error) { + out := make(map[string]interface{}) + out["ID"] = pi.ID.Pretty() + var addrs []string + for _, a := range pi.Addrs { + addrs = append(addrs, a.String()) + } + out["Addrs"] = addrs + return json.Marshal(out) +} + +func (pi *AddrInfo) 
UnmarshalJSON(b []byte) error { + var data map[string]interface{} + err := json.Unmarshal(b, &data) + if err != nil { + return err + } + pid, err := IDB58Decode(data["ID"].(string)) + if err != nil { + return err + } + pi.ID = pid + addrs, ok := data["Addrs"].([]interface{}) + if ok { + for _, a := range addrs { + pi.Addrs = append(pi.Addrs, ma.StringCast(a.(string))) + } + } + return nil +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/peer/pb/Makefile b/vendor/github.com/libp2p/go-libp2p-core/peer/pb/Makefile new file mode 100644 index 0000000000..7cf8222f89 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/peer/pb/Makefile @@ -0,0 +1,11 @@ +PB = $(wildcard *.proto) +GO = $(PB:.proto=.pb.go) + +all: $(GO) + +%.pb.go: %.proto + protoc --proto_path=$(PWD):$(PWD)/../.. --gogofaster_out=. $< + +clean: + rm -f *.pb.go + rm -f *.go diff --git a/vendor/github.com/libp2p/go-libp2p-core/peer/pb/peer_record.pb.go b/vendor/github.com/libp2p/go-libp2p-core/peer/pb/peer_record.pb.go new file mode 100644 index 0000000000..dd0755ef72 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/peer/pb/peer_record.pb.go @@ -0,0 +1,606 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: peer_record.proto + +package peer_pb + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// PeerRecord messages contain information that is useful to share with other peers. 
+// Currently, a PeerRecord contains the public listen addresses for a peer, but this +// is expected to expand to include other information in the future. +// +// PeerRecords are designed to be serialized to bytes and placed inside of +// SignedEnvelopes before sharing with other peers. +// See https://github.com/libp2p/go-libp2p-core/record/pb/envelope.proto for +// the SignedEnvelope definition. +type PeerRecord struct { + // peer_id contains a libp2p peer id in its binary representation. + PeerId []byte `protobuf:"bytes,1,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"` + // seq contains a monotonically-increasing sequence counter to order PeerRecords in time. + Seq uint64 `protobuf:"varint,2,opt,name=seq,proto3" json:"seq,omitempty"` + // addresses is a list of public listen addresses for the peer. + Addresses []*PeerRecord_AddressInfo `protobuf:"bytes,3,rep,name=addresses,proto3" json:"addresses,omitempty"` +} + +func (m *PeerRecord) Reset() { *m = PeerRecord{} } +func (m *PeerRecord) String() string { return proto.CompactTextString(m) } +func (*PeerRecord) ProtoMessage() {} +func (*PeerRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_dc0d8059ab0ad14d, []int{0} +} +func (m *PeerRecord) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PeerRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PeerRecord.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PeerRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_PeerRecord.Merge(m, src) +} +func (m *PeerRecord) XXX_Size() int { + return m.Size() +} +func (m *PeerRecord) XXX_DiscardUnknown() { + xxx_messageInfo_PeerRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_PeerRecord proto.InternalMessageInfo + +func (m *PeerRecord) GetPeerId() []byte { + if m != nil { + return m.PeerId + } + 
return nil +} + +func (m *PeerRecord) GetSeq() uint64 { + if m != nil { + return m.Seq + } + return 0 +} + +func (m *PeerRecord) GetAddresses() []*PeerRecord_AddressInfo { + if m != nil { + return m.Addresses + } + return nil +} + +// AddressInfo is a wrapper around a binary multiaddr. It is defined as a +// separate message to allow us to add per-address metadata in the future. +type PeerRecord_AddressInfo struct { + Multiaddr []byte `protobuf:"bytes,1,opt,name=multiaddr,proto3" json:"multiaddr,omitempty"` +} + +func (m *PeerRecord_AddressInfo) Reset() { *m = PeerRecord_AddressInfo{} } +func (m *PeerRecord_AddressInfo) String() string { return proto.CompactTextString(m) } +func (*PeerRecord_AddressInfo) ProtoMessage() {} +func (*PeerRecord_AddressInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_dc0d8059ab0ad14d, []int{0, 0} +} +func (m *PeerRecord_AddressInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PeerRecord_AddressInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PeerRecord_AddressInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PeerRecord_AddressInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_PeerRecord_AddressInfo.Merge(m, src) +} +func (m *PeerRecord_AddressInfo) XXX_Size() int { + return m.Size() +} +func (m *PeerRecord_AddressInfo) XXX_DiscardUnknown() { + xxx_messageInfo_PeerRecord_AddressInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_PeerRecord_AddressInfo proto.InternalMessageInfo + +func (m *PeerRecord_AddressInfo) GetMultiaddr() []byte { + if m != nil { + return m.Multiaddr + } + return nil +} + +func init() { + proto.RegisterType((*PeerRecord)(nil), "peer.pb.PeerRecord") + proto.RegisterType((*PeerRecord_AddressInfo)(nil), "peer.pb.PeerRecord.AddressInfo") +} + +func init() { proto.RegisterFile("peer_record.proto", 
fileDescriptor_dc0d8059ab0ad14d) } + +var fileDescriptor_dc0d8059ab0ad14d = []byte{ + // 189 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2c, 0x48, 0x4d, 0x2d, + 0x8a, 0x2f, 0x4a, 0x4d, 0xce, 0x2f, 0x4a, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, + 0x09, 0xe9, 0x15, 0x24, 0x29, 0x2d, 0x66, 0xe4, 0xe2, 0x0a, 0x48, 0x4d, 0x2d, 0x0a, 0x02, 0xcb, + 0x0a, 0x89, 0x73, 0x81, 0x65, 0xe2, 0x33, 0x53, 0x24, 0x18, 0x15, 0x18, 0x35, 0x78, 0x82, 0xd8, + 0x40, 0x5c, 0xcf, 0x14, 0x21, 0x01, 0x2e, 0xe6, 0xe2, 0xd4, 0x42, 0x09, 0x26, 0x05, 0x46, 0x0d, + 0x96, 0x20, 0x10, 0x53, 0xc8, 0x96, 0x8b, 0x33, 0x31, 0x25, 0xa5, 0x28, 0xb5, 0xb8, 0x38, 0xb5, + 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x5e, 0x0f, 0x6a, 0xac, 0x1e, 0xc2, 0x48, 0x3d, + 0x47, 0x88, 0x22, 0xcf, 0xbc, 0xb4, 0xfc, 0x20, 0x84, 0x0e, 0x29, 0x6d, 0x2e, 0x6e, 0x24, 0x19, + 0x21, 0x19, 0x2e, 0xce, 0xdc, 0xd2, 0x9c, 0x92, 0x4c, 0x90, 0x02, 0xa8, 0xd5, 0x08, 0x01, 0x27, + 0x89, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, + 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x48, 0x62, 0x03, 0xfb, 0xc7, 0x18, + 0x10, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x99, 0x56, 0x19, 0xe4, 0x00, 0x00, 0x00, +} + +func (m *PeerRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PeerRecord) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PeerRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintPeerRecord(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Seq != 0 { + i = encodeVarintPeerRecord(dAtA, i, uint64(m.Seq)) + i-- + dAtA[i] = 0x10 + } + if len(m.PeerId) > 0 { + i -= len(m.PeerId) + copy(dAtA[i:], m.PeerId) + i = encodeVarintPeerRecord(dAtA, i, uint64(len(m.PeerId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PeerRecord_AddressInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PeerRecord_AddressInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PeerRecord_AddressInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Multiaddr) > 0 { + i -= len(m.Multiaddr) + copy(dAtA[i:], m.Multiaddr) + i = encodeVarintPeerRecord(dAtA, i, uint64(len(m.Multiaddr))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintPeerRecord(dAtA []byte, offset int, v uint64) int { + offset -= sovPeerRecord(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PeerRecord) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PeerId) + if l > 0 { + n += 1 + l + sovPeerRecord(uint64(l)) + } + if m.Seq != 0 { + n += 1 + sovPeerRecord(uint64(m.Seq)) + } + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovPeerRecord(uint64(l)) + } + } + return n +} + +func (m *PeerRecord_AddressInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Multiaddr) + if l > 0 { + n += 1 + l + sovPeerRecord(uint64(l)) + } + return n +} + +func sovPeerRecord(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPeerRecord(x 
uint64) (n int) { + return sovPeerRecord(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PeerRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPeerRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PeerRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PeerRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPeerRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthPeerRecord + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthPeerRecord + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerId = append(m.PeerId[:0], dAtA[iNdEx:postIndex]...) 
+ if m.PeerId == nil { + m.PeerId = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seq", wireType) + } + m.Seq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPeerRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPeerRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPeerRecord + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPeerRecord + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, &PeerRecord_AddressInfo{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPeerRecord(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPeerRecord + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPeerRecord + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PeerRecord_AddressInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPeerRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddressInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddressInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Multiaddr", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPeerRecord + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthPeerRecord + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthPeerRecord + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Multiaddr = append(m.Multiaddr[:0], dAtA[iNdEx:postIndex]...) + if m.Multiaddr == nil { + m.Multiaddr = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPeerRecord(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPeerRecord + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthPeerRecord + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPeerRecord(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPeerRecord + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPeerRecord + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + 
iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPeerRecord + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPeerRecord + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPeerRecord + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthPeerRecord + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthPeerRecord = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPeerRecord = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupPeerRecord = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/libp2p/go-libp2p-core/peer/pb/peer_record.proto b/vendor/github.com/libp2p/go-libp2p-core/peer/pb/peer_record.proto new file mode 100644 index 0000000000..fb2835d8e6 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/peer/pb/peer_record.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package peer.pb; + +// PeerRecord messages contain information that is useful to share with other peers. +// Currently, a PeerRecord contains the public listen addresses for a peer, but this +// is expected to expand to include other information in the future. +// +// PeerRecords are designed to be serialized to bytes and placed inside of +// SignedEnvelopes before sharing with other peers. +// See https://github.com/libp2p/go-libp2p-core/record/pb/envelope.proto for +// the SignedEnvelope definition. +message PeerRecord { + + // AddressInfo is a wrapper around a binary multiaddr. 
It is defined as a + // separate message to allow us to add per-address metadata in the future. + message AddressInfo { + bytes multiaddr = 1; + } + + // peer_id contains a libp2p peer id in its binary representation. + bytes peer_id = 1; + + // seq contains a monotonically-increasing sequence counter to order PeerRecords in time. + uint64 seq = 2; + + // addresses is a list of public listen addresses for the peer. + repeated AddressInfo addresses = 3; +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/peer/peer.go b/vendor/github.com/libp2p/go-libp2p-core/peer/peer.go new file mode 100644 index 0000000000..176b9dffa7 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/peer/peer.go @@ -0,0 +1,245 @@ +// Package peer implements an object used to represent peers in the libp2p network. +package peer + +import ( + "encoding/hex" + "errors" + "fmt" + "strings" + + cid "github.com/ipfs/go-cid" + ic "github.com/libp2p/go-libp2p-core/crypto" + b58 "github.com/mr-tron/base58/base58" + mh "github.com/multiformats/go-multihash" +) + +var ( + // ErrEmptyPeerID is an error for empty peer ID. + ErrEmptyPeerID = errors.New("empty peer ID") + // ErrNoPublicKey is an error for peer IDs that don't embed public keys + ErrNoPublicKey = errors.New("public key is not embedded in peer ID") +) + +// AdvancedEnableInlining enables automatically inlining keys shorter than +// 42 bytes into the peer ID (using the "identity" multihash function). +// +// WARNING: This flag will likely be set to false in the future and eventually +// be removed in favor of using a hash function specified by the key itself. +// See: https://github.com/libp2p/specs/issues/138 +// +// DO NOT change this flag unless you know what you're doing. +// +// This currently defaults to true for backwards compatibility but will likely +// be set to false by default when an upgrade path is determined. +var AdvancedEnableInlining = true + +const maxInlineKeyLength = 42 + +// ID is a libp2p peer identity. 
+// +// Peer IDs are derived by hashing a peer's public key and encoding the +// hash output as a multihash. See IDFromPublicKey for details. +type ID string + +// Pretty returns a base58-encoded string representation of the ID. +func (id ID) Pretty() string { + return IDB58Encode(id) +} + +// Loggable returns a pretty peer ID string in loggable JSON format. +func (id ID) Loggable() map[string]interface{} { + return map[string]interface{}{ + "peerID": id.Pretty(), + } +} + +func (id ID) String() string { + return id.Pretty() +} + +// ShortString prints out the peer ID. +// +// TODO(brian): ensure correctness at ID generation and +// enforce this by only exposing functions that generate +// IDs safely. Then any peer.ID type found in the +// codebase is known to be correct. +func (id ID) ShortString() string { + pid := id.Pretty() + if len(pid) <= 10 { + return fmt.Sprintf("", pid) + } + return fmt.Sprintf("", pid[:2], pid[len(pid)-6:]) +} + +// MatchesPrivateKey tests whether this ID was derived from the secret key sk. +func (id ID) MatchesPrivateKey(sk ic.PrivKey) bool { + return id.MatchesPublicKey(sk.GetPublic()) +} + +// MatchesPublicKey tests whether this ID was derived from the public key pk. +func (id ID) MatchesPublicKey(pk ic.PubKey) bool { + oid, err := IDFromPublicKey(pk) + if err != nil { + return false + } + return oid == id +} + +// ExtractPublicKey attempts to extract the public key from an ID. +// +// This method returns ErrNoPublicKey if the peer ID looks valid but it can't extract +// the public key. +func (id ID) ExtractPublicKey() (ic.PubKey, error) { + decoded, err := mh.Decode([]byte(id)) + if err != nil { + return nil, err + } + if decoded.Code != mh.IDENTITY { + return nil, ErrNoPublicKey + } + pk, err := ic.UnmarshalPublicKey(decoded.Digest) + if err != nil { + return nil, err + } + return pk, nil +} + +// Validate checks if ID is empty or not. 
+func (id ID) Validate() error { + if id == ID("") { + return ErrEmptyPeerID + } + + return nil +} + +// IDFromString casts a string to the ID type, and validates +// the value to make sure it is a multihash. +func IDFromString(s string) (ID, error) { + if _, err := mh.Cast([]byte(s)); err != nil { + return ID(""), err + } + return ID(s), nil +} + +// IDFromBytes casts a byte slice to the ID type, and validates +// the value to make sure it is a multihash. +func IDFromBytes(b []byte) (ID, error) { + if _, err := mh.Cast(b); err != nil { + return ID(""), err + } + return ID(b), nil +} + +// IDB58Decode decodes a peer ID. +// +// Deprecated: Use Decode. +func IDB58Decode(s string) (ID, error) { + return Decode(s) +} + +// IDB58Encode returns the base58-encoded multihash representation of the ID. +// +// Deprecated: Use Encode. +func IDB58Encode(id ID) string { + return b58.Encode([]byte(id)) +} + +// IDHexDecode accepts a hex-encoded multihash representing a peer ID +// and returns the decoded ID if the input is valid. +// +// Deprecated: Don't raw-hex encode peer IDs, use base16 CIDs. +func IDHexDecode(s string) (ID, error) { + m, err := mh.FromHexString(s) + if err != nil { + return "", err + } + return ID(m), err +} + +// IDHexEncode returns the hex-encoded multihash representation of the ID. +// +// Deprecated: Don't raw-hex encode peer IDs, use base16 CIDs. +func IDHexEncode(id ID) string { + return hex.EncodeToString([]byte(id)) +} + +// Decode accepts an encoded peer ID and returns the decoded ID if the input is +// valid. +// +// The encoded peer ID can either be a CID of a key or a raw multihash (identity +// or sha256-256). 
+func Decode(s string) (ID, error) { + if strings.HasPrefix(s, "Qm") || strings.HasPrefix(s, "1") { + // base58 encoded sha256 or identity multihash + m, err := mh.FromB58String(s) + if err != nil { + return "", fmt.Errorf("failed to parse peer ID: %s", err) + } + return ID(m), nil + } + + c, err := cid.Decode(s) + if err != nil { + return "", fmt.Errorf("failed to parse peer ID: %s", err) + } + return FromCid(c) +} + +// Encode encodes a peer ID as a string. +// +// At the moment, it base58 encodes the peer ID but, in the future, it will +// switch to encoding it as a CID by default. +func Encode(id ID) string { + return IDB58Encode(id) +} + +// FromCid converts a CID to a peer ID, if possible. +func FromCid(c cid.Cid) (ID, error) { + ty := c.Type() + if ty != cid.Libp2pKey { + s := cid.CodecToStr[ty] + if s == "" { + s = fmt.Sprintf("[unknown multicodec %d]", ty) + } + return "", fmt.Errorf("can't convert CID of type %s to a peer ID", s) + } + return ID(c.Hash()), nil +} + +// ToCid encodes a peer ID as a CID of the public key. +// +// If the peer ID is invalid (e.g., empty), this will return the empty CID. +func ToCid(id ID) cid.Cid { + m, err := mh.Cast([]byte(id)) + if err != nil { + return cid.Cid{} + } + return cid.NewCidV1(cid.Libp2pKey, m) +} + +// IDFromPublicKey returns the Peer ID corresponding to the public key pk. +func IDFromPublicKey(pk ic.PubKey) (ID, error) { + b, err := pk.Bytes() + if err != nil { + return "", err + } + var alg uint64 = mh.SHA2_256 + if AdvancedEnableInlining && len(b) <= maxInlineKeyLength { + alg = mh.ID + } + hash, _ := mh.Sum(b, alg, -1) + return ID(hash), nil +} + +// IDFromPrivateKey returns the Peer ID corresponding to the secret key sk. +func IDFromPrivateKey(sk ic.PrivKey) (ID, error) { + return IDFromPublicKey(sk.GetPublic()) +} + +// IDSlice for sorting peers. 
+type IDSlice []ID + +func (es IDSlice) Len() int { return len(es) } +func (es IDSlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] } +func (es IDSlice) Less(i, j int) bool { return string(es[i]) < string(es[j]) } diff --git a/vendor/github.com/libp2p/go-libp2p-core/peer/peer_serde.go b/vendor/github.com/libp2p/go-libp2p-core/peer/peer_serde.go new file mode 100644 index 0000000000..e401fbbb79 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/peer/peer_serde.go @@ -0,0 +1,75 @@ +// Package peer contains Protobuf and JSON serialization/deserialization methods for peer IDs. +package peer + +import ( + "encoding" + "encoding/json" +) + +// Interface assertions commented out to avoid introducing hard dependencies to protobuf. +// var _ proto.Marshaler = (*ID)(nil) +// var _ proto.Unmarshaler = (*ID)(nil) +var _ json.Marshaler = (*ID)(nil) +var _ json.Unmarshaler = (*ID)(nil) + +var _ encoding.BinaryMarshaler = (*ID)(nil) +var _ encoding.BinaryUnmarshaler = (*ID)(nil) +var _ encoding.TextMarshaler = (*ID)(nil) +var _ encoding.TextUnmarshaler = (*ID)(nil) + +func (id ID) Marshal() ([]byte, error) { + return []byte(id), nil +} + +// MarshalBinary returns the byte representation of the peer ID. +func (id ID) MarshalBinary() ([]byte, error) { + return id.Marshal() +} + +func (id ID) MarshalTo(data []byte) (n int, err error) { + return copy(data, []byte(id)), nil +} + +func (id *ID) Unmarshal(data []byte) (err error) { + *id, err = IDFromBytes(data) + return err +} + +// UnmarshalBinary sets the ID from its binary representation. +func (id *ID) UnmarshalBinary(data []byte) error { + return id.Unmarshal(data) +} + +// Size implements Gogo's proto.Sizer, but we omit the compile-time assertion to avoid introducing a hard +// dependency on gogo. 
+func (id ID) Size() int { + return len([]byte(id)) +} + +func (id ID) MarshalJSON() ([]byte, error) { + return json.Marshal(IDB58Encode(id)) +} + +func (id *ID) UnmarshalJSON(data []byte) (err error) { + var v string + if err = json.Unmarshal(data, &v); err != nil { + return err + } + *id, err = IDB58Decode(v) + return err +} + +// MarshalText returns the text encoding of the ID. +func (id ID) MarshalText() ([]byte, error) { + return []byte(IDB58Encode(id)), nil +} + +// UnmarshalText restores the ID from its text encoding. +func (id *ID) UnmarshalText(data []byte) error { + pid, err := IDB58Decode(string(data)) + if err != nil { + return err + } + *id = pid + return nil +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/peer/record.go b/vendor/github.com/libp2p/go-libp2p-core/peer/record.go new file mode 100644 index 0000000000..3638c337f3 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/peer/record.go @@ -0,0 +1,232 @@ +package peer + +import ( + "fmt" + "time" + + pb "github.com/libp2p/go-libp2p-core/peer/pb" + "github.com/libp2p/go-libp2p-core/record" + + ma "github.com/multiformats/go-multiaddr" + + "github.com/gogo/protobuf/proto" +) + +var _ record.Record = (*PeerRecord)(nil) + +func init() { + record.RegisterType(&PeerRecord{}) +} + +// PeerRecordEnvelopeDomain is the domain string used for peer records contained in a Envelope. +const PeerRecordEnvelopeDomain = "libp2p-peer-record" + +// PeerRecordEnvelopePayloadType is the type hint used to identify peer records in a Envelope. +// Defined in https://github.com/multiformats/multicodec/blob/master/table.csv +// with name "libp2p-peer-record". +var PeerRecordEnvelopePayloadType = []byte{0x03, 0x01} + +// PeerRecord contains information that is broadly useful to share with other peers, +// either through a direct exchange (as in the libp2p identify protocol), or through +// a Peer Routing provider, such as a DHT. 
+// +// Currently, a PeerRecord contains the public listen addresses for a peer, but this +// is expected to expand to include other information in the future. +// +// PeerRecords are ordered in time by their Seq field. Newer PeerRecords must have +// greater Seq values than older records. The NewPeerRecord function will create +// a PeerRecord with a timestamp-based Seq value. The other PeerRecord fields should +// be set by the caller: +// +// rec := peer.NewPeerRecord() +// rec.PeerID = aPeerID +// rec.Addrs = someAddrs +// +// Alternatively, you can construct a PeerRecord struct directly and use the TimestampSeq +// helper to set the Seq field: +// +// rec := peer.PeerRecord{PeerID: aPeerID, Addrs: someAddrs, Seq: peer.TimestampSeq()} +// +// Failing to set the Seq field will not result in an error, however, a PeerRecord with a +// Seq value of zero may be ignored or rejected by other peers. +// +// PeerRecords are intended to be shared with other peers inside a signed +// routing.Envelope, and PeerRecord implements the routing.Record interface +// to facilitate this. +// +// To share a PeerRecord, first call Sign to wrap the record in a Envelope +// and sign it with the local peer's private key: +// +// rec := &PeerRecord{PeerID: myPeerId, Addrs: myAddrs} +// envelope, err := rec.Sign(myPrivateKey) +// +// The resulting record.Envelope can be marshalled to a []byte and shared +// publicly. As a convenience, the MarshalSigned method will produce the +// Envelope and marshal it to a []byte in one go: +// +// rec := &PeerRecord{PeerID: myPeerId, Addrs: myAddrs} +// recordBytes, err := rec.MarshalSigned(myPrivateKey) +// +// To validate and unmarshal a signed PeerRecord from a remote peer, +// "consume" the containing envelope, which will return both the +// routing.Envelope and the inner Record. 
The Record must be cast to +// a PeerRecord pointer before use: +// +// envelope, untypedRecord, err := ConsumeEnvelope(envelopeBytes, PeerRecordEnvelopeDomain) +// if err != nil { +// handleError(err) +// return +// } +// peerRec := untypedRecord.(*PeerRecord) +// +type PeerRecord struct { + // PeerID is the ID of the peer this record pertains to. + PeerID ID + + // Addrs contains the public addresses of the peer this record pertains to. + Addrs []ma.Multiaddr + + // Seq is a monotonically-increasing sequence counter that's used to order + // PeerRecords in time. The interval between Seq values is unspecified, + // but newer PeerRecords MUST have a greater Seq value than older records + // for the same peer. + Seq uint64 +} + +// NewPeerRecord returns a PeerRecord with a timestamp-based sequence number. +// The returned record is otherwise empty and should be populated by the caller. +func NewPeerRecord() *PeerRecord { + return &PeerRecord{Seq: TimestampSeq()} +} + +// PeerRecordFromAddrInfo creates a PeerRecord from an AddrInfo struct. +// The returned record will have a timestamp-based sequence number. +func PeerRecordFromAddrInfo(info AddrInfo) *PeerRecord { + rec := NewPeerRecord() + rec.PeerID = info.ID + rec.Addrs = info.Addrs + return rec +} + +// PeerRecordFromProtobuf creates a PeerRecord from a protobuf PeerRecord +// struct. +func PeerRecordFromProtobuf(msg *pb.PeerRecord) (*PeerRecord, error) { + record := &PeerRecord{} + + var id ID + if err := id.UnmarshalBinary(msg.PeerId); err != nil { + return nil, err + } + + record.PeerID = id + record.Addrs = addrsFromProtobuf(msg.Addresses) + record.Seq = msg.Seq + + return record, nil +} + +// TimestampSeq is a helper to generate a timestamp-based sequence number for a PeerRecord. +func TimestampSeq() uint64 { + return uint64(time.Now().UnixNano()) +} + +// Domain is used when signing and validating PeerRecords contained in Envelopes. +// It is constant for all PeerRecord instances. 
+func (r *PeerRecord) Domain() string { + return PeerRecordEnvelopeDomain +} + +// Codec is a binary identifier for the PeerRecord type. It is constant for all PeerRecord instances. +func (r *PeerRecord) Codec() []byte { + return PeerRecordEnvelopePayloadType +} + +// UnmarshalRecord parses a PeerRecord from a byte slice. +// This method is called automatically when consuming a record.Envelope +// whose PayloadType indicates that it contains a PeerRecord. +// It is generally not necessary or recommended to call this method directly. +func (r *PeerRecord) UnmarshalRecord(bytes []byte) error { + if r == nil { + return fmt.Errorf("cannot unmarshal PeerRecord to nil receiver") + } + + var msg pb.PeerRecord + err := proto.Unmarshal(bytes, &msg) + if err != nil { + return err + } + + rPtr, err := PeerRecordFromProtobuf(&msg) + if err != nil { + return err + } + *r = *rPtr + + return nil +} + +// MarshalRecord serializes a PeerRecord to a byte slice. +// This method is called automatically when constructing a routing.Envelope +// using Seal or PeerRecord.Sign. +func (r *PeerRecord) MarshalRecord() ([]byte, error) { + msg, err := r.ToProtobuf() + if err != nil { + return nil, err + } + return proto.Marshal(msg) +} + +// Equal returns true if the other PeerRecord is identical to this one. +func (r *PeerRecord) Equal(other *PeerRecord) bool { + if other == nil { + return r == nil + } + if r.PeerID != other.PeerID { + return false + } + if r.Seq != other.Seq { + return false + } + if len(r.Addrs) != len(other.Addrs) { + return false + } + for i, _ := range r.Addrs { + if !r.Addrs[i].Equal(other.Addrs[i]) { + return false + } + } + return true +} + +// ToProtobuf returns the equivalent Protocol Buffer struct object of a PeerRecord. 
+func (r *PeerRecord) ToProtobuf() (*pb.PeerRecord, error) { + idBytes, err := r.PeerID.MarshalBinary() + if err != nil { + return nil, err + } + return &pb.PeerRecord{ + PeerId: idBytes, + Addresses: addrsToProtobuf(r.Addrs), + Seq: r.Seq, + }, nil +} + +func addrsFromProtobuf(addrs []*pb.PeerRecord_AddressInfo) []ma.Multiaddr { + var out []ma.Multiaddr + for _, addr := range addrs { + a, err := ma.NewMultiaddrBytes(addr.Multiaddr) + if err != nil { + continue + } + out = append(out, a) + } + return out +} + +func addrsToProtobuf(addrs []ma.Multiaddr) []*pb.PeerRecord_AddressInfo { + var out []*pb.PeerRecord_AddressInfo + for _, addr := range addrs { + out = append(out, &pb.PeerRecord_AddressInfo{Multiaddr: addr.Bytes()}) + } + return out +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/peer/set.go b/vendor/github.com/libp2p/go-libp2p-core/peer/set.go new file mode 100644 index 0000000000..2251a677e2 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/peer/set.go @@ -0,0 +1,71 @@ +package peer + +import ( + "sync" +) + +// PeerSet is a threadsafe set of peers. +type Set struct { + lk sync.RWMutex + ps map[ID]struct{} + + size int +} + +func NewSet() *Set { + ps := new(Set) + ps.ps = make(map[ID]struct{}) + ps.size = -1 + return ps +} + +func NewLimitedSet(size int) *Set { + ps := new(Set) + ps.ps = make(map[ID]struct{}) + ps.size = size + return ps +} + +func (ps *Set) Add(p ID) { + ps.lk.Lock() + ps.ps[p] = struct{}{} + ps.lk.Unlock() +} + +func (ps *Set) Contains(p ID) bool { + ps.lk.RLock() + _, ok := ps.ps[p] + ps.lk.RUnlock() + return ok +} + +func (ps *Set) Size() int { + ps.lk.RLock() + defer ps.lk.RUnlock() + return len(ps.ps) +} + +// TryAdd Attempts to add the given peer into the set. 
+// This operation can fail for one of two reasons: +// 1) The given peer is already in the set +// 2) The number of peers in the set is equal to size +func (ps *Set) TryAdd(p ID) bool { + var success bool + ps.lk.Lock() + if _, ok := ps.ps[p]; !ok && (len(ps.ps) < ps.size || ps.size == -1) { + success = true + ps.ps[p] = struct{}{} + } + ps.lk.Unlock() + return success +} + +func (ps *Set) Peers() []ID { + ps.lk.Lock() + out := make([]ID, 0, len(ps.ps)) + for p, _ := range ps.ps { + out = append(out, p) + } + ps.lk.Unlock() + return out +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/protocol/id.go b/vendor/github.com/libp2p/go-libp2p-core/protocol/id.go new file mode 100644 index 0000000000..9df3b5bcf1 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/protocol/id.go @@ -0,0 +1,29 @@ +package protocol + +// ID is an identifier used to write protocol headers in streams. +type ID string + +// These are reserved protocol.IDs. +const ( + TestingID ID = "/p2p/_testing" +) + +// ConvertFromStrings is a convenience function that takes a slice of strings and +// converts it to a slice of protocol.ID. +func ConvertFromStrings(ids []string) (res []ID) { + res = make([]ID, 0, len(ids)) + for _, id := range ids { + res = append(res, ID(id)) + } + return res +} + +// ConvertToStrings is a convenience function that takes a slice of protocol.ID and +// converts it to a slice of strings. +func ConvertToStrings(ids []ID) (res []string) { + res = make([]string, 0, len(ids)) + for _, id := range ids { + res = append(res, string(id)) + } + return res +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/protocol/switch.go b/vendor/github.com/libp2p/go-libp2p-core/protocol/switch.go new file mode 100644 index 0000000000..f3ac369bec --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/protocol/switch.go @@ -0,0 +1,81 @@ +// Package protocol provides core interfaces for protocol routing and negotiation in libp2p. 
+package protocol + +import ( + "io" +) + +// HandlerFunc is a user-provided function used by the Router to +// handle a protocol/stream. +// +// Will be invoked with the protocol ID string as the first argument, +// which may differ from the ID used for registration if the handler +// was registered using a match function. +type HandlerFunc = func(protocol string, rwc io.ReadWriteCloser) error + +// Router is an interface that allows users to add and remove protocol handlers, +// which will be invoked when incoming stream requests for registered protocols +// are accepted. +// +// Upon receiving an incoming stream request, the Router will check all registered +// protocol handlers to determine which (if any) is capable of handling the stream. +// The handlers are checked in order of registration; if multiple handlers are +// eligible, only the first to be registered will be invoked. +type Router interface { + + // AddHandler registers the given handler to be invoked for + // an exact literal match of the given protocol ID string. + AddHandler(protocol string, handler HandlerFunc) + + // AddHandlerWithFunc registers the given handler to be invoked + // when the provided match function returns true. + // + // The match function will be invoked with an incoming protocol + // ID string, and should return true if the handler supports + // the protocol. Note that the protocol ID argument is not + // used for matching; if you want to match the protocol ID + // string exactly, you must check for it in your match function. + AddHandlerWithFunc(protocol string, match func(string) bool, handler HandlerFunc) + + // RemoveHandler removes the registered handler (if any) for the + // given protocol ID string. + RemoveHandler(protocol string) + + // Protocols returns a list of all registered protocol ID strings. + // Note that the Router may be able to handle protocol IDs not + // included in this list if handlers were added with match functions + // using AddHandlerWithFunc. 
+ Protocols() []string +} + +// Negotiator is a component capable of reaching agreement over what protocols +// to use for inbound streams of communication. +type Negotiator interface { + + // NegotiateLazy will return the registered protocol handler to use + // for a given inbound stream, returning as soon as the protocol has been + // determined. Returns an error if negotiation fails. + // + // NegotiateLazy may return before all protocol negotiation responses have been + // written to the stream. This is in contrast to Negotiate, which will block until + // the Negotiator is finished with the stream. + NegotiateLazy(rwc io.ReadWriteCloser) (io.ReadWriteCloser, string, HandlerFunc, error) + + // Negotiate will return the registered protocol handler to use for a given + // inbound stream, returning after the protocol has been determined and the + // Negotiator has finished using the stream for negotiation. Returns an + // error if negotiation fails. + Negotiate(rwc io.ReadWriteCloser) (string, HandlerFunc, error) + + // Handle calls Negotiate to determine which protocol handler to use for an + // inbound stream, then invokes the protocol handler function, passing it + // the protocol ID and the stream. Returns an error if negotiation fails. + Handle(rwc io.ReadWriteCloser) error +} + +// Switch is the component responsible for "dispatching" incoming stream requests to +// their corresponding stream handlers. It is both a Negotiator and a Router. 
+type Switch interface { + Router + Negotiator +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/record/envelope.go b/vendor/github.com/libp2p/go-libp2p-core/record/envelope.go new file mode 100644 index 0000000000..bdc33abdf1 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/record/envelope.go @@ -0,0 +1,297 @@ +package record + +import ( + "bytes" + "errors" + "fmt" + "sync" + + "github.com/libp2p/go-libp2p-core/crypto" + pb "github.com/libp2p/go-libp2p-core/record/pb" + + pool "github.com/libp2p/go-buffer-pool" + + "github.com/gogo/protobuf/proto" + "github.com/multiformats/go-varint" +) + +// Envelope contains an arbitrary []byte payload, signed by a libp2p peer. +// +// Envelopes are signed in the context of a particular "domain", which is a +// string specified when creating and verifying the envelope. You must know the +// domain string used to produce the envelope in order to verify the signature +// and access the payload. +type Envelope struct { + // The public key that can be used to verify the signature and derive the peer id of the signer. + PublicKey crypto.PubKey + + // A binary identifier that indicates what kind of data is contained in the payload. + // TODO(yusef): enforce multicodec prefix + PayloadType []byte + + // The envelope payload. + RawPayload []byte + + // The signature of the domain string :: type hint :: payload. + signature []byte + + // the unmarshalled payload as a Record, cached on first access via the Record accessor method + cached Record + unmarshalError error + unmarshalOnce sync.Once +} + +var ErrEmptyDomain = errors.New("envelope domain must not be empty") +var ErrEmptyPayloadType = errors.New("payloadType must not be empty") +var ErrInvalidSignature = errors.New("invalid signature or incorrect domain") + +// Seal marshals the given Record, places the marshaled bytes inside an Envelope, +// and signs with the given private key. 
+func Seal(rec Record, privateKey crypto.PrivKey) (*Envelope, error) { + payload, err := rec.MarshalRecord() + if err != nil { + return nil, fmt.Errorf("error marshaling record: %v", err) + } + + domain := rec.Domain() + payloadType := rec.Codec() + if domain == "" { + return nil, ErrEmptyDomain + } + + if len(payloadType) == 0 { + return nil, ErrEmptyPayloadType + } + + unsigned, err := makeUnsigned(domain, payloadType, payload) + if err != nil { + return nil, err + } + defer pool.Put(unsigned) + + sig, err := privateKey.Sign(unsigned) + if err != nil { + return nil, err + } + + return &Envelope{ + PublicKey: privateKey.GetPublic(), + PayloadType: payloadType, + RawPayload: payload, + signature: sig, + }, nil +} + +// ConsumeEnvelope unmarshals a serialized Envelope and validates its +// signature using the provided 'domain' string. If validation fails, an error +// is returned, along with the unmarshalled envelope so it can be inspected. +// +// On success, ConsumeEnvelope returns the Envelope itself, as well as the inner payload, +// unmarshalled into a concrete Record type. The actual type of the returned Record depends +// on what has been registered for the Envelope's PayloadType (see RegisterType for details). +// +// You can type assert on the returned Record to convert it to an instance of the concrete +// Record type: +// +// envelope, rec, err := ConsumeEnvelope(envelopeBytes, peer.PeerRecordEnvelopeDomain) +// if err != nil { +// handleError(envelope, err) // envelope may be non-nil, even if errors occur! +// return +// } +// peerRec, ok := rec.(*peer.PeerRecord) +// if ok { +// doSomethingWithPeerRecord(peerRec) +// } +// +// Important: you MUST check the error value before using the returned Envelope. In some error +// cases, including when the envelope signature is invalid, both the Envelope and an error will +// be returned. This allows you to inspect the unmarshalled but invalid Envelope. 
As a result, +// you must not assume that any non-nil Envelope returned from this function is valid. +// +// If the Envelope signature is valid, but no Record type is registered for the Envelope's +// PayloadType, ErrPayloadTypeNotRegistered will be returned, along with the Envelope and +// a nil Record. +func ConsumeEnvelope(data []byte, domain string) (envelope *Envelope, rec Record, err error) { + e, err := UnmarshalEnvelope(data) + if err != nil { + return nil, nil, fmt.Errorf("failed when unmarshalling the envelope: %w", err) + } + + err = e.validate(domain) + if err != nil { + return e, nil, fmt.Errorf("failed to validate envelope: %w", err) + } + + rec, err = e.Record() + if err != nil { + return e, nil, fmt.Errorf("failed to unmarshal envelope payload: %w", err) + } + return e, rec, nil +} + +// ConsumeTypedEnvelope unmarshals a serialized Envelope and validates its +// signature. If validation fails, an error is returned, along with the unmarshalled +// envelope so it can be inspected. +// +// Unlike ConsumeEnvelope, ConsumeTypedEnvelope does not try to automatically determine +// the type of Record to unmarshal the Envelope's payload into. Instead, the caller provides +// a destination Record instance, which will unmarshal the Envelope payload. It is the caller's +// responsibility to determine whether the given Record type is able to unmarshal the payload +// correctly. +// +// rec := &MyRecordType{} +// envelope, err := ConsumeTypedEnvelope(envelopeBytes, rec) +// if err != nil { +// handleError(envelope, err) +// } +// doSomethingWithRecord(rec) +// +// Important: you MUST check the error value before using the returned Envelope. In some error +// cases, including when the envelope signature is invalid, both the Envelope and an error will +// be returned. This allows you to inspect the unmarshalled but invalid Envelope. As a result, +// you must not assume that any non-nil Envelope returned from this function is valid. 
+func ConsumeTypedEnvelope(data []byte, destRecord Record) (envelope *Envelope, err error) { + e, err := UnmarshalEnvelope(data) + if err != nil { + return nil, fmt.Errorf("failed when unmarshalling the envelope: %w", err) + } + + err = e.validate(destRecord.Domain()) + if err != nil { + return e, fmt.Errorf("failed to validate envelope: %w", err) + } + + err = destRecord.UnmarshalRecord(e.RawPayload) + if err != nil { + return e, fmt.Errorf("failed to unmarshal envelope payload: %w", err) + } + e.cached = destRecord + return e, nil +} + +// UnmarshalEnvelope unmarshals a serialized Envelope protobuf message, +// without validating its contents. Most users should use ConsumeEnvelope. +func UnmarshalEnvelope(data []byte) (*Envelope, error) { + var e pb.Envelope + if err := proto.Unmarshal(data, &e); err != nil { + return nil, err + } + + key, err := crypto.PublicKeyFromProto(e.PublicKey) + if err != nil { + return nil, err + } + + return &Envelope{ + PublicKey: key, + PayloadType: e.PayloadType, + RawPayload: e.Payload, + signature: e.Signature, + }, nil +} + +// Marshal returns a byte slice containing a serialized protobuf representation +// of a Envelope. +func (e *Envelope) Marshal() ([]byte, error) { + key, err := crypto.PublicKeyToProto(e.PublicKey) + if err != nil { + return nil, err + } + + msg := pb.Envelope{ + PublicKey: key, + PayloadType: e.PayloadType, + Payload: e.RawPayload, + Signature: e.signature, + } + return proto.Marshal(&msg) +} + +// Equal returns true if the other Envelope has the same public key, +// payload, payload type, and signature. This implies that they were also +// created with the same domain string. 
+func (e *Envelope) Equal(other *Envelope) bool { + if other == nil { + return e == nil + } + return e.PublicKey.Equals(other.PublicKey) && + bytes.Compare(e.PayloadType, other.PayloadType) == 0 && + bytes.Compare(e.signature, other.signature) == 0 && + bytes.Compare(e.RawPayload, other.RawPayload) == 0 +} + +// Record returns the Envelope's payload unmarshalled as a Record. +// The concrete type of the returned Record depends on which Record +// type was registered for the Envelope's PayloadType - see record.RegisterType. +// +// Once unmarshalled, the Record is cached for future access. +func (e *Envelope) Record() (Record, error) { + e.unmarshalOnce.Do(func() { + if e.cached != nil { + return + } + e.cached, e.unmarshalError = unmarshalRecordPayload(e.PayloadType, e.RawPayload) + }) + return e.cached, e.unmarshalError +} + +// TypedRecord unmarshals the Envelope's payload to the given Record instance. +// It is the caller's responsibility to ensure that the Record type is capable +// of unmarshalling the Envelope payload. Callers can inspect the Envelope's +// PayloadType field to determine the correct type of Record to use. +// +// This method will always unmarshal the Envelope payload even if a cached record +// exists. +func (e *Envelope) TypedRecord(dest Record) error { + return dest.UnmarshalRecord(e.RawPayload) +} + +// validate returns nil if the envelope signature is valid for the given 'domain', +// or an error if signature validation fails. +func (e *Envelope) validate(domain string) error { + unsigned, err := makeUnsigned(domain, e.PayloadType, e.RawPayload) + if err != nil { + return err + } + defer pool.Put(unsigned) + + valid, err := e.PublicKey.Verify(unsigned, e.signature) + if err != nil { + return fmt.Errorf("failed while verifying signature: %w", err) + } + if !valid { + return ErrInvalidSignature + } + return nil +} + +// makeUnsigned is a helper function that prepares a buffer to sign or verify. +// It returns a byte slice from a pool. 
The caller MUST return this slice to the +// pool. +func makeUnsigned(domain string, payloadType []byte, payload []byte) ([]byte, error) { + var ( + fields = [][]byte{[]byte(domain), payloadType, payload} + + // fields are prefixed with their length as an unsigned varint. we + // compute the lengths before allocating the sig buffer so we know how + // much space to add for the lengths + flen = make([][]byte, len(fields)) + size = 0 + ) + + for i, f := range fields { + l := len(f) + flen[i] = varint.ToUvarint(uint64(l)) + size += l + len(flen[i]) + } + + b := pool.Get(size) + + var s int + for i, f := range fields { + s += copy(b[s:], flen[i]) + s += copy(b[s:], f) + } + + return b[:s], nil +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/record/pb/Makefile b/vendor/github.com/libp2p/go-libp2p-core/record/pb/Makefile new file mode 100644 index 0000000000..7cf8222f89 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/record/pb/Makefile @@ -0,0 +1,11 @@ +PB = $(wildcard *.proto) +GO = $(PB:.proto=.pb.go) + +all: $(GO) + +%.pb.go: %.proto + protoc --proto_path=$(PWD):$(PWD)/../.. --gogofaster_out=. $< + +clean: + rm -f *.pb.go + rm -f *.go diff --git a/vendor/github.com/libp2p/go-libp2p-core/record/pb/envelope.pb.go b/vendor/github.com/libp2p/go-libp2p-core/record/pb/envelope.pb.go new file mode 100644 index 0000000000..412809f13a --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/record/pb/envelope.pb.go @@ -0,0 +1,504 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: envelope.proto + +package record_pb + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + pb "github.com/libp2p/go-libp2p-core/crypto/pb" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Envelope encloses a signed payload produced by a peer, along with the public +// key of the keypair it was signed with so that it can be statelessly validated +// by the receiver. +// +// The payload is prefixed with a byte string that determines the type, so it +// can be deserialized deterministically. Often, this byte string is a +// multicodec. +type Envelope struct { + // public_key is the public key of the keypair the enclosed payload was + // signed with. + PublicKey *pb.PublicKey `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + // payload_type encodes the type of payload, so that it can be deserialized + // deterministically. + PayloadType []byte `protobuf:"bytes,2,opt,name=payload_type,json=payloadType,proto3" json:"payload_type,omitempty"` + // payload is the actual payload carried inside this envelope. + Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` + // signature is the signature produced by the private key corresponding to + // the enclosed public key, over the payload, prefixing a domain string for + // additional security. 
+ Signature []byte `protobuf:"bytes,5,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (m *Envelope) Reset() { *m = Envelope{} } +func (m *Envelope) String() string { return proto.CompactTextString(m) } +func (*Envelope) ProtoMessage() {} +func (*Envelope) Descriptor() ([]byte, []int) { + return fileDescriptor_ee266e8c558e9dc5, []int{0} +} +func (m *Envelope) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Envelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Envelope.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Envelope) XXX_Merge(src proto.Message) { + xxx_messageInfo_Envelope.Merge(m, src) +} +func (m *Envelope) XXX_Size() int { + return m.Size() +} +func (m *Envelope) XXX_DiscardUnknown() { + xxx_messageInfo_Envelope.DiscardUnknown(m) +} + +var xxx_messageInfo_Envelope proto.InternalMessageInfo + +func (m *Envelope) GetPublicKey() *pb.PublicKey { + if m != nil { + return m.PublicKey + } + return nil +} + +func (m *Envelope) GetPayloadType() []byte { + if m != nil { + return m.PayloadType + } + return nil +} + +func (m *Envelope) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} + +func (m *Envelope) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + +func init() { + proto.RegisterType((*Envelope)(nil), "record.pb.Envelope") +} + +func init() { proto.RegisterFile("envelope.proto", fileDescriptor_ee266e8c558e9dc5) } + +var fileDescriptor_ee266e8c558e9dc5 = []byte{ + // 205 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0xcd, 0x2b, 0x4b, + 0xcd, 0xc9, 0x2f, 0x48, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2c, 0x4a, 0x4d, 0xce, + 0x2f, 0x4a, 0xd1, 0x2b, 0x48, 0x92, 0x12, 0x4b, 0x2e, 0xaa, 0x2c, 0x28, 0xc9, 
0xd7, 0x2f, 0x48, + 0xd2, 0x87, 0xb0, 0x20, 0x4a, 0x94, 0x66, 0x31, 0x72, 0x71, 0xb8, 0x42, 0x75, 0x09, 0x19, 0x73, + 0x71, 0x15, 0x94, 0x26, 0xe5, 0x64, 0x26, 0xc7, 0x67, 0xa7, 0x56, 0x4a, 0x30, 0x2a, 0x30, 0x6a, + 0x70, 0x1b, 0x89, 0xe8, 0xc1, 0xd4, 0x27, 0xe9, 0x05, 0x80, 0x25, 0xbd, 0x53, 0x2b, 0x83, 0x38, + 0x0b, 0x60, 0x4c, 0x21, 0x45, 0x2e, 0x9e, 0x82, 0xc4, 0xca, 0x9c, 0xfc, 0xc4, 0x94, 0xf8, 0x92, + 0xca, 0x82, 0x54, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x6e, 0xa8, 0x58, 0x48, 0x65, 0x41, + 0xaa, 0x90, 0x04, 0x17, 0x3b, 0x94, 0x2b, 0xc1, 0x0c, 0x96, 0x85, 0x71, 0x85, 0x64, 0xb8, 0x38, + 0x8b, 0x33, 0xd3, 0xf3, 0x12, 0x4b, 0x4a, 0x8b, 0x52, 0x25, 0x58, 0xc1, 0x72, 0x08, 0x01, 0x27, + 0x89, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, + 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x48, 0x62, 0x03, 0xbb, 0xde, 0x18, + 0x10, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x0b, 0xd9, 0x6d, 0xf2, 0x00, 0x00, 0x00, +} + +func (m *Envelope) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Envelope) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Envelope) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signature) > 0 { + i -= len(m.Signature) + copy(dAtA[i:], m.Signature) + i = encodeVarintEnvelope(dAtA, i, uint64(len(m.Signature))) + i-- + dAtA[i] = 0x2a + } + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarintEnvelope(dAtA, i, uint64(len(m.Payload))) + i-- + dAtA[i] = 0x1a + } + if len(m.PayloadType) > 0 { + i -= len(m.PayloadType) + copy(dAtA[i:], m.PayloadType) + i = encodeVarintEnvelope(dAtA, i, uint64(len(m.PayloadType))) + i-- + dAtA[i] = 0x12 + } + if m.PublicKey != nil { + { + size, 
err := m.PublicKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEnvelope(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintEnvelope(dAtA []byte, offset int, v uint64) int { + offset -= sovEnvelope(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Envelope) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PublicKey != nil { + l = m.PublicKey.Size() + n += 1 + l + sovEnvelope(uint64(l)) + } + l = len(m.PayloadType) + if l > 0 { + n += 1 + l + sovEnvelope(uint64(l)) + } + l = len(m.Payload) + if l > 0 { + n += 1 + l + sovEnvelope(uint64(l)) + } + l = len(m.Signature) + if l > 0 { + n += 1 + l + sovEnvelope(uint64(l)) + } + return n +} + +func sovEnvelope(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEnvelope(x uint64) (n int) { + return sovEnvelope(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Envelope) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Envelope: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Envelope: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PublicKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEnvelope + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEnvelope + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PublicKey == nil { + m.PublicKey = &pb.PublicKey{} + } + if err := m.PublicKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PayloadType", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthEnvelope + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthEnvelope + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PayloadType = append(m.PayloadType[:0], dAtA[iNdEx:postIndex]...) + if m.PayloadType == nil { + m.PayloadType = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthEnvelope + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthEnvelope + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = append(m.Payload[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Payload == nil { + m.Payload = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthEnvelope + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthEnvelope + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) + if m.Signature == nil { + m.Signature = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEnvelope(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvelope + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEnvelope + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEnvelope(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEnvelope + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEnvelope + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEnvelope + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEnvelope + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEnvelope + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEnvelope + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEnvelope = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEnvelope = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEnvelope = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/libp2p/go-libp2p-core/record/pb/envelope.proto b/vendor/github.com/libp2p/go-libp2p-core/record/pb/envelope.proto new file mode 100644 index 0000000000..ca3555fbf7 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/record/pb/envelope.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package record.pb; + +import "crypto/pb/crypto.proto"; + +// Envelope encloses a signed payload produced by a peer, along with the public +// key of the keypair it was signed with so that it can be statelessly validated +// by the receiver. +// +// The payload is prefixed with a byte string that determines the type, so it +// can be deserialized deterministically. Often, this byte string is a +// multicodec. +message Envelope { + // public_key is the public key of the keypair the enclosed payload was + // signed with. + crypto.pb.PublicKey public_key = 1; + + // payload_type encodes the type of payload, so that it can be deserialized + // deterministically. + bytes payload_type = 2; + + // payload is the actual payload carried inside this envelope. 
+ bytes payload = 3; + + // signature is the signature produced by the private key corresponding to + // the enclosed public key, over the payload, prefixing a domain string for + // additional security. + bytes signature = 5; +} diff --git a/vendor/github.com/libp2p/go-libp2p-core/record/record.go b/vendor/github.com/libp2p/go-libp2p-core/record/record.go new file mode 100644 index 0000000000..212005780e --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p-core/record/record.go @@ -0,0 +1,102 @@ +package record + +import ( + "errors" + "reflect" +) + +var ( + // ErrPayloadTypeNotRegistered is returned from ConsumeEnvelope when the Envelope's + // PayloadType does not match any registered Record types. + ErrPayloadTypeNotRegistered = errors.New("payload type is not registered") + + payloadTypeRegistry = make(map[string]reflect.Type) +) + +// Record represents a data type that can be used as the payload of an Envelope. +// The Record interface defines the methods used to marshal and unmarshal a Record +// type to a byte slice. +// +// Record types may be "registered" as the default for a given Envelope.PayloadType +// using the RegisterType function. Once a Record type has been registered, +// an instance of that type will be created and used to unmarshal the payload of +// any Envelope with the registered PayloadType when the Envelope is opened using +// the ConsumeEnvelope function. +// +// To use an unregistered Record type instead, use ConsumeTypedEnvelope and pass in +// an instance of the Record type that you'd like the Envelope's payload to be +// unmarshaled into. +type Record interface { + + // Domain is the "signature domain" used when signing and verifying a particular + // Record type. The Domain string should be unique to your Record type, and all + // instances of the Record type must have the same Domain string. 
+ Domain() string + + // Codec is a binary identifier for this type of record, ideally a registered multicodec + // (see https://github.com/multiformats/multicodec). + // When a Record is put into an Envelope (see record.Seal), the Codec value will be used + // as the Envelope's PayloadType. When the Envelope is later unsealed, the PayloadType + // will be used to lookup the correct Record type to unmarshal the Envelope payload into. + Codec() []byte + + // MarshalRecord converts a Record instance to a []byte, so that it can be used as an + // Envelope payload. + MarshalRecord() ([]byte, error) + + // UnmarshalRecord unmarshals a []byte payload into an instance of a particular Record type. + UnmarshalRecord([]byte) error +} + +// RegisterType associates a binary payload type identifier with a concrete +// Record type. This is used to automatically unmarshal Record payloads from Envelopes +// when using ConsumeEnvelope, and to automatically marshal Records and determine the +// correct PayloadType when calling Seal. +// +// Callers must provide an instance of the record type to be registered, which must be +// a pointer type. Registration should be done in the init function of the package +// where the Record type is defined: +// +// package hello_record +// import record "github.com/libp2p/go-libp2p-core/record" +// +// func init() { +// record.RegisterType(&HelloRecord{}) +// } +// +// type HelloRecord struct { } // etc.. 
+// +func RegisterType(prototype Record) { + payloadTypeRegistry[string(prototype.Codec())] = getValueType(prototype) +} + +func unmarshalRecordPayload(payloadType []byte, payloadBytes []byte) (Record, error) { + rec, err := blankRecordForPayloadType(payloadType) + if err != nil { + return nil, err + } + err = rec.UnmarshalRecord(payloadBytes) + if err != nil { + return nil, err + } + return rec, nil +} + +func blankRecordForPayloadType(payloadType []byte) (Record, error) { + valueType, ok := payloadTypeRegistry[string(payloadType)] + if !ok { + return nil, ErrPayloadTypeNotRegistered + } + + val := reflect.New(valueType) + asRecord := val.Interface().(Record) + return asRecord, nil +} + +func getValueType(i interface{}) reflect.Type { + valueType := reflect.TypeOf(i) + if valueType.Kind() == reflect.Ptr { + valueType = valueType.Elem() + } + return valueType +} diff --git a/vendor/github.com/libp2p/go-openssl/.gitignore b/vendor/github.com/libp2p/go-openssl/.gitignore new file mode 100644 index 0000000000..805d350b7e --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/.gitignore @@ -0,0 +1 @@ +openssl.test diff --git a/vendor/github.com/libp2p/go-openssl/AUTHORS b/vendor/github.com/libp2p/go-openssl/AUTHORS new file mode 100644 index 0000000000..a048c1ea16 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/AUTHORS @@ -0,0 +1,24 @@ +Andrew Brampton +Anton Baklanov +Carlos Martín Nieto +Charles Strahan +Christopher Dudley +Christopher Fredericks +Colin Misare +dequis +Gabriel Russell +Giulio +Jakob Unterwurzacher +Juuso Haavisto +kujenga +Phus Lu +Russ Egan +Ryan Hileman +Scott J. 
Goldman +Scott Kidder +Space Monkey, Inc +Stephen Gallagher +Viacheslav Biriukov +Zack Owens +Ramesh Rayaprolu +Paras Shah diff --git a/vendor/github.com/libp2p/go-openssl/LICENSE b/vendor/github.com/libp2p/go-openssl/LICENSE new file mode 100644 index 0000000000..37ec93a14f --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). 
+ +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/libp2p/go-openssl/README.md b/vendor/github.com/libp2p/go-openssl/README.md new file mode 100644 index 0000000000..62ac7dcd6d --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/README.md @@ -0,0 +1,40 @@ +# OpenSSL bindings for Go + +Forked from https://github.com/spacemonkeygo/openssl (unmaintained) to add: + +1. FreeBSD support. +2. Key equality checking. +3. A function to get the size of signatures produced by a key. + +--- + +Please see http://godoc.org/github.com/libp2p/go-openssl for more info + +--- + +### License + +Copyright (C) 2017. See AUTHORS. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +### Using on macOS +1. Install [homebrew](http://brew.sh/) +2. `$ brew install openssl` or `$ brew install openssl@1.1` + +### Using on Windows +1. Install [mingw-w64](http://mingw-w64.sourceforge.net/) +2. Install [pkg-config-lite](http://sourceforge.net/projects/pkgconfiglite) +3. Build (or install precompiled) openssl for mingw32-w64 +4. Set __PKG\_CONFIG\_PATH__ to the directory containing openssl.pc + (i.e. c:\mingw64\mingw64\lib\pkgconfig) diff --git a/vendor/github.com/libp2p/go-openssl/bio.go b/vendor/github.com/libp2p/go-openssl/bio.go new file mode 100644 index 0000000000..9fe32aa803 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/bio.go @@ -0,0 +1,305 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package openssl + +// #include "shim.h" +import "C" + +import ( + "errors" + "io" + "reflect" + "sync" + "unsafe" +) + +const ( + SSLRecordSize = 16 * 1024 +) + +func nonCopyGoBytes(ptr uintptr, length int) []byte { + var slice []byte + header := (*reflect.SliceHeader)(unsafe.Pointer(&slice)) + header.Cap = length + header.Len = length + header.Data = ptr + return slice +} + +func nonCopyCString(data *C.char, size C.int) []byte { + return nonCopyGoBytes(uintptr(unsafe.Pointer(data)), int(size)) +} + +var writeBioMapping = newMapping() + +type writeBio struct { + data_mtx sync.Mutex + op_mtx sync.Mutex + buf []byte + release_buffers bool +} + +func loadWritePtr(b *C.BIO) *writeBio { + t := token(C.X_BIO_get_data(b)) + return (*writeBio)(writeBioMapping.Get(t)) +} + +func bioClearRetryFlags(b *C.BIO) { + C.X_BIO_clear_flags(b, C.BIO_FLAGS_RWS|C.BIO_FLAGS_SHOULD_RETRY) +} + +func bioSetRetryRead(b *C.BIO) { + C.X_BIO_set_flags(b, C.BIO_FLAGS_READ|C.BIO_FLAGS_SHOULD_RETRY) +} + +//export go_write_bio_write +func go_write_bio_write(b *C.BIO, data *C.char, size C.int) (rc C.int) { + defer func() { + if err := recover(); err != nil { + logger.Critf("openssl: writeBioWrite panic'd: %v", err) + rc = -1 + } + }() + ptr := loadWritePtr(b) + if ptr == nil || data == nil || size < 0 { + return -1 + } + ptr.data_mtx.Lock() + defer ptr.data_mtx.Unlock() + bioClearRetryFlags(b) + ptr.buf = append(ptr.buf, nonCopyCString(data, size)...) 
+ return size +} + +//export go_write_bio_ctrl +func go_write_bio_ctrl(b *C.BIO, cmd C.int, arg1 C.long, arg2 unsafe.Pointer) ( + rc C.long) { + defer func() { + if err := recover(); err != nil { + logger.Critf("openssl: writeBioCtrl panic'd: %v", err) + rc = -1 + } + }() + switch cmd { + case C.BIO_CTRL_WPENDING: + return writeBioPending(b) + case C.BIO_CTRL_DUP, C.BIO_CTRL_FLUSH: + return 1 + default: + return 0 + } +} + +func writeBioPending(b *C.BIO) C.long { + ptr := loadWritePtr(b) + if ptr == nil { + return 0 + } + ptr.data_mtx.Lock() + defer ptr.data_mtx.Unlock() + return C.long(len(ptr.buf)) +} + +func (b *writeBio) WriteTo(w io.Writer) (rv int64, err error) { + b.op_mtx.Lock() + defer b.op_mtx.Unlock() + + // write whatever data we currently have + b.data_mtx.Lock() + data := b.buf + b.data_mtx.Unlock() + + if len(data) == 0 { + return 0, nil + } + n, err := w.Write(data) + + // subtract however much data we wrote from the buffer + b.data_mtx.Lock() + b.buf = b.buf[:copy(b.buf, b.buf[n:])] + if b.release_buffers && len(b.buf) == 0 { + b.buf = nil + } + b.data_mtx.Unlock() + + return int64(n), err +} + +func (self *writeBio) Disconnect(b *C.BIO) { + if loadWritePtr(b) == self { + writeBioMapping.Del(token(C.X_BIO_get_data(b))) + C.X_BIO_set_data(b, nil) + } +} + +func (b *writeBio) MakeCBIO() *C.BIO { + rv := C.X_BIO_new_write_bio() + token := writeBioMapping.Add(unsafe.Pointer(b)) + C.X_BIO_set_data(rv, unsafe.Pointer(token)) + return rv +} + +var readBioMapping = newMapping() + +type readBio struct { + data_mtx sync.Mutex + op_mtx sync.Mutex + buf []byte + eof bool + release_buffers bool +} + +func loadReadPtr(b *C.BIO) *readBio { + return (*readBio)(readBioMapping.Get(token(C.X_BIO_get_data(b)))) +} + +//export go_read_bio_read +func go_read_bio_read(b *C.BIO, data *C.char, size C.int) (rc C.int) { + defer func() { + if err := recover(); err != nil { + logger.Critf("openssl: go_read_bio_read panic'd: %v", err) + rc = -1 + } + }() + ptr := loadReadPtr(b) 
+ if ptr == nil || size < 0 { + return -1 + } + ptr.data_mtx.Lock() + defer ptr.data_mtx.Unlock() + bioClearRetryFlags(b) + if len(ptr.buf) == 0 { + if ptr.eof { + return 0 + } + bioSetRetryRead(b) + return -1 + } + if size == 0 || data == nil { + return C.int(len(ptr.buf)) + } + n := copy(nonCopyCString(data, size), ptr.buf) + ptr.buf = ptr.buf[:copy(ptr.buf, ptr.buf[n:])] + if ptr.release_buffers && len(ptr.buf) == 0 { + ptr.buf = nil + } + return C.int(n) +} + +//export go_read_bio_ctrl +func go_read_bio_ctrl(b *C.BIO, cmd C.int, arg1 C.long, arg2 unsafe.Pointer) ( + rc C.long) { + + defer func() { + if err := recover(); err != nil { + logger.Critf("openssl: readBioCtrl panic'd: %v", err) + rc = -1 + } + }() + switch cmd { + case C.BIO_CTRL_PENDING: + return readBioPending(b) + case C.BIO_CTRL_DUP, C.BIO_CTRL_FLUSH: + return 1 + default: + return 0 + } +} + +func readBioPending(b *C.BIO) C.long { + ptr := loadReadPtr(b) + if ptr == nil { + return 0 + } + ptr.data_mtx.Lock() + defer ptr.data_mtx.Unlock() + return C.long(len(ptr.buf)) +} + +func (b *readBio) ReadFromOnce(r io.Reader) (n int, err error) { + b.op_mtx.Lock() + defer b.op_mtx.Unlock() + + // make sure we have a destination that fits at least one SSL record + b.data_mtx.Lock() + if cap(b.buf) < len(b.buf)+SSLRecordSize { + new_buf := make([]byte, len(b.buf), len(b.buf)+SSLRecordSize) + copy(new_buf, b.buf) + b.buf = new_buf + } + dst := b.buf[len(b.buf):cap(b.buf)] + dst_slice := b.buf + b.data_mtx.Unlock() + + n, err = r.Read(dst) + b.data_mtx.Lock() + defer b.data_mtx.Unlock() + if n > 0 { + if len(dst_slice) != len(b.buf) { + // someone shrunk the buffer, so we read in too far ahead and we + // need to slide backwards + copy(b.buf[len(b.buf):len(b.buf)+n], dst) + } + b.buf = b.buf[:len(b.buf)+n] + } + return n, err +} + +func (b *readBio) MakeCBIO() *C.BIO { + rv := C.X_BIO_new_read_bio() + token := readBioMapping.Add(unsafe.Pointer(b)) + C.X_BIO_set_data(rv, unsafe.Pointer(token)) + return rv +} + 
+func (self *readBio) Disconnect(b *C.BIO) { + if loadReadPtr(b) == self { + readBioMapping.Del(token(C.X_BIO_get_data(b))) + C.X_BIO_set_data(b, nil) + } +} + +func (b *readBio) MarkEOF() { + b.data_mtx.Lock() + defer b.data_mtx.Unlock() + b.eof = true +} + +type anyBio C.BIO + +func asAnyBio(b *C.BIO) *anyBio { return (*anyBio)(b) } + +func (b *anyBio) Read(buf []byte) (n int, err error) { + if len(buf) == 0 { + return 0, nil + } + n = int(C.X_BIO_read((*C.BIO)(b), unsafe.Pointer(&buf[0]), C.int(len(buf)))) + if n <= 0 { + return 0, io.EOF + } + return n, nil +} + +func (b *anyBio) Write(buf []byte) (written int, err error) { + if len(buf) == 0 { + return 0, nil + } + n := int(C.X_BIO_write((*C.BIO)(b), unsafe.Pointer(&buf[0]), + C.int(len(buf)))) + if n != len(buf) { + return n, errors.New("BIO write failed") + } + return n, nil +} diff --git a/vendor/github.com/libp2p/go-openssl/build.go b/vendor/github.com/libp2p/go-openssl/build.go new file mode 100644 index 0000000000..d3f19d82a0 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/build.go @@ -0,0 +1,24 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !openssl_static + +package openssl + +// #cgo linux windows freebsd openbsd solaris pkg-config: libssl libcrypto +// #cgo linux freebsd openbsd solaris CFLAGS: -Wno-deprecated-declarations +// #cgo darwin CFLAGS: -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/openssl/include -Wno-deprecated-declarations +// #cgo darwin LDFLAGS: -L/usr/local/opt/openssl@1.1/lib -L/usr/local/opt/openssl/lib -lssl -lcrypto +// #cgo windows CFLAGS: -DWIN32_LEAN_AND_MEAN +import "C" diff --git a/vendor/github.com/libp2p/go-openssl/build_static.go b/vendor/github.com/libp2p/go-openssl/build_static.go new file mode 100644 index 0000000000..69fad0a4f2 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/build_static.go @@ -0,0 +1,24 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build openssl_static + +package openssl + +// #cgo linux windows freebsd openbsd solaris pkg-config: --static libssl libcrypto +// #cgo linux freebsd openbsd solaris CFLAGS: -Wno-deprecated-declarations +// #cgo darwin CFLAGS: -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/openssl/include -Wno-deprecated-declarations +// #cgo darwin LDFLAGS: -L/usr/local/opt/openssl@1.1/lib -L/usr/local/opt/openssl/lib -lssl -lcrypto +// #cgo windows CFLAGS: -DWIN32_LEAN_AND_MEAN +import "C" diff --git a/vendor/github.com/libp2p/go-openssl/cert.go b/vendor/github.com/libp2p/go-openssl/cert.go new file mode 100644 index 0000000000..e841e22cc6 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/cert.go @@ -0,0 +1,415 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openssl + +// #include "shim.h" +import "C" + +import ( + "errors" + "io/ioutil" + "math/big" + "runtime" + "time" + "unsafe" +) + +type EVP_MD int + +const ( + EVP_NULL EVP_MD = iota + EVP_MD5 EVP_MD = iota + EVP_MD4 EVP_MD = iota + EVP_SHA EVP_MD = iota + EVP_SHA1 EVP_MD = iota + EVP_DSS EVP_MD = iota + EVP_DSS1 EVP_MD = iota + EVP_MDC2 EVP_MD = iota + EVP_RIPEMD160 EVP_MD = iota + EVP_SHA224 EVP_MD = iota + EVP_SHA256 EVP_MD = iota + EVP_SHA384 EVP_MD = iota + EVP_SHA512 EVP_MD = iota +) + +// X509_Version represents a version on an x509 certificate. 
+type X509_Version int + +// Specify constants for x509 versions because the standard states that they +// are represented internally as one lower than the common version name. +const ( + X509_V1 X509_Version = 0 + X509_V3 X509_Version = 2 +) + +type Certificate struct { + x *C.X509 + Issuer *Certificate + ref interface{} + pubKey PublicKey +} + +type CertificateInfo struct { + Serial *big.Int + Issued time.Duration + Expires time.Duration + Country string + Organization string + CommonName string +} + +type Name struct { + name *C.X509_NAME +} + +// Allocate and return a new Name object. +func NewName() (*Name, error) { + n := C.X509_NAME_new() + if n == nil { + return nil, errors.New("could not create x509 name") + } + name := &Name{name: n} + runtime.SetFinalizer(name, func(n *Name) { + C.X509_NAME_free(n.name) + }) + return name, nil +} + +// AddTextEntry appends a text entry to an X509 NAME. +func (n *Name) AddTextEntry(field, value string) error { + cfield := C.CString(field) + defer C.free(unsafe.Pointer(cfield)) + cvalue := (*C.uchar)(unsafe.Pointer(C.CString(value))) + defer C.free(unsafe.Pointer(cvalue)) + ret := C.X509_NAME_add_entry_by_txt( + n.name, cfield, C.MBSTRING_ASC, cvalue, -1, -1, 0) + if ret != 1 { + return errors.New("failed to add x509 name text entry") + } + return nil +} + +// AddTextEntries allows adding multiple entries to a name in one call. +func (n *Name) AddTextEntries(entries map[string]string) error { + for f, v := range entries { + if err := n.AddTextEntry(f, v); err != nil { + return err + } + } + return nil +} + +// GetEntry returns a name entry based on NID. If no entry, then ("", false) is +// returned. 
+func (n *Name) GetEntry(nid NID) (entry string, ok bool) { + entrylen := C.X509_NAME_get_text_by_NID(n.name, C.int(nid), nil, 0) + if entrylen == -1 { + return "", false + } + buf := (*C.char)(C.malloc(C.size_t(entrylen + 1))) + defer C.free(unsafe.Pointer(buf)) + C.X509_NAME_get_text_by_NID(n.name, C.int(nid), buf, entrylen+1) + return C.GoStringN(buf, entrylen), true +} + +// NewCertificate generates a basic certificate based +// on the provided CertificateInfo struct +func NewCertificate(info *CertificateInfo, key PublicKey) (*Certificate, error) { + c := &Certificate{x: C.X509_new()} + runtime.SetFinalizer(c, func(c *Certificate) { + C.X509_free(c.x) + }) + + name, err := c.GetSubjectName() + if err != nil { + return nil, err + } + err = name.AddTextEntries(map[string]string{ + "C": info.Country, + "O": info.Organization, + "CN": info.CommonName, + }) + if err != nil { + return nil, err + } + // self-issue for now + if err := c.SetIssuerName(name); err != nil { + return nil, err + } + if err := c.SetSerial(info.Serial); err != nil { + return nil, err + } + if err := c.SetIssueDate(info.Issued); err != nil { + return nil, err + } + if err := c.SetExpireDate(info.Expires); err != nil { + return nil, err + } + if err := c.SetPubKey(key); err != nil { + return nil, err + } + return c, nil +} + +func (c *Certificate) GetSubjectName() (*Name, error) { + n := C.X509_get_subject_name(c.x) + if n == nil { + return nil, errors.New("failed to get subject name") + } + return &Name{name: n}, nil +} + +func (c *Certificate) GetIssuerName() (*Name, error) { + n := C.X509_get_issuer_name(c.x) + if n == nil { + return nil, errors.New("failed to get issuer name") + } + return &Name{name: n}, nil +} + +func (c *Certificate) SetSubjectName(name *Name) error { + if C.X509_set_subject_name(c.x, name.name) != 1 { + return errors.New("failed to set subject name") + } + return nil +} + +// SetIssuer updates the stored Issuer cert +// and the internal x509 Issuer Name of a certificate. 
+// The stored Issuer reference is used when adding extensions. +func (c *Certificate) SetIssuer(issuer *Certificate) error { + name, err := issuer.GetSubjectName() + if err != nil { + return err + } + if err = c.SetIssuerName(name); err != nil { + return err + } + c.Issuer = issuer + return nil +} + +// SetIssuerName populates the issuer name of a certificate. +// Use SetIssuer instead, if possible. +func (c *Certificate) SetIssuerName(name *Name) error { + if C.X509_set_issuer_name(c.x, name.name) != 1 { + return errors.New("failed to set subject name") + } + return nil +} + +// SetSerial sets the serial of a certificate. +func (c *Certificate) SetSerial(serial *big.Int) error { + sno := C.ASN1_INTEGER_new() + defer C.ASN1_INTEGER_free(sno) + bn := C.BN_new() + defer C.BN_free(bn) + + serialBytes := serial.Bytes() + if bn = C.BN_bin2bn((*C.uchar)(unsafe.Pointer(&serialBytes[0])), C.int(len(serialBytes)), bn); bn == nil { + return errors.New("failed to set serial") + } + if sno = C.BN_to_ASN1_INTEGER(bn, sno); sno == nil { + return errors.New("failed to set serial") + } + if C.X509_set_serialNumber(c.x, sno) != 1 { + return errors.New("failed to set serial") + } + return nil +} + +// SetIssueDate sets the certificate issue date relative to the current time. +func (c *Certificate) SetIssueDate(when time.Duration) error { + offset := C.long(when / time.Second) + result := C.X509_gmtime_adj(C.X_X509_get0_notBefore(c.x), offset) + if result == nil { + return errors.New("failed to set issue date") + } + return nil +} + +// SetExpireDate sets the certificate issue date relative to the current time. +func (c *Certificate) SetExpireDate(when time.Duration) error { + offset := C.long(when / time.Second) + result := C.X509_gmtime_adj(C.X_X509_get0_notAfter(c.x), offset) + if result == nil { + return errors.New("failed to set expire date") + } + return nil +} + +// SetPubKey assigns a new public key to a certificate. 
+func (c *Certificate) SetPubKey(pubKey PublicKey) error { + c.pubKey = pubKey + if C.X509_set_pubkey(c.x, pubKey.evpPKey()) != 1 { + return errors.New("failed to set public key") + } + return nil +} + +// Sign a certificate using a private key and a digest name. +// Accepted digest names are 'sha256', 'sha384', and 'sha512'. +func (c *Certificate) Sign(privKey PrivateKey, digest EVP_MD) error { + switch digest { + case EVP_SHA256: + case EVP_SHA384: + case EVP_SHA512: + default: + return errors.New("Unsupported digest" + + "You're probably looking for 'EVP_SHA256' or 'EVP_SHA512'.") + } + return c.insecureSign(privKey, digest) +} + +func (c *Certificate) insecureSign(privKey PrivateKey, digest EVP_MD) error { + var md *C.EVP_MD = getDigestFunction(digest) + if C.X509_sign(c.x, privKey.evpPKey(), md) <= 0 { + return errors.New("failed to sign certificate") + } + return nil +} + +func getDigestFunction(digest EVP_MD) (md *C.EVP_MD) { + switch digest { + // please don't use these digest functions + case EVP_NULL: + md = C.X_EVP_md_null() + case EVP_MD5: + md = C.X_EVP_md5() + case EVP_SHA: + md = C.X_EVP_sha() + case EVP_SHA1: + md = C.X_EVP_sha1() + case EVP_DSS: + md = C.X_EVP_dss() + case EVP_DSS1: + md = C.X_EVP_dss1() + case EVP_RIPEMD160: + md = C.X_EVP_ripemd160() + case EVP_SHA224: + md = C.X_EVP_sha224() + // you actually want one of these + case EVP_SHA256: + md = C.X_EVP_sha256() + case EVP_SHA384: + md = C.X_EVP_sha384() + case EVP_SHA512: + md = C.X_EVP_sha512() + } + return md +} + +// Add an extension to a certificate. +// Extension constants are NID_* as found in openssl. 
+func (c *Certificate) AddExtension(nid NID, value string) error { + issuer := c + if c.Issuer != nil { + issuer = c.Issuer + } + var ctx C.X509V3_CTX + C.X509V3_set_ctx(&ctx, c.x, issuer.x, nil, nil, 0) + ex := C.X509V3_EXT_conf_nid(nil, &ctx, C.int(nid), C.CString(value)) + if ex == nil { + return errors.New("failed to create x509v3 extension") + } + defer C.X509_EXTENSION_free(ex) + if C.X509_add_ext(c.x, ex, -1) <= 0 { + return errors.New("failed to add x509v3 extension") + } + return nil +} + +// Wraps AddExtension using a map of NID to text extension. +// Will return without finishing if it encounters an error. +func (c *Certificate) AddExtensions(extensions map[NID]string) error { + for nid, value := range extensions { + if err := c.AddExtension(nid, value); err != nil { + return err + } + } + return nil +} + +// LoadCertificateFromPEM loads an X509 certificate from a PEM-encoded block. +func LoadCertificateFromPEM(pem_block []byte) (*Certificate, error) { + if len(pem_block) == 0 { + return nil, errors.New("empty pem block") + } + runtime.LockOSThread() + defer runtime.UnlockOSThread() + bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]), + C.int(len(pem_block))) + cert := C.PEM_read_bio_X509(bio, nil, nil, nil) + C.BIO_free(bio) + if cert == nil { + return nil, errorFromErrorQueue() + } + x := &Certificate{x: cert} + runtime.SetFinalizer(x, func(x *Certificate) { + C.X509_free(x.x) + }) + return x, nil +} + +// MarshalPEM converts the X509 certificate to PEM-encoded format +func (c *Certificate) MarshalPEM() (pem_block []byte, err error) { + bio := C.BIO_new(C.BIO_s_mem()) + if bio == nil { + return nil, errors.New("failed to allocate memory BIO") + } + defer C.BIO_free(bio) + if int(C.PEM_write_bio_X509(bio, c.x)) != 1 { + return nil, errors.New("failed dumping certificate") + } + return ioutil.ReadAll(asAnyBio(bio)) +} + +// PublicKey returns the public key embedded in the X509 certificate. 
+func (c *Certificate) PublicKey() (PublicKey, error) { + pkey := C.X509_get_pubkey(c.x) + if pkey == nil { + return nil, errors.New("no public key found") + } + key := &pKey{key: pkey} + runtime.SetFinalizer(key, func(key *pKey) { + C.EVP_PKEY_free(key.key) + }) + return key, nil +} + +// GetSerialNumberHex returns the certificate's serial number in hex format +func (c *Certificate) GetSerialNumberHex() (serial string) { + asn1_i := C.X509_get_serialNumber(c.x) + bignum := C.ASN1_INTEGER_to_BN(asn1_i, nil) + hex := C.BN_bn2hex(bignum) + serial = C.GoString(hex) + C.BN_free(bignum) + C.X_OPENSSL_free(unsafe.Pointer(hex)) + return +} + +// GetVersion returns the X509 version of the certificate. +func (c *Certificate) GetVersion() X509_Version { + return X509_Version(C.X_X509_get_version(c.x)) +} + +// SetVersion sets the X509 version of the certificate. +func (c *Certificate) SetVersion(version X509_Version) error { + cvers := C.long(version) + if C.X_X509_set_version(c.x, cvers) != 1 { + return errors.New("failed to set certificate version") + } + return nil +} diff --git a/vendor/github.com/libp2p/go-openssl/ciphers.go b/vendor/github.com/libp2p/go-openssl/ciphers.go new file mode 100644 index 0000000000..509bf6410f --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/ciphers.go @@ -0,0 +1,335 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package openssl + +// #include "shim.h" +import "C" + +import ( + "errors" + "fmt" + "runtime" + "unsafe" +) + +const ( + GCM_TAG_MAXLEN = 16 +) + +type CipherCtx interface { + Cipher() *Cipher + BlockSize() int + KeySize() int + IVSize() int +} + +type Cipher struct { + ptr *C.EVP_CIPHER +} + +func (c *Cipher) Nid() NID { + return NID(C.X_EVP_CIPHER_nid(c.ptr)) +} + +func (c *Cipher) ShortName() (string, error) { + return Nid2ShortName(c.Nid()) +} + +func (c *Cipher) BlockSize() int { + return int(C.X_EVP_CIPHER_block_size(c.ptr)) +} + +func (c *Cipher) KeySize() int { + return int(C.X_EVP_CIPHER_key_length(c.ptr)) +} + +func (c *Cipher) IVSize() int { + return int(C.X_EVP_CIPHER_iv_length(c.ptr)) +} + +func Nid2ShortName(nid NID) (string, error) { + sn := C.OBJ_nid2sn(C.int(nid)) + if sn == nil { + return "", fmt.Errorf("NID %d not found", nid) + } + return C.GoString(sn), nil +} + +func GetCipherByName(name string) (*Cipher, error) { + cname := C.CString(name) + defer C.free(unsafe.Pointer(cname)) + p := C.EVP_get_cipherbyname(cname) + if p == nil { + return nil, fmt.Errorf("Cipher %v not found", name) + } + // we can consider ciphers to use static mem; don't need to free + return &Cipher{ptr: p}, nil +} + +func GetCipherByNid(nid NID) (*Cipher, error) { + sn, err := Nid2ShortName(nid) + if err != nil { + return nil, err + } + return GetCipherByName(sn) +} + +type cipherCtx struct { + ctx *C.EVP_CIPHER_CTX +} + +func newCipherCtx() (*cipherCtx, error) { + cctx := C.EVP_CIPHER_CTX_new() + if cctx == nil { + return nil, errors.New("failed to allocate cipher context") + } + ctx := &cipherCtx{cctx} + runtime.SetFinalizer(ctx, func(ctx *cipherCtx) { + C.EVP_CIPHER_CTX_free(ctx.ctx) + }) + return ctx, nil +} + +func (ctx *cipherCtx) applyKeyAndIV(key, iv []byte) error { + var kptr, iptr *C.uchar + if key != nil { + if len(key) != ctx.KeySize() { + return fmt.Errorf("bad key size (%d bytes instead of %d)", + len(key), ctx.KeySize()) + } + kptr = (*C.uchar)(&key[0]) 
+ } + if iv != nil { + if len(iv) != ctx.IVSize() { + return fmt.Errorf("bad IV size (%d bytes instead of %d)", + len(iv), ctx.IVSize()) + } + iptr = (*C.uchar)(&iv[0]) + } + if kptr != nil || iptr != nil { + var res C.int + if C.X_EVP_CIPHER_CTX_encrypting(ctx.ctx) != 0 { + res = C.EVP_EncryptInit_ex(ctx.ctx, nil, nil, kptr, iptr) + } else { + res = C.EVP_DecryptInit_ex(ctx.ctx, nil, nil, kptr, iptr) + } + if 1 != res { + return errors.New("failed to apply key/IV") + } + } + return nil +} + +func (ctx *cipherCtx) Cipher() *Cipher { + return &Cipher{ptr: C.X_EVP_CIPHER_CTX_cipher(ctx.ctx)} +} + +func (ctx *cipherCtx) BlockSize() int { + return int(C.X_EVP_CIPHER_CTX_block_size(ctx.ctx)) +} + +func (ctx *cipherCtx) KeySize() int { + return int(C.X_EVP_CIPHER_CTX_key_length(ctx.ctx)) +} + +func (ctx *cipherCtx) IVSize() int { + return int(C.X_EVP_CIPHER_CTX_iv_length(ctx.ctx)) +} + +func (ctx *cipherCtx) SetPadding(pad bool) { + if pad { + C.X_EVP_CIPHER_CTX_set_padding(ctx.ctx, 1) + } else { + C.X_EVP_CIPHER_CTX_set_padding(ctx.ctx, 0) + } +} + +func (ctx *cipherCtx) setCtrl(code, arg int) error { + res := C.EVP_CIPHER_CTX_ctrl(ctx.ctx, C.int(code), C.int(arg), nil) + if res != 1 { + return fmt.Errorf("failed to set code %d to %d [result %d]", + code, arg, res) + } + return nil +} + +func (ctx *cipherCtx) setCtrlBytes(code, arg int, value []byte) error { + res := C.EVP_CIPHER_CTX_ctrl(ctx.ctx, C.int(code), C.int(arg), + unsafe.Pointer(&value[0])) + if res != 1 { + return fmt.Errorf("failed to set code %d with arg %d to %x [result %d]", + code, arg, value, res) + } + return nil +} + +func (ctx *cipherCtx) getCtrlInt(code, arg int) (int, error) { + var returnVal C.int + res := C.EVP_CIPHER_CTX_ctrl(ctx.ctx, C.int(code), C.int(arg), + unsafe.Pointer(&returnVal)) + if res != 1 { + return 0, fmt.Errorf("failed to get code %d with arg %d [result %d]", + code, arg, res) + } + return int(returnVal), nil +} + +func (ctx *cipherCtx) getCtrlBytes(code, arg, expectsize int) 
([]byte, error) { + returnVal := make([]byte, expectsize) + res := C.EVP_CIPHER_CTX_ctrl(ctx.ctx, C.int(code), C.int(arg), + unsafe.Pointer(&returnVal[0])) + if res != 1 { + return nil, fmt.Errorf("failed to get code %d with arg %d [result %d]", + code, arg, res) + } + return returnVal, nil +} + +type EncryptionCipherCtx interface { + CipherCtx + + // pass in plaintext, get back ciphertext. can be called + // multiple times as needed + EncryptUpdate(input []byte) ([]byte, error) + + // call after all plaintext has been passed in; may return + // additional ciphertext if needed to finish off a block + // or extra padding information + EncryptFinal() ([]byte, error) +} + +type DecryptionCipherCtx interface { + CipherCtx + + // pass in ciphertext, get back plaintext. can be called + // multiple times as needed + DecryptUpdate(input []byte) ([]byte, error) + + // call after all ciphertext has been passed in; may return + // additional plaintext if needed to finish off a block + DecryptFinal() ([]byte, error) +} + +type encryptionCipherCtx struct { + *cipherCtx +} + +type decryptionCipherCtx struct { + *cipherCtx +} + +func newEncryptionCipherCtx(c *Cipher, e *Engine, key, iv []byte) ( + *encryptionCipherCtx, error) { + if c == nil { + return nil, errors.New("null cipher not allowed") + } + ctx, err := newCipherCtx() + if err != nil { + return nil, err + } + var eptr *C.ENGINE + if e != nil { + eptr = e.e + } + if 1 != C.EVP_EncryptInit_ex(ctx.ctx, c.ptr, eptr, nil, nil) { + return nil, errors.New("failed to initialize cipher context") + } + err = ctx.applyKeyAndIV(key, iv) + if err != nil { + return nil, err + } + return &encryptionCipherCtx{cipherCtx: ctx}, nil +} + +func newDecryptionCipherCtx(c *Cipher, e *Engine, key, iv []byte) ( + *decryptionCipherCtx, error) { + if c == nil { + return nil, errors.New("null cipher not allowed") + } + ctx, err := newCipherCtx() + if err != nil { + return nil, err + } + var eptr *C.ENGINE + if e != nil { + eptr = e.e + } + if 1 != 
C.EVP_DecryptInit_ex(ctx.ctx, c.ptr, eptr, nil, nil) { + return nil, errors.New("failed to initialize cipher context") + } + err = ctx.applyKeyAndIV(key, iv) + if err != nil { + return nil, err + } + return &decryptionCipherCtx{cipherCtx: ctx}, nil +} + +func NewEncryptionCipherCtx(c *Cipher, e *Engine, key, iv []byte) ( + EncryptionCipherCtx, error) { + return newEncryptionCipherCtx(c, e, key, iv) +} + +func NewDecryptionCipherCtx(c *Cipher, e *Engine, key, iv []byte) ( + DecryptionCipherCtx, error) { + return newDecryptionCipherCtx(c, e, key, iv) +} + +func (ctx *encryptionCipherCtx) EncryptUpdate(input []byte) ([]byte, error) { + if len(input) == 0 { + return nil, nil + } + outbuf := make([]byte, len(input)+ctx.BlockSize()) + outlen := C.int(len(outbuf)) + res := C.EVP_EncryptUpdate(ctx.ctx, (*C.uchar)(&outbuf[0]), &outlen, + (*C.uchar)(&input[0]), C.int(len(input))) + if res != 1 { + return nil, fmt.Errorf("failed to encrypt [result %d]", res) + } + return outbuf[:outlen], nil +} + +func (ctx *decryptionCipherCtx) DecryptUpdate(input []byte) ([]byte, error) { + if len(input) == 0 { + return nil, nil + } + outbuf := make([]byte, len(input)+ctx.BlockSize()) + outlen := C.int(len(outbuf)) + res := C.EVP_DecryptUpdate(ctx.ctx, (*C.uchar)(&outbuf[0]), &outlen, + (*C.uchar)(&input[0]), C.int(len(input))) + if res != 1 { + return nil, fmt.Errorf("failed to decrypt [result %d]", res) + } + return outbuf[:outlen], nil +} + +func (ctx *encryptionCipherCtx) EncryptFinal() ([]byte, error) { + outbuf := make([]byte, ctx.BlockSize()) + var outlen C.int + if 1 != C.EVP_EncryptFinal_ex(ctx.ctx, (*C.uchar)(&outbuf[0]), &outlen) { + return nil, errors.New("encryption failed") + } + return outbuf[:outlen], nil +} + +func (ctx *decryptionCipherCtx) DecryptFinal() ([]byte, error) { + outbuf := make([]byte, ctx.BlockSize()) + var outlen C.int + if 1 != C.EVP_DecryptFinal_ex(ctx.ctx, (*C.uchar)(&outbuf[0]), &outlen) { + // this may mean the tag failed to verify- all previous 
plaintext + // returned must be considered faked and invalid + return nil, errors.New("decryption failed") + } + return outbuf[:outlen], nil +} diff --git a/vendor/github.com/libp2p/go-openssl/ciphers_gcm.go b/vendor/github.com/libp2p/go-openssl/ciphers_gcm.go new file mode 100644 index 0000000000..7b08e0fd99 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/ciphers_gcm.go @@ -0,0 +1,152 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openssl + +// #include +import "C" + +import ( + "errors" + "fmt" +) + +type AuthenticatedEncryptionCipherCtx interface { + EncryptionCipherCtx + + // data passed in to ExtraData() is part of the final output; it is + // not encrypted itself, but is part of the authenticated data. 
when + // decrypting or authenticating, pass back with the decryption + // context's ExtraData() + ExtraData([]byte) error + + // use after finalizing encryption to get the authenticating tag + GetTag() ([]byte, error) +} + +type AuthenticatedDecryptionCipherCtx interface { + DecryptionCipherCtx + + // pass in any extra data that was added during encryption with the + // encryption context's ExtraData() + ExtraData([]byte) error + + // use before finalizing decryption to tell the library what the + // tag is expected to be + SetTag([]byte) error +} + +type authEncryptionCipherCtx struct { + *encryptionCipherCtx +} + +type authDecryptionCipherCtx struct { + *decryptionCipherCtx +} + +func getGCMCipher(blocksize int) (*Cipher, error) { + var cipherptr *C.EVP_CIPHER + switch blocksize { + case 256: + cipherptr = C.EVP_aes_256_gcm() + case 192: + cipherptr = C.EVP_aes_192_gcm() + case 128: + cipherptr = C.EVP_aes_128_gcm() + default: + return nil, fmt.Errorf("unknown block size %d", blocksize) + } + return &Cipher{ptr: cipherptr}, nil +} + +func NewGCMEncryptionCipherCtx(blocksize int, e *Engine, key, iv []byte) ( + AuthenticatedEncryptionCipherCtx, error) { + cipher, err := getGCMCipher(blocksize) + if err != nil { + return nil, err + } + ctx, err := newEncryptionCipherCtx(cipher, e, key, nil) + if err != nil { + return nil, err + } + if len(iv) > 0 { + err := ctx.setCtrl(C.EVP_CTRL_GCM_SET_IVLEN, len(iv)) + if err != nil { + return nil, fmt.Errorf("could not set IV len to %d: %s", + len(iv), err) + } + if 1 != C.EVP_EncryptInit_ex(ctx.ctx, nil, nil, nil, + (*C.uchar)(&iv[0])) { + return nil, errors.New("failed to apply IV") + } + } + return &authEncryptionCipherCtx{encryptionCipherCtx: ctx}, nil +} + +func NewGCMDecryptionCipherCtx(blocksize int, e *Engine, key, iv []byte) ( + AuthenticatedDecryptionCipherCtx, error) { + cipher, err := getGCMCipher(blocksize) + if err != nil { + return nil, err + } + ctx, err := newDecryptionCipherCtx(cipher, e, key, nil) + if err != 
nil { + return nil, err + } + if len(iv) > 0 { + err := ctx.setCtrl(C.EVP_CTRL_GCM_SET_IVLEN, len(iv)) + if err != nil { + return nil, fmt.Errorf("could not set IV len to %d: %s", + len(iv), err) + } + if 1 != C.EVP_DecryptInit_ex(ctx.ctx, nil, nil, nil, + (*C.uchar)(&iv[0])) { + return nil, errors.New("failed to apply IV") + } + } + return &authDecryptionCipherCtx{decryptionCipherCtx: ctx}, nil +} + +func (ctx *authEncryptionCipherCtx) ExtraData(aad []byte) error { + if aad == nil { + return nil + } + var outlen C.int + if 1 != C.EVP_EncryptUpdate(ctx.ctx, nil, &outlen, (*C.uchar)(&aad[0]), + C.int(len(aad))) { + return errors.New("failed to add additional authenticated data") + } + return nil +} + +func (ctx *authDecryptionCipherCtx) ExtraData(aad []byte) error { + if aad == nil { + return nil + } + var outlen C.int + if 1 != C.EVP_DecryptUpdate(ctx.ctx, nil, &outlen, (*C.uchar)(&aad[0]), + C.int(len(aad))) { + return errors.New("failed to add additional authenticated data") + } + return nil +} + +func (ctx *authEncryptionCipherCtx) GetTag() ([]byte, error) { + return ctx.getCtrlBytes(C.EVP_CTRL_GCM_GET_TAG, GCM_TAG_MAXLEN, + GCM_TAG_MAXLEN) +} + +func (ctx *authDecryptionCipherCtx) SetTag(tag []byte) error { + return ctx.setCtrlBytes(C.EVP_CTRL_GCM_SET_TAG, len(tag), tag) +} diff --git a/vendor/github.com/libp2p/go-openssl/conn.go b/vendor/github.com/libp2p/go-openssl/conn.go new file mode 100644 index 0000000000..2758034193 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/conn.go @@ -0,0 +1,620 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openssl + +// #include "shim.h" +import "C" + +import ( + "errors" + "fmt" + "io" + "net" + "runtime" + "sync" + "time" + "unsafe" + + "github.com/libp2p/go-openssl/utils" +) + +var ( + zeroReturn = errors.New("zero return") + wantRead = errors.New("want read") + wantWrite = errors.New("want write") + tryAgain = errors.New("try again") +) + +type Conn struct { + *SSL + + conn net.Conn + ctx *Ctx // for gc + into_ssl *readBio + from_ssl *writeBio + is_shutdown bool + mtx sync.Mutex + want_read_future *utils.Future +} + +type VerifyResult int + +const ( + Ok VerifyResult = C.X509_V_OK + UnableToGetIssuerCert VerifyResult = C.X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT + UnableToGetCrl VerifyResult = C.X509_V_ERR_UNABLE_TO_GET_CRL + UnableToDecryptCertSignature VerifyResult = C.X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE + UnableToDecryptCrlSignature VerifyResult = C.X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE + UnableToDecodeIssuerPublicKey VerifyResult = C.X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY + CertSignatureFailure VerifyResult = C.X509_V_ERR_CERT_SIGNATURE_FAILURE + CrlSignatureFailure VerifyResult = C.X509_V_ERR_CRL_SIGNATURE_FAILURE + CertNotYetValid VerifyResult = C.X509_V_ERR_CERT_NOT_YET_VALID + CertHasExpired VerifyResult = C.X509_V_ERR_CERT_HAS_EXPIRED + CrlNotYetValid VerifyResult = C.X509_V_ERR_CRL_NOT_YET_VALID + CrlHasExpired VerifyResult = C.X509_V_ERR_CRL_HAS_EXPIRED + ErrorInCertNotBeforeField VerifyResult = C.X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD + ErrorInCertNotAfterField VerifyResult = 
C.X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD + ErrorInCrlLastUpdateField VerifyResult = C.X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD + ErrorInCrlNextUpdateField VerifyResult = C.X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD + OutOfMem VerifyResult = C.X509_V_ERR_OUT_OF_MEM + DepthZeroSelfSignedCert VerifyResult = C.X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT + SelfSignedCertInChain VerifyResult = C.X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN + UnableToGetIssuerCertLocally VerifyResult = C.X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY + UnableToVerifyLeafSignature VerifyResult = C.X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE + CertChainTooLong VerifyResult = C.X509_V_ERR_CERT_CHAIN_TOO_LONG + CertRevoked VerifyResult = C.X509_V_ERR_CERT_REVOKED + InvalidCa VerifyResult = C.X509_V_ERR_INVALID_CA + PathLengthExceeded VerifyResult = C.X509_V_ERR_PATH_LENGTH_EXCEEDED + InvalidPurpose VerifyResult = C.X509_V_ERR_INVALID_PURPOSE + CertUntrusted VerifyResult = C.X509_V_ERR_CERT_UNTRUSTED + CertRejected VerifyResult = C.X509_V_ERR_CERT_REJECTED + SubjectIssuerMismatch VerifyResult = C.X509_V_ERR_SUBJECT_ISSUER_MISMATCH + AkidSkidMismatch VerifyResult = C.X509_V_ERR_AKID_SKID_MISMATCH + AkidIssuerSerialMismatch VerifyResult = C.X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH + KeyusageNoCertsign VerifyResult = C.X509_V_ERR_KEYUSAGE_NO_CERTSIGN + UnableToGetCrlIssuer VerifyResult = C.X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER + UnhandledCriticalExtension VerifyResult = C.X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION + KeyusageNoCrlSign VerifyResult = C.X509_V_ERR_KEYUSAGE_NO_CRL_SIGN + UnhandledCriticalCrlExtension VerifyResult = C.X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION + InvalidNonCa VerifyResult = C.X509_V_ERR_INVALID_NON_CA + ProxyPathLengthExceeded VerifyResult = C.X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED + KeyusageNoDigitalSignature VerifyResult = C.X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE + ProxyCertificatesNotAllowed VerifyResult = C.X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED + InvalidExtension VerifyResult = 
C.X509_V_ERR_INVALID_EXTENSION + InvalidPolicyExtension VerifyResult = C.X509_V_ERR_INVALID_POLICY_EXTENSION + NoExplicitPolicy VerifyResult = C.X509_V_ERR_NO_EXPLICIT_POLICY + UnnestedResource VerifyResult = C.X509_V_ERR_UNNESTED_RESOURCE + ApplicationVerification VerifyResult = C.X509_V_ERR_APPLICATION_VERIFICATION +) + +func newSSL(ctx *C.SSL_CTX) (*C.SSL, error) { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + ssl := C.SSL_new(ctx) + if ssl == nil { + return nil, errorFromErrorQueue() + } + return ssl, nil +} + +func newConn(conn net.Conn, ctx *Ctx) (*Conn, error) { + ssl, err := newSSL(ctx.ctx) + if err != nil { + return nil, err + } + + into_ssl := &readBio{} + from_ssl := &writeBio{} + + if ctx.GetMode()&ReleaseBuffers > 0 { + into_ssl.release_buffers = true + from_ssl.release_buffers = true + } + + into_ssl_cbio := into_ssl.MakeCBIO() + from_ssl_cbio := from_ssl.MakeCBIO() + if into_ssl_cbio == nil || from_ssl_cbio == nil { + // these frees are null safe + C.BIO_free(into_ssl_cbio) + C.BIO_free(from_ssl_cbio) + C.SSL_free(ssl) + return nil, errors.New("failed to allocate memory BIO") + } + + // the ssl object takes ownership of these objects now + C.SSL_set_bio(ssl, into_ssl_cbio, from_ssl_cbio) + + s := &SSL{ssl: ssl} + C.SSL_set_ex_data(s.ssl, get_ssl_idx(), unsafe.Pointer(s)) + + c := &Conn{ + SSL: s, + + conn: conn, + ctx: ctx, + into_ssl: into_ssl, + from_ssl: from_ssl} + runtime.SetFinalizer(c, func(c *Conn) { + c.into_ssl.Disconnect(into_ssl_cbio) + c.from_ssl.Disconnect(from_ssl_cbio) + C.SSL_free(c.ssl) + }) + return c, nil +} + +// Client wraps an existing stream connection and puts it in the connect state +// for any subsequent handshakes. +// +// IMPORTANT NOTE: if you use this method instead of Dial to construct an SSL +// connection, you are responsible for verifying the peer's hostname. +// Otherwise, you are vulnerable to MITM attacks. +// +// Client also does not set up SNI for you like Dial does. 
+// +// Client connections probably won't work for you unless you set a verify +// location or add some certs to the certificate store of the client context +// you're using. This library is not nice enough to use the system certificate +// store by default for you yet. +func Client(conn net.Conn, ctx *Ctx) (*Conn, error) { + c, err := newConn(conn, ctx) + if err != nil { + return nil, err + } + C.SSL_set_connect_state(c.ssl) + return c, nil +} + +// Server wraps an existing stream connection and puts it in the accept state +// for any subsequent handshakes. +func Server(conn net.Conn, ctx *Ctx) (*Conn, error) { + c, err := newConn(conn, ctx) + if err != nil { + return nil, err + } + C.SSL_set_accept_state(c.ssl) + return c, nil +} + +func (c *Conn) GetCtx() *Ctx { return c.ctx } + +func (c *Conn) CurrentCipher() (string, error) { + p := C.X_SSL_get_cipher_name(c.ssl) + if p == nil { + return "", errors.New("Session not established") + } + + return C.GoString(p), nil +} + +func (c *Conn) fillInputBuffer() error { + for { + n, err := c.into_ssl.ReadFromOnce(c.conn) + if n == 0 && err == nil { + continue + } + if err == io.EOF { + c.into_ssl.MarkEOF() + return c.Close() + } + return err + } +} + +func (c *Conn) flushOutputBuffer() error { + _, err := c.from_ssl.WriteTo(c.conn) + return err +} + +func (c *Conn) getErrorHandler(rv C.int, errno error) func() error { + errcode := C.SSL_get_error(c.ssl, rv) + switch errcode { + case C.SSL_ERROR_ZERO_RETURN: + return func() error { + c.Close() + return io.ErrUnexpectedEOF + } + case C.SSL_ERROR_WANT_READ: + go c.flushOutputBuffer() + if c.want_read_future != nil { + want_read_future := c.want_read_future + return func() error { + _, err := want_read_future.Get() + return err + } + } + c.want_read_future = utils.NewFuture() + want_read_future := c.want_read_future + return func() (err error) { + defer func() { + c.mtx.Lock() + c.want_read_future = nil + c.mtx.Unlock() + want_read_future.Set(nil, err) + }() + err = 
c.fillInputBuffer() + if err != nil { + return err + } + return tryAgain + } + case C.SSL_ERROR_WANT_WRITE: + return func() error { + err := c.flushOutputBuffer() + if err != nil { + return err + } + return tryAgain + } + case C.SSL_ERROR_SYSCALL: + var err error + if C.ERR_peek_error() == 0 { + switch rv { + case 0: + err = errors.New("protocol-violating EOF") + case -1: + err = errno + default: + err = errorFromErrorQueue() + } + } else { + err = errorFromErrorQueue() + } + return func() error { return err } + default: + err := errorFromErrorQueue() + return func() error { return err } + } +} + +func (c *Conn) handleError(errcb func() error) error { + if errcb != nil { + return errcb() + } + return nil +} + +func (c *Conn) handshake() func() error { + c.mtx.Lock() + defer c.mtx.Unlock() + if c.is_shutdown { + return func() error { return io.ErrUnexpectedEOF } + } + runtime.LockOSThread() + defer runtime.UnlockOSThread() + rv, errno := C.SSL_do_handshake(c.ssl) + if rv > 0 { + return nil + } + return c.getErrorHandler(rv, errno) +} + +// Handshake performs an SSL handshake. If a handshake is not manually +// triggered, it will run before the first I/O on the encrypted stream. +func (c *Conn) Handshake() error { + err := tryAgain + for err == tryAgain { + err = c.handleError(c.handshake()) + } + go c.flushOutputBuffer() + return err +} + +// PeerCertificate returns the Certificate of the peer with which you're +// communicating. Only valid after a handshake. 
+func (c *Conn) PeerCertificate() (*Certificate, error) { + c.mtx.Lock() + defer c.mtx.Unlock() + if c.is_shutdown { + return nil, errors.New("connection closed") + } + x := C.SSL_get_peer_certificate(c.ssl) + if x == nil { + return nil, errors.New("no peer certificate found") + } + cert := &Certificate{x: x} + runtime.SetFinalizer(cert, func(cert *Certificate) { + C.X509_free(cert.x) + }) + return cert, nil +} + +// loadCertificateStack loads up a stack of x509 certificates and returns them, +// handling memory ownership. +func (c *Conn) loadCertificateStack(sk *C.struct_stack_st_X509) ( + rv []*Certificate) { + + sk_num := int(C.X_sk_X509_num(sk)) + rv = make([]*Certificate, 0, sk_num) + for i := 0; i < sk_num; i++ { + x := C.X_sk_X509_value(sk, C.int(i)) + // ref holds on to the underlying connection memory so we don't need to + // worry about incrementing refcounts manually or freeing the X509 + rv = append(rv, &Certificate{x: x, ref: c}) + } + return rv +} + +// PeerCertificateChain returns the certificate chain of the peer. If called on +// the client side, the stack also contains the peer's certificate; if called +// on the server side, the peer's certificate must be obtained separately using +// PeerCertificate. 
+func (c *Conn) PeerCertificateChain() (rv []*Certificate, err error) { + c.mtx.Lock() + defer c.mtx.Unlock() + if c.is_shutdown { + return nil, errors.New("connection closed") + } + sk := C.SSL_get_peer_cert_chain(c.ssl) + if sk == nil { + return nil, errors.New("no peer certificates found") + } + return c.loadCertificateStack(sk), nil +} + +type ConnectionState struct { + Certificate *Certificate + CertificateError error + CertificateChain []*Certificate + CertificateChainError error + SessionReused bool +} + +func (c *Conn) ConnectionState() (rv ConnectionState) { + rv.Certificate, rv.CertificateError = c.PeerCertificate() + rv.CertificateChain, rv.CertificateChainError = c.PeerCertificateChain() + rv.SessionReused = c.SessionReused() + return +} + +func (c *Conn) shutdown() func() error { + c.mtx.Lock() + defer c.mtx.Unlock() + runtime.LockOSThread() + defer runtime.UnlockOSThread() + rv, errno := C.SSL_shutdown(c.ssl) + if rv > 0 { + return nil + } + if rv == 0 { + // The OpenSSL docs say that in this case, the shutdown is not + // finished, and we should call SSL_shutdown() a second time, if a + // bidirectional shutdown is going to be performed. Further, the + // output of SSL_get_error may be misleading, as an erroneous + // SSL_ERROR_SYSCALL may be flagged even though no error occurred. + // So, TODO: revisit bidrectional shutdown, possibly trying again. + // Note: some broken clients won't engage in bidirectional shutdown + // without tickling them to close by sending a TCP_FIN packet, or + // shutting down the write-side of the connection. 
+ return nil + } else { + return c.getErrorHandler(rv, errno) + } +} + +func (c *Conn) shutdownLoop() error { + err := tryAgain + shutdown_tries := 0 + for err == tryAgain { + shutdown_tries = shutdown_tries + 1 + err = c.handleError(c.shutdown()) + if err == nil { + return c.flushOutputBuffer() + } + if err == tryAgain && shutdown_tries >= 2 { + return errors.New("shutdown requested a third time?") + } + } + if err == io.ErrUnexpectedEOF { + err = nil + } + return err +} + +// Close shuts down the SSL connection and closes the underlying wrapped +// connection. +func (c *Conn) Close() error { + c.mtx.Lock() + if c.is_shutdown { + c.mtx.Unlock() + return nil + } + c.is_shutdown = true + c.mtx.Unlock() + var errs utils.ErrorGroup + errs.Add(c.shutdownLoop()) + errs.Add(c.conn.Close()) + return errs.Finalize() +} + +func (c *Conn) read(b []byte) (int, func() error) { + if len(b) == 0 { + return 0, nil + } + c.mtx.Lock() + defer c.mtx.Unlock() + if c.is_shutdown { + return 0, func() error { return io.EOF } + } + runtime.LockOSThread() + defer runtime.UnlockOSThread() + rv, errno := C.SSL_read(c.ssl, unsafe.Pointer(&b[0]), C.int(len(b))) + if rv > 0 { + return int(rv), nil + } + return 0, c.getErrorHandler(rv, errno) +} + +// Read reads up to len(b) bytes into b. It returns the number of bytes read +// and an error if applicable. io.EOF is returned when the caller can expect +// to see no more data. 
+func (c *Conn) Read(b []byte) (n int, err error) { + if len(b) == 0 { + return 0, nil + } + err = tryAgain + for err == tryAgain { + n, errcb := c.read(b) + err = c.handleError(errcb) + if err == nil { + go c.flushOutputBuffer() + return n, nil + } + if err == io.ErrUnexpectedEOF { + err = io.EOF + } + } + return 0, err +} + +func (c *Conn) write(b []byte) (int, func() error) { + if len(b) == 0 { + return 0, nil + } + c.mtx.Lock() + defer c.mtx.Unlock() + if c.is_shutdown { + err := errors.New("connection closed") + return 0, func() error { return err } + } + runtime.LockOSThread() + defer runtime.UnlockOSThread() + rv, errno := C.SSL_write(c.ssl, unsafe.Pointer(&b[0]), C.int(len(b))) + if rv > 0 { + return int(rv), nil + } + return 0, c.getErrorHandler(rv, errno) +} + +// Write will encrypt the contents of b and write it to the underlying stream. +// Performance will be vastly improved if the size of b is a multiple of +// SSLRecordSize. +func (c *Conn) Write(b []byte) (written int, err error) { + if len(b) == 0 { + return 0, nil + } + err = tryAgain + for err == tryAgain { + n, errcb := c.write(b) + err = c.handleError(errcb) + if err == nil { + return n, c.flushOutputBuffer() + } + } + return 0, err +} + +// VerifyHostname pulls the PeerCertificate and calls VerifyHostname on the +// certificate. +func (c *Conn) VerifyHostname(host string) error { + cert, err := c.PeerCertificate() + if err != nil { + return err + } + return cert.VerifyHostname(host) +} + +// LocalAddr returns the underlying connection's local address +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the underlying connection's remote address +func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// SetDeadline calls SetDeadline on the underlying connection. +func (c *Conn) SetDeadline(t time.Time) error { + return c.conn.SetDeadline(t) +} + +// SetReadDeadline calls SetReadDeadline on the underlying connection. 
+func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetWriteDeadline calls SetWriteDeadline on the underlying connection. +func (c *Conn) SetWriteDeadline(t time.Time) error { + return c.conn.SetWriteDeadline(t) +} + +func (c *Conn) UnderlyingConn() net.Conn { + return c.conn +} + +func (c *Conn) SetTlsExtHostName(name string) error { + cname := C.CString(name) + defer C.free(unsafe.Pointer(cname)) + runtime.LockOSThread() + defer runtime.UnlockOSThread() + if C.X_SSL_set_tlsext_host_name(c.ssl, cname) == 0 { + return errorFromErrorQueue() + } + return nil +} + +func (c *Conn) VerifyResult() VerifyResult { + return VerifyResult(C.SSL_get_verify_result(c.ssl)) +} + +func (c *Conn) SessionReused() bool { + return C.X_SSL_session_reused(c.ssl) == 1 +} + +func (c *Conn) GetSession() ([]byte, error) { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + // get1 increases the refcount of the session, so we have to free it. + session := (*C.SSL_SESSION)(C.SSL_get1_session(c.ssl)) + if session == nil { + return nil, errors.New("failed to get session") + } + defer C.SSL_SESSION_free(session) + + // get the size of the encoding + slen := C.i2d_SSL_SESSION(session, nil) + + buf := (*C.uchar)(C.malloc(C.size_t(slen))) + defer C.free(unsafe.Pointer(buf)) + + // this modifies the value of buf (seriously), so we have to pass in a temp + // var so that we can actually read the bytes from buf. 
+ tmp := buf + slen2 := C.i2d_SSL_SESSION(session, &tmp) + if slen != slen2 { + return nil, errors.New("session had different lengths") + } + + return C.GoBytes(unsafe.Pointer(buf), slen), nil +} + +func (c *Conn) setSession(session []byte) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + ptr := (*C.uchar)(&session[0]) + s := C.d2i_SSL_SESSION(nil, &ptr, C.long(len(session))) + if s == nil { + return fmt.Errorf("unable to load session: %s", errorFromErrorQueue()) + } + defer C.SSL_SESSION_free(s) + + ret := C.SSL_set_session(c.ssl, s) + if ret != 1 { + return fmt.Errorf("unable to set session: %s", errorFromErrorQueue()) + } + return nil +} diff --git a/vendor/github.com/libp2p/go-openssl/ctx.go b/vendor/github.com/libp2p/go-openssl/ctx.go new file mode 100644 index 0000000000..33befc401e --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/ctx.go @@ -0,0 +1,568 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package openssl + +// #include "shim.h" +import "C" + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "runtime" + "sync" + "time" + "unsafe" + + "github.com/spacemonkeygo/spacelog" +) + +var ( + ssl_ctx_idx = C.X_SSL_CTX_new_index() + + logger = spacelog.GetLogger() +) + +type Ctx struct { + ctx *C.SSL_CTX + cert *Certificate + chain []*Certificate + key PrivateKey + verify_cb VerifyCallback + sni_cb TLSExtServernameCallback + + ticket_store_mu sync.Mutex + ticket_store *TicketStore +} + +//export get_ssl_ctx_idx +func get_ssl_ctx_idx() C.int { + return ssl_ctx_idx +} + +func newCtx(method *C.SSL_METHOD) (*Ctx, error) { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + ctx := C.SSL_CTX_new(method) + if ctx == nil { + return nil, errorFromErrorQueue() + } + c := &Ctx{ctx: ctx} + C.SSL_CTX_set_ex_data(ctx, get_ssl_ctx_idx(), unsafe.Pointer(c)) + runtime.SetFinalizer(c, func(c *Ctx) { + C.SSL_CTX_free(c.ctx) + }) + return c, nil +} + +type SSLVersion int + +const ( + SSLv3 SSLVersion = 0x02 // Vulnerable to "POODLE" attack. + TLSv1 SSLVersion = 0x03 + TLSv1_1 SSLVersion = 0x04 + TLSv1_2 SSLVersion = 0x05 + + // Make sure to disable SSLv2 and SSLv3 if you use this. SSLv3 is vulnerable + // to the "POODLE" attack, and SSLv2 is what, just don't even. + AnyVersion SSLVersion = 0x06 +) + +// NewCtxWithVersion creates an SSL context that is specific to the provided +// SSL version. See http://www.openssl.org/docs/ssl/SSL_CTX_new.html for more. +func NewCtxWithVersion(version SSLVersion) (*Ctx, error) { + var method *C.SSL_METHOD + switch version { + case SSLv3: + method = C.X_SSLv3_method() + case TLSv1: + method = C.X_TLSv1_method() + case TLSv1_1: + method = C.X_TLSv1_1_method() + case TLSv1_2: + method = C.X_TLSv1_2_method() + case AnyVersion: + method = C.X_SSLv23_method() + } + if method == nil { + return nil, errors.New("unknown ssl/tls version") + } + return newCtx(method) +} + +// NewCtx creates a context that supports any TLS version 1.0 and newer. 
+func NewCtx() (*Ctx, error) { + c, err := NewCtxWithVersion(AnyVersion) + if err == nil { + c.SetOptions(NoSSLv2 | NoSSLv3) + } + return c, err +} + +// NewCtxFromFiles calls NewCtx, loads the provided files, and configures the +// context to use them. +func NewCtxFromFiles(cert_file string, key_file string) (*Ctx, error) { + ctx, err := NewCtx() + if err != nil { + return nil, err + } + + cert_bytes, err := ioutil.ReadFile(cert_file) + if err != nil { + return nil, err + } + + certs := SplitPEM(cert_bytes) + if len(certs) == 0 { + return nil, fmt.Errorf("No PEM certificate found in '%s'", cert_file) + } + first, certs := certs[0], certs[1:] + cert, err := LoadCertificateFromPEM(first) + if err != nil { + return nil, err + } + + err = ctx.UseCertificate(cert) + if err != nil { + return nil, err + } + + for _, pem := range certs { + cert, err := LoadCertificateFromPEM(pem) + if err != nil { + return nil, err + } + err = ctx.AddChainCertificate(cert) + if err != nil { + return nil, err + } + } + + key_bytes, err := ioutil.ReadFile(key_file) + if err != nil { + return nil, err + } + + key, err := LoadPrivateKeyFromPEM(key_bytes) + if err != nil { + return nil, err + } + + err = ctx.UsePrivateKey(key) + if err != nil { + return nil, err + } + + return ctx, nil +} + +// EllipticCurve repesents the ASN.1 OID of an elliptic curve. +// see https://www.openssl.org/docs/apps/ecparam.html for a list of implemented curves. +type EllipticCurve int + +const ( + // P-256: X9.62/SECG curve over a 256 bit prime field + Prime256v1 EllipticCurve = C.NID_X9_62_prime256v1 + // P-384: NIST/SECG curve over a 384 bit prime field + Secp384r1 EllipticCurve = C.NID_secp384r1 + // P-521: NIST/SECG curve over a 521 bit prime field + Secp521r1 EllipticCurve = C.NID_secp521r1 +) + +// SetEllipticCurve sets the elliptic curve used by the SSL context to +// enable an ECDH cipher suite to be selected during the handshake. 
+func (c *Ctx) SetEllipticCurve(curve EllipticCurve) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + k := C.EC_KEY_new_by_curve_name(C.int(curve)) + if k == nil { + return errors.New("Unknown curve") + } + defer C.EC_KEY_free(k) + + if int(C.X_SSL_CTX_set_tmp_ecdh(c.ctx, k)) != 1 { + return errorFromErrorQueue() + } + + return nil +} + +// UseCertificate configures the context to present the given certificate to +// peers. +func (c *Ctx) UseCertificate(cert *Certificate) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + c.cert = cert + if int(C.SSL_CTX_use_certificate(c.ctx, cert.x)) != 1 { + return errorFromErrorQueue() + } + return nil +} + +// AddChainCertificate adds a certificate to the chain presented in the +// handshake. +func (c *Ctx) AddChainCertificate(cert *Certificate) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + c.chain = append(c.chain, cert) + if int(C.X_SSL_CTX_add_extra_chain_cert(c.ctx, cert.x)) != 1 { + return errorFromErrorQueue() + } + // OpenSSL takes ownership via SSL_CTX_add_extra_chain_cert + runtime.SetFinalizer(cert, nil) + return nil +} + +// UsePrivateKey configures the context to use the given private key for SSL +// handshakes. 
+func (c *Ctx) UsePrivateKey(key PrivateKey) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + c.key = key + if int(C.SSL_CTX_use_PrivateKey(c.ctx, key.evpPKey())) != 1 { + return errorFromErrorQueue() + } + return nil +} + +type CertificateStore struct { + store *C.X509_STORE + // for GC + ctx *Ctx + certs []*Certificate +} + +// Allocate a new, empty CertificateStore +func NewCertificateStore() (*CertificateStore, error) { + s := C.X509_STORE_new() + if s == nil { + return nil, errors.New("failed to allocate X509_STORE") + } + store := &CertificateStore{store: s} + runtime.SetFinalizer(store, func(s *CertificateStore) { + C.X509_STORE_free(s.store) + }) + return store, nil +} + +// Parse a chained PEM file, loading all certificates into the Store. +func (s *CertificateStore) LoadCertificatesFromPEM(data []byte) error { + pems := SplitPEM(data) + for _, pem := range pems { + cert, err := LoadCertificateFromPEM(pem) + if err != nil { + return err + } + err = s.AddCertificate(cert) + if err != nil { + return err + } + } + return nil +} + +// GetCertificateStore returns the context's certificate store that will be +// used for peer validation. +func (c *Ctx) GetCertificateStore() *CertificateStore { + // we don't need to dealloc the cert store pointer here, because it points + // to a ctx internal. so we do need to keep the ctx around + return &CertificateStore{ + store: C.SSL_CTX_get_cert_store(c.ctx), + ctx: c} +} + +// AddCertificate marks the provided Certificate as a trusted certificate in +// the given CertificateStore. 
+func (s *CertificateStore) AddCertificate(cert *Certificate) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + s.certs = append(s.certs, cert) + if int(C.X509_STORE_add_cert(s.store, cert.x)) != 1 { + return errorFromErrorQueue() + } + return nil +} + +type CertificateStoreCtx struct { + ctx *C.X509_STORE_CTX + ssl_ctx *Ctx +} + +func (self *CertificateStoreCtx) VerifyResult() VerifyResult { + return VerifyResult(C.X509_STORE_CTX_get_error(self.ctx)) +} + +func (self *CertificateStoreCtx) Err() error { + code := C.X509_STORE_CTX_get_error(self.ctx) + if code == C.X509_V_OK { + return nil + } + return fmt.Errorf("openssl: %s", + C.GoString(C.X509_verify_cert_error_string(C.long(code)))) +} + +func (self *CertificateStoreCtx) Depth() int { + return int(C.X509_STORE_CTX_get_error_depth(self.ctx)) +} + +// the certicate returned is only valid for the lifetime of the underlying +// X509_STORE_CTX +func (self *CertificateStoreCtx) GetCurrentCert() *Certificate { + x509 := C.X509_STORE_CTX_get_current_cert(self.ctx) + if x509 == nil { + return nil + } + // add a ref + if 1 != C.X_X509_add_ref(x509) { + return nil + } + cert := &Certificate{ + x: x509, + } + runtime.SetFinalizer(cert, func(cert *Certificate) { + C.X509_free(cert.x) + }) + return cert +} + +// LoadVerifyLocations tells the context to trust all certificate authorities +// provided in either the ca_file or the ca_path. +// See http://www.openssl.org/docs/ssl/SSL_CTX_load_verify_locations.html for +// more. 
+func (c *Ctx) LoadVerifyLocations(ca_file string, ca_path string) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + var c_ca_file, c_ca_path *C.char + if ca_file != "" { + c_ca_file = C.CString(ca_file) + defer C.free(unsafe.Pointer(c_ca_file)) + } + if ca_path != "" { + c_ca_path = C.CString(ca_path) + defer C.free(unsafe.Pointer(c_ca_path)) + } + if C.SSL_CTX_load_verify_locations(c.ctx, c_ca_file, c_ca_path) != 1 { + return errorFromErrorQueue() + } + return nil +} + +type Options int + +const ( + // NoCompression is only valid if you are using OpenSSL 1.0.1 or newer + NoCompression Options = C.SSL_OP_NO_COMPRESSION + NoSSLv2 Options = C.SSL_OP_NO_SSLv2 + NoSSLv3 Options = C.SSL_OP_NO_SSLv3 + NoTLSv1 Options = C.SSL_OP_NO_TLSv1 + CipherServerPreference Options = C.SSL_OP_CIPHER_SERVER_PREFERENCE + NoSessionResumptionOrRenegotiation Options = C.SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION + NoTicket Options = C.SSL_OP_NO_TICKET +) + +// SetOptions sets context options. See +// http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html +func (c *Ctx) SetOptions(options Options) Options { + return Options(C.X_SSL_CTX_set_options( + c.ctx, C.long(options))) +} + +func (c *Ctx) ClearOptions(options Options) Options { + return Options(C.X_SSL_CTX_clear_options( + c.ctx, C.long(options))) +} + +// GetOptions returns context options. See +// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html +func (c *Ctx) GetOptions() Options { + return Options(C.X_SSL_CTX_get_options(c.ctx)) +} + +type Modes int + +const ( + // ReleaseBuffers is only valid if you are using OpenSSL 1.0.1 or newer + ReleaseBuffers Modes = C.SSL_MODE_RELEASE_BUFFERS +) + +// SetMode sets context modes. See +// http://www.openssl.org/docs/ssl/SSL_CTX_set_mode.html +func (c *Ctx) SetMode(modes Modes) Modes { + return Modes(C.X_SSL_CTX_set_mode(c.ctx, C.long(modes))) +} + +// GetMode returns context modes. 
See +// http://www.openssl.org/docs/ssl/SSL_CTX_set_mode.html +func (c *Ctx) GetMode() Modes { + return Modes(C.X_SSL_CTX_get_mode(c.ctx)) +} + +type VerifyOptions int + +const ( + VerifyNone VerifyOptions = C.SSL_VERIFY_NONE + VerifyPeer VerifyOptions = C.SSL_VERIFY_PEER + VerifyFailIfNoPeerCert VerifyOptions = C.SSL_VERIFY_FAIL_IF_NO_PEER_CERT + VerifyClientOnce VerifyOptions = C.SSL_VERIFY_CLIENT_ONCE +) + +type VerifyCallback func(ok bool, store *CertificateStoreCtx) bool + +//export go_ssl_ctx_verify_cb_thunk +func go_ssl_ctx_verify_cb_thunk(p unsafe.Pointer, ok C.int, ctx *C.X509_STORE_CTX) C.int { + defer func() { + if err := recover(); err != nil { + logger.Critf("openssl: verify callback panic'd: %v", err) + os.Exit(1) + } + }() + verify_cb := (*Ctx)(p).verify_cb + // set up defaults just in case verify_cb is nil + if verify_cb != nil { + store := &CertificateStoreCtx{ctx: ctx} + if verify_cb(ok == 1, store) { + ok = 1 + } else { + ok = 0 + } + } + return ok +} + +// SetVerify controls peer verification settings. See +// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html +func (c *Ctx) SetVerify(options VerifyOptions, verify_cb VerifyCallback) { + c.verify_cb = verify_cb + if verify_cb != nil { + C.SSL_CTX_set_verify(c.ctx, C.int(options), (*[0]byte)(C.X_SSL_CTX_verify_cb)) + } else { + C.SSL_CTX_set_verify(c.ctx, C.int(options), nil) + } +} + +func (c *Ctx) SetVerifyMode(options VerifyOptions) { + c.SetVerify(options, c.verify_cb) +} + +func (c *Ctx) SetVerifyCallback(verify_cb VerifyCallback) { + c.SetVerify(c.VerifyMode(), verify_cb) +} + +func (c *Ctx) GetVerifyCallback() VerifyCallback { + return c.verify_cb +} + +func (c *Ctx) VerifyMode() VerifyOptions { + return VerifyOptions(C.SSL_CTX_get_verify_mode(c.ctx)) +} + +// SetVerifyDepth controls how many certificates deep the certificate +// verification logic is willing to follow a certificate chain. 
See +// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html +func (c *Ctx) SetVerifyDepth(depth int) { + C.SSL_CTX_set_verify_depth(c.ctx, C.int(depth)) +} + +// GetVerifyDepth controls how many certificates deep the certificate +// verification logic is willing to follow a certificate chain. See +// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html +func (c *Ctx) GetVerifyDepth() int { + return int(C.SSL_CTX_get_verify_depth(c.ctx)) +} + +type TLSExtServernameCallback func(ssl *SSL) SSLTLSExtErr + +// SetTLSExtServernameCallback sets callback function for Server Name Indication +// (SNI) rfc6066 (http://tools.ietf.org/html/rfc6066). See +// http://stackoverflow.com/questions/22373332/serving-multiple-domains-in-one-box-with-sni +func (c *Ctx) SetTLSExtServernameCallback(sni_cb TLSExtServernameCallback) { + c.sni_cb = sni_cb + C.X_SSL_CTX_set_tlsext_servername_callback(c.ctx, (*[0]byte)(C.sni_cb)) +} + +func (c *Ctx) SetSessionId(session_id []byte) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + var ptr *C.uchar + if len(session_id) > 0 { + ptr = (*C.uchar)(unsafe.Pointer(&session_id[0])) + } + if int(C.SSL_CTX_set_session_id_context(c.ctx, ptr, + C.uint(len(session_id)))) == 0 { + return errorFromErrorQueue() + } + return nil +} + +// SetCipherList sets the list of available ciphers. The format of the list is +// described at http://www.openssl.org/docs/apps/ciphers.html, but see +// http://www.openssl.org/docs/ssl/SSL_CTX_set_cipher_list.html for more. 
+func (c *Ctx) SetCipherList(list string) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + clist := C.CString(list) + defer C.free(unsafe.Pointer(clist)) + if int(C.SSL_CTX_set_cipher_list(c.ctx, clist)) == 0 { + return errorFromErrorQueue() + } + return nil +} + +type SessionCacheModes int + +const ( + SessionCacheOff SessionCacheModes = C.SSL_SESS_CACHE_OFF + SessionCacheClient SessionCacheModes = C.SSL_SESS_CACHE_CLIENT + SessionCacheServer SessionCacheModes = C.SSL_SESS_CACHE_SERVER + SessionCacheBoth SessionCacheModes = C.SSL_SESS_CACHE_BOTH + NoAutoClear SessionCacheModes = C.SSL_SESS_CACHE_NO_AUTO_CLEAR + NoInternalLookup SessionCacheModes = C.SSL_SESS_CACHE_NO_INTERNAL_LOOKUP + NoInternalStore SessionCacheModes = C.SSL_SESS_CACHE_NO_INTERNAL_STORE + NoInternal SessionCacheModes = C.SSL_SESS_CACHE_NO_INTERNAL +) + +// SetSessionCacheMode enables or disables session caching. See +// http://www.openssl.org/docs/ssl/SSL_CTX_set_session_cache_mode.html +func (c *Ctx) SetSessionCacheMode(modes SessionCacheModes) SessionCacheModes { + return SessionCacheModes( + C.X_SSL_CTX_set_session_cache_mode(c.ctx, C.long(modes))) +} + +// Set session cache timeout. Returns previously set value. +// See https://www.openssl.org/docs/ssl/SSL_CTX_set_timeout.html +func (c *Ctx) SetTimeout(t time.Duration) time.Duration { + prev := C.X_SSL_CTX_set_timeout(c.ctx, C.long(t/time.Second)) + return time.Duration(prev) * time.Second +} + +// Get session cache timeout. +// See https://www.openssl.org/docs/ssl/SSL_CTX_set_timeout.html +func (c *Ctx) GetTimeout() time.Duration { + return time.Duration(C.X_SSL_CTX_get_timeout(c.ctx)) * time.Second +} + +// Set session cache size. Returns previously set value. +// https://www.openssl.org/docs/ssl/SSL_CTX_sess_set_cache_size.html +func (c *Ctx) SessSetCacheSize(t int) int { + return int(C.X_SSL_CTX_sess_set_cache_size(c.ctx, C.long(t))) +} + +// Get session cache size. 
+// https://www.openssl.org/docs/ssl/SSL_CTX_sess_set_cache_size.html +func (c *Ctx) SessGetCacheSize() int { + return int(C.X_SSL_CTX_sess_get_cache_size(c.ctx)) +} diff --git a/vendor/github.com/libp2p/go-openssl/dh.go b/vendor/github.com/libp2p/go-openssl/dh.go new file mode 100644 index 0000000000..75ac5ad426 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/dh.go @@ -0,0 +1,66 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openssl + +// #include "shim.h" +import "C" +import ( + "errors" + "unsafe" +) + +// DeriveSharedSecret derives a shared secret using a private key and a peer's +// public key. +// The specific algorithm that is used depends on the types of the +// keys, but it is most commonly a variant of Diffie-Hellman. 
+func DeriveSharedSecret(private PrivateKey, public PublicKey) ([]byte, error) { + // Create context for the shared secret derivation + dhCtx := C.EVP_PKEY_CTX_new(private.evpPKey(), nil) + if dhCtx == nil { + return nil, errors.New("failed creating shared secret derivation context") + } + defer C.EVP_PKEY_CTX_free(dhCtx) + + // Initialize the context + if int(C.EVP_PKEY_derive_init(dhCtx)) != 1 { + return nil, errors.New("failed initializing shared secret derivation context") + } + + // Provide the peer's public key + if int(C.EVP_PKEY_derive_set_peer(dhCtx, public.evpPKey())) != 1 { + return nil, errors.New("failed adding peer public key to context") + } + + // Determine how large of a buffer we need for the shared secret + var buffLen C.size_t + if int(C.EVP_PKEY_derive(dhCtx, nil, &buffLen)) != 1 { + return nil, errors.New("failed determining shared secret length") + } + + // Allocate a buffer + buffer := C.X_OPENSSL_malloc(buffLen) + if buffer == nil { + return nil, errors.New("failed allocating buffer for shared secret") + } + defer C.X_OPENSSL_free(buffer) + + // Derive the shared secret + if int(C.EVP_PKEY_derive(dhCtx, (*C.uchar)(buffer), &buffLen)) != 1 { + return nil, errors.New("failed deriving the shared secret") + } + + secret := C.GoBytes(unsafe.Pointer(buffer), C.int(buffLen)) + return secret, nil +} diff --git a/vendor/github.com/libp2p/go-openssl/dhparam.go b/vendor/github.com/libp2p/go-openssl/dhparam.go new file mode 100644 index 0000000000..294d0645c0 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/dhparam.go @@ -0,0 +1,64 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openssl + +// #include "shim.h" +import "C" + +import ( + "errors" + "runtime" + "unsafe" +) + +type DH struct { + dh *C.struct_dh_st +} + +// LoadDHParametersFromPEM loads the Diffie-Hellman parameters from +// a PEM-encoded block. +func LoadDHParametersFromPEM(pem_block []byte) (*DH, error) { + if len(pem_block) == 0 { + return nil, errors.New("empty pem block") + } + bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]), + C.int(len(pem_block))) + if bio == nil { + return nil, errors.New("failed creating bio") + } + defer C.BIO_free(bio) + + params := C.PEM_read_bio_DHparams(bio, nil, nil, nil) + if params == nil { + return nil, errors.New("failed reading dh parameters") + } + dhparams := &DH{dh: params} + runtime.SetFinalizer(dhparams, func(dhparams *DH) { + C.DH_free(dhparams.dh) + }) + return dhparams, nil +} + +// SetDHParameters sets the DH group (DH parameters) used to +// negotiate an emphemeral DH key during handshaking. +func (c *Ctx) SetDHParameters(dh *DH) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + if int(C.X_SSL_CTX_set_tmp_dh(c.ctx, dh.dh)) != 1 { + return errorFromErrorQueue() + } + return nil +} diff --git a/vendor/github.com/libp2p/go-openssl/digest.go b/vendor/github.com/libp2p/go-openssl/digest.go new file mode 100644 index 0000000000..6d8d2635ae --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/digest.go @@ -0,0 +1,51 @@ +// Copyright (C) 2017. See AUTHORS. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openssl + +// #include "shim.h" +import "C" + +import ( + "fmt" + "unsafe" +) + +// Digest represents and openssl message digest. +type Digest struct { + ptr *C.EVP_MD +} + +// GetDigestByName returns the Digest with the name or nil and an error if the +// digest was not found. +func GetDigestByName(name string) (*Digest, error) { + cname := C.CString(name) + defer C.free(unsafe.Pointer(cname)) + p := C.X_EVP_get_digestbyname(cname) + if p == nil { + return nil, fmt.Errorf("Digest %v not found", name) + } + // we can consider digests to use static mem; don't need to free + return &Digest{ptr: p}, nil +} + +// GetDigestByName returns the Digest with the NID or nil and an error if the +// digest was not found. +func GetDigestByNid(nid NID) (*Digest, error) { + sn, err := Nid2ShortName(nid) + if err != nil { + return nil, err + } + return GetDigestByName(sn) +} diff --git a/vendor/github.com/libp2p/go-openssl/engine.go b/vendor/github.com/libp2p/go-openssl/engine.go new file mode 100644 index 0000000000..78aef956fc --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/engine.go @@ -0,0 +1,50 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openssl + +/* +#include "openssl/engine.h" +*/ +import "C" + +import ( + "fmt" + "runtime" + "unsafe" +) + +type Engine struct { + e *C.ENGINE +} + +func EngineById(name string) (*Engine, error) { + cname := C.CString(name) + defer C.free(unsafe.Pointer(cname)) + e := &Engine{ + e: C.ENGINE_by_id(cname), + } + if e.e == nil { + return nil, fmt.Errorf("engine %s missing", name) + } + if C.ENGINE_init(e.e) == 0 { + C.ENGINE_free(e.e) + return nil, fmt.Errorf("engine %s not initialized", name) + } + runtime.SetFinalizer(e, func(e *Engine) { + C.ENGINE_finish(e.e) + C.ENGINE_free(e.e) + }) + return e, nil +} diff --git a/vendor/github.com/libp2p/go-openssl/fips.go b/vendor/github.com/libp2p/go-openssl/fips.go new file mode 100644 index 0000000000..f65e14d3ef --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/fips.go @@ -0,0 +1,39 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package openssl + +/* +#include +*/ +import "C" +import "runtime" + +// FIPSModeSet enables a FIPS 140-2 validated mode of operation. +// https://wiki.openssl.org/index.php/FIPS_mode_set() +func FIPSModeSet(mode bool) error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + var r C.int + if mode { + r = C.FIPS_mode_set(1) + } else { + r = C.FIPS_mode_set(0) + } + if r != 1 { + return errorFromErrorQueue() + } + return nil +} diff --git a/vendor/github.com/libp2p/go-openssl/go.mod b/vendor/github.com/libp2p/go-openssl/go.mod new file mode 100644 index 0000000000..51068e7f57 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/go.mod @@ -0,0 +1,8 @@ +module github.com/libp2p/go-openssl + +require ( + github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 + golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb // indirect +) + +go 1.12 diff --git a/vendor/github.com/libp2p/go-openssl/go.sum b/vendor/github.com/libp2p/go-openssl/go.sum new file mode 100644 index 0000000000..3b2b650f82 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/go.sum @@ -0,0 +1,4 @@ +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/libp2p/go-openssl/hmac.go b/vendor/github.com/libp2p/go-openssl/hmac.go new file mode 100644 index 0000000000..a8640cfac6 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/hmac.go @@ -0,0 +1,91 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openssl + +// #include "shim.h" +import "C" + +import ( + "errors" + "runtime" + "unsafe" +) + +type HMAC struct { + ctx *C.HMAC_CTX + engine *Engine + md *C.EVP_MD +} + +func NewHMAC(key []byte, digestAlgorithm EVP_MD) (*HMAC, error) { + return NewHMACWithEngine(key, digestAlgorithm, nil) +} + +func NewHMACWithEngine(key []byte, digestAlgorithm EVP_MD, e *Engine) (*HMAC, error) { + var md *C.EVP_MD = getDigestFunction(digestAlgorithm) + h := &HMAC{engine: e, md: md} + h.ctx = C.X_HMAC_CTX_new() + if h.ctx == nil { + return nil, errors.New("unable to allocate HMAC_CTX") + } + + var c_e *C.ENGINE + if e != nil { + c_e = e.e + } + if rc := C.X_HMAC_Init_ex(h.ctx, + unsafe.Pointer(&key[0]), + C.int(len(key)), + md, + c_e); rc != 1 { + C.X_HMAC_CTX_free(h.ctx) + return nil, errors.New("failed to initialize HMAC_CTX") + } + + runtime.SetFinalizer(h, func(h *HMAC) { h.Close() }) + return h, nil +} + +func (h *HMAC) Close() { + C.X_HMAC_CTX_free(h.ctx) +} + +func (h *HMAC) Write(data []byte) (n int, err error) { + if len(data) == 0 { + return 0, nil + } + if rc := C.X_HMAC_Update(h.ctx, (*C.uchar)(unsafe.Pointer(&data[0])), + C.size_t(len(data))); rc != 1 { + return 0, errors.New("failed to update HMAC") + } + return len(data), nil +} + +func (h *HMAC) Reset() error { + if 1 != C.X_HMAC_Init_ex(h.ctx, nil, 0, nil, nil) { + return errors.New("failed to reset HMAC_CTX") + } + return nil +} + +func (h *HMAC) Final() (result []byte, err error) { + mdLength := C.X_EVP_MD_size(h.md) + result = make([]byte, mdLength) + if rc := C.X_HMAC_Final(h.ctx, 
(*C.uchar)(unsafe.Pointer(&result[0])), + (*C.uint)(unsafe.Pointer(&mdLength))); rc != 1 { + return nil, errors.New("failed to finalized HMAC") + } + return result, h.Reset() +} diff --git a/vendor/github.com/libp2p/go-openssl/hostname.c b/vendor/github.com/libp2p/go-openssl/hostname.c new file mode 100644 index 0000000000..0bffecad69 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/hostname.c @@ -0,0 +1,373 @@ +/* + * Go-OpenSSL notice: + * This file is required for all OpenSSL versions prior to 1.1.0. This simply + * provides the new 1.1.0 X509_check_* methods for hostname validation if they + * don't already exist. + */ + +#include + +#ifndef X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT + +/* portions from x509v3.h and v3_utl.c */ +/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL + * project. + */ +/* ==================================================================== + * Copyright (c) 1999-2003 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. 
For written permission, please contact + * licensing@OpenSSL.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). + * + */ +/* X509 v3 extension utilities */ + +#include +#include +#include +#include +#include + +#define X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT 0x1 +#define X509_CHECK_FLAG_NO_WILDCARDS 0x2 + +typedef int (*equal_fn)(const unsigned char *pattern, size_t pattern_len, + const unsigned char *subject, size_t subject_len); + +/* Compare while ASCII ignoring case. 
*/ +static int equal_nocase(const unsigned char *pattern, size_t pattern_len, + const unsigned char *subject, size_t subject_len) + { + if (pattern_len != subject_len) + return 0; + while (pattern_len) + { + unsigned char l = *pattern; + unsigned char r = *subject; + /* The pattern must not contain NUL characters. */ + if (l == 0) + return 0; + if (l != r) + { + if ('A' <= l && l <= 'Z') + l = (l - 'A') + 'a'; + if ('A' <= r && r <= 'Z') + r = (r - 'A') + 'a'; + if (l != r) + return 0; + } + ++pattern; + ++subject; + --pattern_len; + } + return 1; + } + +/* Compare using memcmp. */ +static int equal_case(const unsigned char *pattern, size_t pattern_len, + const unsigned char *subject, size_t subject_len) +{ + /* The pattern must not contain NUL characters. */ + if (memchr(pattern, '\0', pattern_len) != NULL) + return 0; + if (pattern_len != subject_len) + return 0; + return !memcmp(pattern, subject, pattern_len); +} + +/* RFC 5280, section 7.5, requires that only the domain is compared in + a case-insensitive manner. */ +static int equal_email(const unsigned char *a, size_t a_len, + const unsigned char *b, size_t b_len) + { + size_t i = a_len; + if (a_len != b_len) + return 0; + /* We search backwards for the '@' character, so that we do + not have to deal with quoted local-parts. The domain part + is compared in a case-insensitive manner. */ + while (i > 0) + { + --i; + if (a[i] == '@' || b[i] == '@') + { + if (!equal_nocase(a + i, a_len - i, + b + i, a_len - i)) + return 0; + break; + } + } + if (i == 0) + i = a_len; + return equal_case(a, i, b, i); + } + +/* Compare the prefix and suffix with the subject, and check that the + characters in-between are valid. 
*/ +static int wildcard_match(const unsigned char *prefix, size_t prefix_len, + const unsigned char *suffix, size_t suffix_len, + const unsigned char *subject, size_t subject_len) + { + const unsigned char *wildcard_start; + const unsigned char *wildcard_end; + const unsigned char *p; + if (subject_len < prefix_len + suffix_len) + return 0; + if (!equal_nocase(prefix, prefix_len, subject, prefix_len)) + return 0; + wildcard_start = subject + prefix_len; + wildcard_end = subject + (subject_len - suffix_len); + if (!equal_nocase(wildcard_end, suffix_len, suffix, suffix_len)) + return 0; + /* The wildcard must match at least one character. */ + if (wildcard_start == wildcard_end) + return 0; + /* Check that the part matched by the wildcard contains only + permitted characters and only matches a single label. */ + for (p = wildcard_start; p != wildcard_end; ++p) + if (!(('0' <= *p && *p <= '9') || + ('A' <= *p && *p <= 'Z') || + ('a' <= *p && *p <= 'z') || + *p == '-')) + return 0; + return 1; + } + +/* Checks if the memory region consistens of [0-9A-Za-z.-]. */ +static int valid_domain_characters(const unsigned char *p, size_t len) + { + while (len) + { + if (!(('0' <= *p && *p <= '9') || + ('A' <= *p && *p <= 'Z') || + ('a' <= *p && *p <= 'z') || + *p == '-' || *p == '.')) + return 0; + ++p; + --len; + } + return 1; + } + +/* Find the '*' in a wildcard pattern. If no such character is found + or the pattern is otherwise invalid, returns NULL. 
*/ +static const unsigned char *wildcard_find_star(const unsigned char *pattern, + size_t pattern_len) + { + const unsigned char *star = memchr(pattern, '*', pattern_len); + size_t dot_count = 0; + const unsigned char *suffix_start; + size_t suffix_length; + if (star == NULL) + return NULL; + suffix_start = star + 1; + suffix_length = (pattern + pattern_len) - (star + 1); + if (!(valid_domain_characters(pattern, star - pattern) && + valid_domain_characters(suffix_start, suffix_length))) + return NULL; + /* Check that the suffix matches at least two labels. */ + while (suffix_length) + { + if (*suffix_start == '.') + ++dot_count; + ++suffix_start; + --suffix_length; + } + if (dot_count < 2) + return NULL; + return star; + } + +/* Compare using wildcards. */ +static int equal_wildcard(const unsigned char *pattern, size_t pattern_len, + const unsigned char *subject, size_t subject_len) + { + const unsigned char *star = wildcard_find_star(pattern, pattern_len); + if (star == NULL) + return equal_nocase(pattern, pattern_len, + subject, subject_len); + return wildcard_match(pattern, star - pattern, + star + 1, (pattern + pattern_len) - star - 1, + subject, subject_len); + } + +/* Compare an ASN1_STRING to a supplied string. If they match + * return 1. If cmp_type > 0 only compare if string matches the + * type, otherwise convert it to UTF8. 
+ */ + +static int do_check_string(ASN1_STRING *a, int cmp_type, equal_fn equal, + const unsigned char *b, size_t blen) + { + if (!a->data || !a->length) + return 0; + if (cmp_type > 0) + { + if (cmp_type != a->type) + return 0; + if (cmp_type == V_ASN1_IA5STRING) + return equal(a->data, a->length, b, blen); + if (a->length == (int)blen && !memcmp(a->data, b, blen)) + return 1; + else + return 0; + } + else + { + int astrlen, rv; + unsigned char *astr; + astrlen = ASN1_STRING_to_UTF8(&astr, a); + if (astrlen < 0) + return -1; + rv = equal(astr, astrlen, b, blen); + OPENSSL_free(astr); + return rv; + } + } + +static int do_x509_check(X509 *x, const unsigned char *chk, size_t chklen, + unsigned int flags, int check_type) + { + STACK_OF(GENERAL_NAME) *gens = NULL; + X509_NAME *name = NULL; + int i; + int cnid; + int alt_type; + equal_fn equal; + if (check_type == GEN_EMAIL) + { + cnid = NID_pkcs9_emailAddress; + alt_type = V_ASN1_IA5STRING; + equal = equal_email; + } + else if (check_type == GEN_DNS) + { + cnid = NID_commonName; + alt_type = V_ASN1_IA5STRING; + if (flags & X509_CHECK_FLAG_NO_WILDCARDS) + equal = equal_nocase; + else + equal = equal_wildcard; + } + else + { + cnid = 0; + alt_type = V_ASN1_OCTET_STRING; + equal = equal_case; + } + + if (chklen == 0) + chklen = strlen((const char *)chk); + + gens = X509_get_ext_d2i(x, NID_subject_alt_name, NULL, NULL); + if (gens) + { + int rv = 0; + for (i = 0; i < sk_GENERAL_NAME_num(gens); i++) + { + GENERAL_NAME *gen; + ASN1_STRING *cstr; + gen = sk_GENERAL_NAME_value(gens, i); + if(gen->type != check_type) + continue; + if (check_type == GEN_EMAIL) + cstr = gen->d.rfc822Name; + else if (check_type == GEN_DNS) + cstr = gen->d.dNSName; + else + cstr = gen->d.iPAddress; + if (do_check_string(cstr, alt_type, equal, chk, chklen)) + { + rv = 1; + break; + } + } + GENERAL_NAMES_free(gens); + if (rv) + return 1; + if (!(flags & X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT) || !cnid) + return 0; + } + i = -1; + name = 
X509_get_subject_name(x); + while((i = X509_NAME_get_index_by_NID(name, cnid, i)) >= 0) + { + X509_NAME_ENTRY *ne; + ASN1_STRING *str; + ne = X509_NAME_get_entry(name, i); + str = X509_NAME_ENTRY_get_data(ne); + if (do_check_string(str, -1, equal, chk, chklen)) + return 1; + } + return 0; + } + +#if OPENSSL_VERSION_NUMBER < 0x1000200fL + +int X509_check_host(X509 *x, const unsigned char *chk, size_t chklen, + unsigned int flags, char **peername) + { + return do_x509_check(x, chk, chklen, flags, GEN_DNS); + } + +int X509_check_email(X509 *x, const unsigned char *chk, size_t chklen, + unsigned int flags) + { + return do_x509_check(x, chk, chklen, flags, GEN_EMAIL); + } + +int X509_check_ip(X509 *x, const unsigned char *chk, size_t chklen, + unsigned int flags) + { + return do_x509_check(x, chk, chklen, flags, GEN_IPADD); + } + +#endif /* OPENSSL_VERSION_NUMBER < 0x1000200fL */ + +#endif diff --git a/vendor/github.com/libp2p/go-openssl/hostname.go b/vendor/github.com/libp2p/go-openssl/hostname.go new file mode 100644 index 0000000000..c92d959e63 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/hostname.go @@ -0,0 +1,132 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package openssl + +/* +#include +#include +#include + +#ifndef X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT +#define X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT 0x1 +#define X509_CHECK_FLAG_NO_WILDCARDS 0x2 + +extern int X509_check_host(X509 *x, const unsigned char *chk, size_t chklen, + unsigned int flags, char **peername); +extern int X509_check_email(X509 *x, const unsigned char *chk, size_t chklen, + unsigned int flags); +extern int X509_check_ip(X509 *x, const unsigned char *chk, size_t chklen, + unsigned int flags); +#endif +*/ +import "C" + +import ( + "errors" + "net" + "unsafe" +) + +var ( + ValidationError = errors.New("Host validation error") +) + +type CheckFlags int + +const ( + AlwaysCheckSubject CheckFlags = C.X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT + NoWildcards CheckFlags = C.X509_CHECK_FLAG_NO_WILDCARDS +) + +// CheckHost checks that the X509 certificate is signed for the provided +// host name. See http://www.openssl.org/docs/crypto/X509_check_host.html for +// more. Note that CheckHost does not check the IP field. See VerifyHostname. +// Specifically returns ValidationError if the Certificate didn't match but +// there was no internal error. +func (c *Certificate) CheckHost(host string, flags CheckFlags) error { + chost := unsafe.Pointer(C.CString(host)) + defer C.free(chost) + + rv := C.X509_check_host(c.x, (*C.uchar)(chost), C.size_t(len(host)), + C.uint(flags), nil) + if rv > 0 { + return nil + } + if rv == 0 { + return ValidationError + } + return errors.New("hostname validation had an internal failure") +} + +// CheckEmail checks that the X509 certificate is signed for the provided +// email address. See http://www.openssl.org/docs/crypto/X509_check_host.html +// for more. +// Specifically returns ValidationError if the Certificate didn't match but +// there was no internal error. 
+func (c *Certificate) CheckEmail(email string, flags CheckFlags) error { + cemail := unsafe.Pointer(C.CString(email)) + defer C.free(cemail) + rv := C.X509_check_email(c.x, (*C.uchar)(cemail), C.size_t(len(email)), + C.uint(flags)) + if rv > 0 { + return nil + } + if rv == 0 { + return ValidationError + } + return errors.New("email validation had an internal failure") +} + +// CheckIP checks that the X509 certificate is signed for the provided +// IP address. See http://www.openssl.org/docs/crypto/X509_check_host.html +// for more. +// Specifically returns ValidationError if the Certificate didn't match but +// there was no internal error. +func (c *Certificate) CheckIP(ip net.IP, flags CheckFlags) error { + // X509_check_ip will fail to validate the 16-byte representation of an IPv4 + // address, so convert to the 4-byte representation. + if ip4 := ip.To4(); ip4 != nil { + ip = ip4 + } + + cip := unsafe.Pointer(&ip[0]) + rv := C.X509_check_ip(c.x, (*C.uchar)(cip), C.size_t(len(ip)), + C.uint(flags)) + if rv > 0 { + return nil + } + if rv == 0 { + return ValidationError + } + return errors.New("ip validation had an internal failure") +} + +// VerifyHostname is a combination of CheckHost and CheckIP. If the provided +// hostname looks like an IP address, it will be checked as an IP address, +// otherwise it will be checked as a hostname. +// Specifically returns ValidationError if the Certificate didn't match but +// there was no internal error. 
+func (c *Certificate) VerifyHostname(host string) error { + var ip net.IP + if len(host) >= 3 && host[0] == '[' && host[len(host)-1] == ']' { + ip = net.ParseIP(host[1 : len(host)-1]) + } else { + ip = net.ParseIP(host) + } + if ip != nil { + return c.CheckIP(ip, 0) + } + return c.CheckHost(host, 0) +} diff --git a/vendor/github.com/libp2p/go-openssl/http.go b/vendor/github.com/libp2p/go-openssl/http.go new file mode 100644 index 0000000000..39bd5a28b5 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/http.go @@ -0,0 +1,61 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openssl + +import ( + "net/http" +) + +// ListenAndServeTLS will take an http.Handler and serve it using OpenSSL over +// the given tcp address, configured to use the provided cert and key files. +func ListenAndServeTLS(addr string, cert_file string, key_file string, + handler http.Handler) error { + return ServerListenAndServeTLS( + &http.Server{Addr: addr, Handler: handler}, cert_file, key_file) +} + +// ServerListenAndServeTLS will take an http.Server and serve it using OpenSSL +// configured to use the provided cert and key files. 
+func ServerListenAndServeTLS(srv *http.Server, + cert_file, key_file string) error { + addr := srv.Addr + if addr == "" { + addr = ":https" + } + + ctx, err := NewCtxFromFiles(cert_file, key_file) + if err != nil { + return err + } + + l, err := Listen("tcp", addr, ctx) + if err != nil { + return err + } + + return srv.Serve(l) +} + +// TODO: http client integration +// holy crap, getting this integrated nicely with the Go stdlib HTTP client +// stack so that it does proxying, connection pooling, and most importantly +// hostname verification is really hard. So much stuff is hardcoded to just use +// the built-in TLS lib. I think to get this to work either some crazy +// hacktackery beyond me, an almost straight up fork of the HTTP client, or +// serious stdlib internal refactoring is necessary. +// even more so, good luck getting openssl to use the operating system default +// root certificates if the user doesn't provide any. sadlol +// NOTE: if you're going to try and write your own round tripper, at least use +// openssl.Dial, or equivalent logic diff --git a/vendor/github.com/libp2p/go-openssl/init.go b/vendor/github.com/libp2p/go-openssl/init.go new file mode 100644 index 0000000000..17dc6f3875 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/init.go @@ -0,0 +1,117 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package openssl is a light wrapper around OpenSSL for Go. 
+ +It strives to provide a near-drop-in replacement for the Go standard library +tls package, while allowing for: + +Performance + +OpenSSL is battle-tested and optimized C. While Go's built-in library shows +great promise, it is still young and in some places, inefficient. This simple +OpenSSL wrapper can often do at least 2x with the same cipher and protocol. + +On my lappytop, I get the following benchmarking speeds: + BenchmarkSHA1Large_openssl 1000 2611282 ns/op 401.56 MB/s + BenchmarkSHA1Large_stdlib 500 3963983 ns/op 264.53 MB/s + BenchmarkSHA1Small_openssl 1000000 3476 ns/op 0.29 MB/s + BenchmarkSHA1Small_stdlib 5000000 550 ns/op 1.82 MB/s + BenchmarkSHA256Large_openssl 200 8085314 ns/op 129.69 MB/s + BenchmarkSHA256Large_stdlib 100 18948189 ns/op 55.34 MB/s + BenchmarkSHA256Small_openssl 1000000 4262 ns/op 0.23 MB/s + BenchmarkSHA256Small_stdlib 1000000 1444 ns/op 0.69 MB/s + BenchmarkOpenSSLThroughput 100000 21634 ns/op 47.33 MB/s + BenchmarkStdlibThroughput 50000 58974 ns/op 17.36 MB/s + +Interoperability + +Many systems support OpenSSL with a variety of plugins and modules for things, +such as hardware acceleration in embedded devices. + +Greater flexibility and configuration + +OpenSSL allows for far greater configuration of corner cases and backwards +compatibility (such as support of SSLv2). You shouldn't be using SSLv2 if you +can help but, but sometimes you can't help it. + +Security + +Yeah yeah, Heartbleed. But according to the author of the standard library's +TLS implementation, Go's TLS library is vulnerable to timing attacks. And +whether or not OpenSSL received the appropriate amount of scrutiny +pre-Heartbleed, it sure is receiving it now. + +Usage + +Starting an HTTP server that uses OpenSSL is very easy. 
It's as simple as: + log.Fatal(openssl.ListenAndServeTLS( + ":8443", "my_server.crt", "my_server.key", myHandler)) + +Getting a net.Listener that uses OpenSSL is also easy: + ctx, err := openssl.NewCtxFromFiles("my_server.crt", "my_server.key") + if err != nil { + log.Fatal(err) + } + l, err := openssl.Listen("tcp", ":7777", ctx) + +Making a client connection is straightforward too: + ctx, err := NewCtx() + if err != nil { + log.Fatal(err) + } + err = ctx.LoadVerifyLocations("/etc/ssl/certs/ca-certificates.crt", "") + if err != nil { + log.Fatal(err) + } + conn, err := openssl.Dial("tcp", "localhost:7777", ctx, 0) + +Help wanted: To get this library to work with net/http's client, we +had to fork net/http. It would be nice if an alternate http client library +supported the generality needed to use OpenSSL instead of crypto/tls. +*/ +package openssl + +// #include "shim.h" +import "C" + +import ( + "errors" + "fmt" + "strings" +) + +func init() { + if rc := C.X_shim_init(); rc != 0 { + panic(fmt.Errorf("X_shim_init failed with %d", rc)) + } +} + +// errorFromErrorQueue needs to run in the same OS thread as the operation +// that caused the possible error +func errorFromErrorQueue() error { + var errs []string + for { + err := C.ERR_get_error() + if err == 0 { + break + } + errs = append(errs, fmt.Sprintf("%s:%s:%s", + C.GoString(C.ERR_lib_error_string(err)), + C.GoString(C.ERR_func_error_string(err)), + C.GoString(C.ERR_reason_error_string(err)))) + } + return errors.New(fmt.Sprintf("SSL errors: %s", strings.Join(errs, "\n"))) +} diff --git a/vendor/github.com/libp2p/go-openssl/init_posix.go b/vendor/github.com/libp2p/go-openssl/init_posix.go new file mode 100644 index 0000000000..605a24bc95 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/init_posix.go @@ -0,0 +1,68 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux darwin solaris freebsd openbsd +// +build !windows + +package openssl + +/* +#include +#include +#include + +pthread_mutex_t* goopenssl_locks; + +int go_init_locks() { + int rc = 0; + int nlock; + int i; + int locks_needed = CRYPTO_num_locks(); + + goopenssl_locks = (pthread_mutex_t*)malloc( + sizeof(pthread_mutex_t) * locks_needed); + if (!goopenssl_locks) { + return ENOMEM; + } + for (nlock = 0; nlock < locks_needed; ++nlock) { + rc = pthread_mutex_init(&goopenssl_locks[nlock], NULL); + if (rc != 0) { + break; + } + } + + if (rc != 0) { + for (i = nlock - 1; i >= 0; --i) { + pthread_mutex_destroy(&goopenssl_locks[i]); + } + free(goopenssl_locks); + goopenssl_locks = NULL; + } + return rc; +} + +void go_thread_locking_callback(int mode, int n, const char *file, + int line) { + if (mode & CRYPTO_LOCK) { + pthread_mutex_lock(&goopenssl_locks[n]); + } else { + pthread_mutex_unlock(&goopenssl_locks[n]); + } +} + +unsigned long go_thread_id_callback(void) { + return (unsigned long)pthread_self(); +} +*/ +import "C" diff --git a/vendor/github.com/libp2p/go-openssl/init_windows.go b/vendor/github.com/libp2p/go-openssl/init_windows.go new file mode 100644 index 0000000000..051133c393 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/init_windows.go @@ -0,0 +1,57 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package openssl + +/* +#include +#include +#include + +CRITICAL_SECTION* goopenssl_locks; + +int go_init_locks() { + int rc = 0; + int nlock; + int i; + int locks_needed = CRYPTO_num_locks(); + + goopenssl_locks = (CRITICAL_SECTION*)malloc( + sizeof(*goopenssl_locks) * locks_needed); + if (!goopenssl_locks) { + return ENOMEM; + } + for (nlock = 0; nlock < locks_needed; ++nlock) { + InitializeCriticalSection(&goopenssl_locks[nlock]); + } + + return 0; +} + +void go_thread_locking_callback(int mode, int n, const char *file, + int line) { + if (mode & CRYPTO_LOCK) { + EnterCriticalSection(&goopenssl_locks[n]); + } else { + LeaveCriticalSection(&goopenssl_locks[n]); + } +} + +unsigned long go_thread_id_callback(void) { + return (unsigned long)GetCurrentThreadId(); +} +*/ +import "C" diff --git a/vendor/github.com/libp2p/go-openssl/key.go b/vendor/github.com/libp2p/go-openssl/key.go new file mode 100644 index 0000000000..268ff01373 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/key.go @@ -0,0 +1,518 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package openssl + +// #include "shim.h" +import "C" + +import ( + "errors" + "io/ioutil" + "runtime" + "unsafe" +) + +var ( // some (effectively) constants for tests to refer to + ed25519_support = C.X_ED25519_SUPPORT != 0 +) + +type Method *C.EVP_MD + +var ( + SHA1_Method Method = C.X_EVP_sha1() + SHA256_Method Method = C.X_EVP_sha256() + SHA512_Method Method = C.X_EVP_sha512() +) + +// Constants for the various key types. +// Mapping of name -> NID taken from openssl/evp.h +const ( + KeyTypeNone = NID_undef + KeyTypeRSA = NID_rsaEncryption + KeyTypeRSA2 = NID_rsa + KeyTypeDSA = NID_dsa + KeyTypeDSA1 = NID_dsa_2 + KeyTypeDSA2 = NID_dsaWithSHA + KeyTypeDSA3 = NID_dsaWithSHA1 + KeyTypeDSA4 = NID_dsaWithSHA1_2 + KeyTypeDH = NID_dhKeyAgreement + KeyTypeDHX = NID_dhpublicnumber + KeyTypeEC = NID_X9_62_id_ecPublicKey + KeyTypeHMAC = NID_hmac + KeyTypeCMAC = NID_cmac + KeyTypeTLS1PRF = NID_tls1_prf + KeyTypeHKDF = NID_hkdf + KeyTypeX25519 = NID_X25519 + KeyTypeX448 = NID_X448 + KeyTypeED25519 = NID_ED25519 + KeyTypeED448 = NID_ED448 +) + +type PublicKey interface { + // Verifies the data signature using PKCS1.15 + VerifyPKCS1v15(method Method, data, sig []byte) error + + // MarshalPKIXPublicKeyPEM converts the public key to PEM-encoded PKIX + // format + MarshalPKIXPublicKeyPEM() (pem_block []byte, err error) + + // MarshalPKIXPublicKeyDER converts the public key to DER-encoded PKIX + // format + MarshalPKIXPublicKeyDER() (der_block []byte, err error) + + // KeyType returns an identifier for what kind of key is represented by this + // object. + KeyType() NID + + // BaseType returns an identifier for what kind of key is represented + // by this object. + // Keys that share same algorithm but use different legacy formats + // will have the same BaseType. 
+ // + // For example, a key with a `KeyType() == KeyTypeRSA` and a key with a + // `KeyType() == KeyTypeRSA2` would both have `BaseType() == KeyTypeRSA`. + BaseType() NID + + // Equal compares the key with the passed in key. + Equal(key PublicKey) bool + + // Size returns the size (in bytes) of signatures created with this key. + Size() int + + evpPKey() *C.EVP_PKEY +} + +type PrivateKey interface { + PublicKey + + // Signs the data using PKCS1.15 + SignPKCS1v15(Method, []byte) ([]byte, error) + + // MarshalPKCS1PrivateKeyPEM converts the private key to PEM-encoded PKCS1 + // format + MarshalPKCS1PrivateKeyPEM() (pem_block []byte, err error) + + // MarshalPKCS1PrivateKeyDER converts the private key to DER-encoded PKCS1 + // format + MarshalPKCS1PrivateKeyDER() (der_block []byte, err error) +} + +type pKey struct { + key *C.EVP_PKEY +} + +func (key *pKey) evpPKey() *C.EVP_PKEY { return key.key } + +func (key *pKey) Equal(other PublicKey) bool { + return C.EVP_PKEY_cmp(key.key, other.evpPKey()) == 1 +} + +func (key *pKey) KeyType() NID { + return NID(C.EVP_PKEY_id(key.key)) +} + +func (key *pKey) Size() int { + return int(C.EVP_PKEY_size(key.key)) +} + +func (key *pKey) BaseType() NID { + return NID(C.EVP_PKEY_base_id(key.key)) +} + +func (key *pKey) SignPKCS1v15(method Method, data []byte) ([]byte, error) { + + ctx := C.X_EVP_MD_CTX_new() + defer C.X_EVP_MD_CTX_free(ctx) + + if key.KeyType() == KeyTypeED25519 { + // do ED specific one-shot sign + + if method != nil || len(data) == 0 { + return nil, errors.New("signpkcs1v15: 0-length data or non-null digest") + } + + if 1 != C.X_EVP_DigestSignInit(ctx, nil, nil, nil, key.key) { + return nil, errors.New("signpkcs1v15: failed to init signature") + } + + // evp signatures are 64 bytes + sig := make([]byte, 64, 64) + var sigblen C.size_t = 64 + if 1 != C.X_EVP_DigestSign(ctx, + ((*C.uchar)(unsafe.Pointer(&sig[0]))), + &sigblen, + (*C.uchar)(unsafe.Pointer(&data[0])), + C.size_t(len(data))) { + return nil, 
errors.New("signpkcs1v15: failed to do one-shot signature") + } + + return sig[:sigblen], nil + } else { + if 1 != C.X_EVP_SignInit(ctx, method) { + return nil, errors.New("signpkcs1v15: failed to init signature") + } + if len(data) > 0 { + if 1 != C.X_EVP_SignUpdate( + ctx, unsafe.Pointer(&data[0]), C.uint(len(data))) { + return nil, errors.New("signpkcs1v15: failed to update signature") + } + } + sig := make([]byte, C.X_EVP_PKEY_size(key.key)) + var sigblen C.uint + if 1 != C.X_EVP_SignFinal(ctx, + ((*C.uchar)(unsafe.Pointer(&sig[0]))), &sigblen, key.key) { + return nil, errors.New("signpkcs1v15: failed to finalize signature") + } + return sig[:sigblen], nil + } +} + +func (key *pKey) VerifyPKCS1v15(method Method, data, sig []byte) error { + ctx := C.X_EVP_MD_CTX_new() + defer C.X_EVP_MD_CTX_free(ctx) + + if key.KeyType() == KeyTypeED25519 { + // do ED specific one-shot sign + + if method != nil || len(data) == 0 || len(sig) == 0 { + return errors.New("verifypkcs1v15: 0-length data or sig or non-null digest") + } + + if 1 != C.X_EVP_DigestVerifyInit(ctx, nil, nil, nil, key.key) { + return errors.New("verifypkcs1v15: failed to init verify") + } + + if 1 != C.X_EVP_DigestVerify(ctx, + ((*C.uchar)(unsafe.Pointer(&sig[0]))), + C.size_t(len(sig)), + (*C.uchar)(unsafe.Pointer(&data[0])), + C.size_t(len(data))) { + return errors.New("verifypkcs1v15: failed to do one-shot verify") + } + + return nil + + } else { + if 1 != C.X_EVP_VerifyInit(ctx, method) { + return errors.New("verifypkcs1v15: failed to init verify") + } + if len(data) > 0 { + if 1 != C.X_EVP_VerifyUpdate( + ctx, unsafe.Pointer(&data[0]), C.uint(len(data))) { + return errors.New("verifypkcs1v15: failed to update verify") + } + } + if 1 != C.X_EVP_VerifyFinal(ctx, + ((*C.uchar)(unsafe.Pointer(&sig[0]))), C.uint(len(sig)), key.key) { + return errors.New("verifypkcs1v15: failed to finalize verify") + } + return nil + } +} + +func (key *pKey) MarshalPKCS1PrivateKeyPEM() (pem_block []byte, + err error) { + bio 
:= C.BIO_new(C.BIO_s_mem()) + if bio == nil { + return nil, errors.New("failed to allocate memory BIO") + } + defer C.BIO_free(bio) + + // PEM_write_bio_PrivateKey_traditional will use the key-specific PKCS1 + // format if one is available for that key type, otherwise it will encode + // to a PKCS8 key. + if int(C.X_PEM_write_bio_PrivateKey_traditional(bio, key.key, nil, nil, + C.int(0), nil, nil)) != 1 { + return nil, errors.New("failed dumping private key") + } + + return ioutil.ReadAll(asAnyBio(bio)) +} + +func (key *pKey) MarshalPKCS1PrivateKeyDER() (der_block []byte, + err error) { + bio := C.BIO_new(C.BIO_s_mem()) + if bio == nil { + return nil, errors.New("failed to allocate memory BIO") + } + defer C.BIO_free(bio) + + if int(C.i2d_PrivateKey_bio(bio, key.key)) != 1 { + return nil, errors.New("failed dumping private key der") + } + + return ioutil.ReadAll(asAnyBio(bio)) +} + +func (key *pKey) MarshalPKIXPublicKeyPEM() (pem_block []byte, + err error) { + bio := C.BIO_new(C.BIO_s_mem()) + if bio == nil { + return nil, errors.New("failed to allocate memory BIO") + } + defer C.BIO_free(bio) + + if int(C.PEM_write_bio_PUBKEY(bio, key.key)) != 1 { + return nil, errors.New("failed dumping public key pem") + } + + return ioutil.ReadAll(asAnyBio(bio)) +} + +func (key *pKey) MarshalPKIXPublicKeyDER() (der_block []byte, + err error) { + bio := C.BIO_new(C.BIO_s_mem()) + if bio == nil { + return nil, errors.New("failed to allocate memory BIO") + } + defer C.BIO_free(bio) + + if int(C.i2d_PUBKEY_bio(bio, key.key)) != 1 { + return nil, errors.New("failed dumping public key der") + } + + return ioutil.ReadAll(asAnyBio(bio)) +} + +// LoadPrivateKeyFromPEM loads a private key from a PEM-encoded block. 
+func LoadPrivateKeyFromPEM(pem_block []byte) (PrivateKey, error) { + if len(pem_block) == 0 { + return nil, errors.New("empty pem block") + } + bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]), + C.int(len(pem_block))) + if bio == nil { + return nil, errors.New("failed creating bio") + } + defer C.BIO_free(bio) + + key := C.PEM_read_bio_PrivateKey(bio, nil, nil, nil) + if key == nil { + return nil, errors.New("failed reading private key") + } + + p := &pKey{key: key} + runtime.SetFinalizer(p, func(p *pKey) { + C.X_EVP_PKEY_free(p.key) + }) + return p, nil +} + +// LoadPrivateKeyFromPEMWithPassword loads a private key from a PEM-encoded block. +func LoadPrivateKeyFromPEMWithPassword(pem_block []byte, password string) ( + PrivateKey, error) { + if len(pem_block) == 0 { + return nil, errors.New("empty pem block") + } + bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]), + C.int(len(pem_block))) + if bio == nil { + return nil, errors.New("failed creating bio") + } + defer C.BIO_free(bio) + cs := C.CString(password) + defer C.free(unsafe.Pointer(cs)) + key := C.PEM_read_bio_PrivateKey(bio, nil, nil, unsafe.Pointer(cs)) + if key == nil { + return nil, errors.New("failed reading private key") + } + + p := &pKey{key: key} + runtime.SetFinalizer(p, func(p *pKey) { + C.X_EVP_PKEY_free(p.key) + }) + return p, nil +} + +// LoadPrivateKeyFromDER loads a private key from a DER-encoded block. 
+func LoadPrivateKeyFromDER(der_block []byte) (PrivateKey, error) { + if len(der_block) == 0 { + return nil, errors.New("empty der block") + } + bio := C.BIO_new_mem_buf(unsafe.Pointer(&der_block[0]), + C.int(len(der_block))) + if bio == nil { + return nil, errors.New("failed creating bio") + } + defer C.BIO_free(bio) + + key := C.d2i_PrivateKey_bio(bio, nil) + if key == nil { + return nil, errors.New("failed reading private key der") + } + + p := &pKey{key: key} + runtime.SetFinalizer(p, func(p *pKey) { + C.X_EVP_PKEY_free(p.key) + }) + return p, nil +} + +// LoadPrivateKeyFromPEMWidthPassword loads a private key from a PEM-encoded block. +// Backwards-compatible with typo +func LoadPrivateKeyFromPEMWidthPassword(pem_block []byte, password string) ( + PrivateKey, error) { + return LoadPrivateKeyFromPEMWithPassword(pem_block, password) +} + +// LoadPublicKeyFromPEM loads a public key from a PEM-encoded block. +func LoadPublicKeyFromPEM(pem_block []byte) (PublicKey, error) { + if len(pem_block) == 0 { + return nil, errors.New("empty pem block") + } + bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]), + C.int(len(pem_block))) + if bio == nil { + return nil, errors.New("failed creating bio") + } + defer C.BIO_free(bio) + + key := C.PEM_read_bio_PUBKEY(bio, nil, nil, nil) + if key == nil { + return nil, errors.New("failed reading public key der") + } + + p := &pKey{key: key} + runtime.SetFinalizer(p, func(p *pKey) { + C.X_EVP_PKEY_free(p.key) + }) + return p, nil +} + +// LoadPublicKeyFromDER loads a public key from a DER-encoded block. 
+func LoadPublicKeyFromDER(der_block []byte) (PublicKey, error) { + if len(der_block) == 0 { + return nil, errors.New("empty der block") + } + bio := C.BIO_new_mem_buf(unsafe.Pointer(&der_block[0]), + C.int(len(der_block))) + if bio == nil { + return nil, errors.New("failed creating bio") + } + defer C.BIO_free(bio) + + key := C.d2i_PUBKEY_bio(bio, nil) + if key == nil { + return nil, errors.New("failed reading public key der") + } + + p := &pKey{key: key} + runtime.SetFinalizer(p, func(p *pKey) { + C.X_EVP_PKEY_free(p.key) + }) + return p, nil +} + +// GenerateRSAKey generates a new RSA private key with an exponent of 3. +func GenerateRSAKey(bits int) (PrivateKey, error) { + return GenerateRSAKeyWithExponent(bits, 3) +} + +// GenerateRSAKeyWithExponent generates a new RSA private key. +func GenerateRSAKeyWithExponent(bits int, exponent int) (PrivateKey, error) { + rsa := C.RSA_generate_key(C.int(bits), C.ulong(exponent), nil, nil) + if rsa == nil { + return nil, errors.New("failed to generate RSA key") + } + key := C.X_EVP_PKEY_new() + if key == nil { + return nil, errors.New("failed to allocate EVP_PKEY") + } + if C.X_EVP_PKEY_assign_charp(key, C.EVP_PKEY_RSA, (*C.char)(unsafe.Pointer(rsa))) != 1 { + C.X_EVP_PKEY_free(key) + return nil, errors.New("failed to assign RSA key") + } + p := &pKey{key: key} + runtime.SetFinalizer(p, func(p *pKey) { + C.X_EVP_PKEY_free(p.key) + }) + return p, nil +} + +// GenerateECKey generates a new elliptic curve private key on the speicified +// curve. 
+func GenerateECKey(curve EllipticCurve) (PrivateKey, error) {
+
+	// Create context for parameter generation
+	paramCtx := C.EVP_PKEY_CTX_new_id(C.EVP_PKEY_EC, nil)
+	if paramCtx == nil {
+		return nil, errors.New("failed creating EC parameter generation context")
+	}
+	defer C.EVP_PKEY_CTX_free(paramCtx)
+
+	// Initialize the parameter generation
+	if int(C.EVP_PKEY_paramgen_init(paramCtx)) != 1 {
+		return nil, errors.New("failed initializing EC parameter generation context")
+	}
+
+	// Set curve in EC parameter generation context
+	if int(C.X_EVP_PKEY_CTX_set_ec_paramgen_curve_nid(paramCtx, C.int(curve))) != 1 {
+		return nil, errors.New("failed setting curve in EC parameter generation context")
+	}
+
+	// Create parameter object
+	var params *C.EVP_PKEY
+	if int(C.EVP_PKEY_paramgen(paramCtx, &params)) != 1 {
+		return nil, errors.New("failed creating EC key generation parameters")
+	}
+	defer C.EVP_PKEY_free(params)
+
+	// Create context for the key generation
+	keyCtx := C.EVP_PKEY_CTX_new(params, nil)
+	if keyCtx == nil {
+		return nil, errors.New("failed creating EC key generation context")
+	}
+	defer C.EVP_PKEY_CTX_free(keyCtx)
+
+	// Generate the key
+	var privKey *C.EVP_PKEY
+	if int(C.EVP_PKEY_keygen_init(keyCtx)) != 1 {
+		return nil, errors.New("failed initializing EC key generation context")
+	}
+	if int(C.EVP_PKEY_keygen(keyCtx, &privKey)) != 1 {
+		return nil, errors.New("failed generating EC private key")
+	}
+
+	p := &pKey{key: privKey}
+	runtime.SetFinalizer(p, func(p *pKey) {
+		C.X_EVP_PKEY_free(p.key)
+	})
+	return p, nil
+}
+
+// GenerateED25519Key generates an Ed25519 key
+func GenerateED25519Key() (PrivateKey, error) {
+	// Key context
+	keyCtx := C.EVP_PKEY_CTX_new_id(C.X_EVP_PKEY_ED25519, nil)
+	if keyCtx == nil {
+		return nil, errors.New("failed creating EC parameter generation context")
+	}
+	defer C.EVP_PKEY_CTX_free(keyCtx)
+
+	// Generate the key
+	var privKey *C.EVP_PKEY
+	if int(C.EVP_PKEY_keygen_init(keyCtx)) != 1 {
+		return nil,
errors.New("failed initializing ED25519 key generation context") + } + if int(C.EVP_PKEY_keygen(keyCtx, &privKey)) != 1 { + return nil, errors.New("failed generating ED25519 private key") + } + + p := &pKey{key: privKey} + runtime.SetFinalizer(p, func(p *pKey) { + C.X_EVP_PKEY_free(p.key) + }) + return p, nil +} diff --git a/vendor/github.com/libp2p/go-openssl/mapping.go b/vendor/github.com/libp2p/go-openssl/mapping.go new file mode 100644 index 0000000000..d78cc70347 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/mapping.go @@ -0,0 +1,62 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package openssl
+
+import (
+	"sync"
+	"unsafe"
+)
+
+// #include <stdlib.h>
+import "C"
+
+type mapping struct {
+	lock   sync.Mutex
+	values map[token]unsafe.Pointer
+}
+
+func newMapping() *mapping {
+	return &mapping{
+		values: make(map[token]unsafe.Pointer),
+	}
+}
+
+type token unsafe.Pointer
+
+func (m *mapping) Add(x unsafe.Pointer) token {
+	res := token(C.malloc(1))
+
+	m.lock.Lock()
+	m.values[res] = x
+	m.lock.Unlock()
+
+	return res
+}
+
+func (m *mapping) Get(x token) unsafe.Pointer {
+	m.lock.Lock()
+	res := m.values[x]
+	m.lock.Unlock()
+
+	return res
+}
+
+func (m *mapping) Del(x token) {
+	m.lock.Lock()
+	delete(m.values, x)
+	m.lock.Unlock()
+
+	C.free(unsafe.Pointer(x))
+}
diff --git a/vendor/github.com/libp2p/go-openssl/md4.go b/vendor/github.com/libp2p/go-openssl/md4.go
new file mode 100644
index 0000000000..e5cc7d8679
--- /dev/null
+++ b/vendor/github.com/libp2p/go-openssl/md4.go
@@ -0,0 +1,89 @@
+// Copyright (C) 2017. See AUTHORS.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package openssl + +// #include "shim.h" +import "C" + +import ( + "errors" + "runtime" + "unsafe" +) + +type MD4Hash struct { + ctx *C.EVP_MD_CTX + engine *Engine +} + +func NewMD4Hash() (*MD4Hash, error) { return NewMD4HashWithEngine(nil) } + +func NewMD4HashWithEngine(e *Engine) (*MD4Hash, error) { + hash := &MD4Hash{engine: e} + hash.ctx = C.X_EVP_MD_CTX_new() + if hash.ctx == nil { + return nil, errors.New("openssl: md4: unable to allocate ctx") + } + runtime.SetFinalizer(hash, func(hash *MD4Hash) { hash.Close() }) + if err := hash.Reset(); err != nil { + return nil, err + } + return hash, nil +} + +func (s *MD4Hash) Close() { + if s.ctx != nil { + C.X_EVP_MD_CTX_free(s.ctx) + s.ctx = nil + } +} + +func (s *MD4Hash) Reset() error { + if 1 != C.X_EVP_DigestInit_ex(s.ctx, C.X_EVP_md4(), engineRef(s.engine)) { + return errors.New("openssl: md4: cannot init digest ctx") + } + return nil +} + +func (s *MD4Hash) Write(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + if 1 != C.X_EVP_DigestUpdate(s.ctx, unsafe.Pointer(&p[0]), + C.size_t(len(p))) { + return 0, errors.New("openssl: md4: cannot update digest") + } + return len(p), nil +} + +func (s *MD4Hash) Sum() (result [16]byte, err error) { + if 1 != C.X_EVP_DigestFinal_ex(s.ctx, + (*C.uchar)(unsafe.Pointer(&result[0])), nil) { + return result, errors.New("openssl: md4: cannot finalize ctx") + } + return result, s.Reset() +} + +func MD4(data []byte) (result [16]byte, err error) { + hash, err := NewMD4Hash() + if err != nil { + return result, err + } + defer hash.Close() + if _, err := hash.Write(data); err != nil { + return result, err + } + return hash.Sum() +} diff --git a/vendor/github.com/libp2p/go-openssl/md5.go b/vendor/github.com/libp2p/go-openssl/md5.go new file mode 100644 index 0000000000..82f2eb2f27 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/md5.go @@ -0,0 +1,89 @@ +// Copyright (C) 2017. See AUTHORS. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openssl + +// #include "shim.h" +import "C" + +import ( + "errors" + "runtime" + "unsafe" +) + +type MD5Hash struct { + ctx *C.EVP_MD_CTX + engine *Engine +} + +func NewMD5Hash() (*MD5Hash, error) { return NewMD5HashWithEngine(nil) } + +func NewMD5HashWithEngine(e *Engine) (*MD5Hash, error) { + hash := &MD5Hash{engine: e} + hash.ctx = C.X_EVP_MD_CTX_new() + if hash.ctx == nil { + return nil, errors.New("openssl: md5: unable to allocate ctx") + } + runtime.SetFinalizer(hash, func(hash *MD5Hash) { hash.Close() }) + if err := hash.Reset(); err != nil { + return nil, err + } + return hash, nil +} + +func (s *MD5Hash) Close() { + if s.ctx != nil { + C.X_EVP_MD_CTX_free(s.ctx) + s.ctx = nil + } +} + +func (s *MD5Hash) Reset() error { + if 1 != C.X_EVP_DigestInit_ex(s.ctx, C.X_EVP_md5(), engineRef(s.engine)) { + return errors.New("openssl: md5: cannot init digest ctx") + } + return nil +} + +func (s *MD5Hash) Write(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + if 1 != C.X_EVP_DigestUpdate(s.ctx, unsafe.Pointer(&p[0]), + C.size_t(len(p))) { + return 0, errors.New("openssl: md5: cannot update digest") + } + return len(p), nil +} + +func (s *MD5Hash) Sum() (result [16]byte, err error) { + if 1 != C.X_EVP_DigestFinal_ex(s.ctx, + (*C.uchar)(unsafe.Pointer(&result[0])), nil) { + return result, errors.New("openssl: md5: cannot finalize ctx") + } + return result, s.Reset() +} + +func 
MD5(data []byte) (result [16]byte, err error) { + hash, err := NewMD5Hash() + if err != nil { + return result, err + } + defer hash.Close() + if _, err := hash.Write(data); err != nil { + return result, err + } + return hash.Sum() +} diff --git a/vendor/github.com/libp2p/go-openssl/net.go b/vendor/github.com/libp2p/go-openssl/net.go new file mode 100644 index 0000000000..54beb8ee92 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/net.go @@ -0,0 +1,147 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openssl + +import ( + "errors" + "net" +) + +type listener struct { + net.Listener + ctx *Ctx +} + +func (l *listener) Accept() (c net.Conn, err error) { + c, err = l.Listener.Accept() + if err != nil { + return nil, err + } + ssl_c, err := Server(c, l.ctx) + if err != nil { + c.Close() + return nil, err + } + return ssl_c, nil +} + +// NewListener wraps an existing net.Listener such that all accepted +// connections are wrapped as OpenSSL server connections using the provided +// context ctx. +func NewListener(inner net.Listener, ctx *Ctx) net.Listener { + return &listener{ + Listener: inner, + ctx: ctx} +} + +// Listen is a wrapper around net.Listen that wraps incoming connections with +// an OpenSSL server connection using the provided context ctx. 
+func Listen(network, laddr string, ctx *Ctx) (net.Listener, error) { + if ctx == nil { + return nil, errors.New("no ssl context provided") + } + l, err := net.Listen(network, laddr) + if err != nil { + return nil, err + } + return NewListener(l, ctx), nil +} + +type DialFlags int + +const ( + InsecureSkipHostVerification DialFlags = 1 << iota + DisableSNI +) + +// Dial will connect to network/address and then wrap the corresponding +// underlying connection with an OpenSSL client connection using context ctx. +// If flags includes InsecureSkipHostVerification, the server certificate's +// hostname will not be checked to match the hostname in addr. Otherwise, flags +// should be 0. +// +// Dial probably won't work for you unless you set a verify location or add +// some certs to the certificate store of the client context you're using. +// This library is not nice enough to use the system certificate store by +// default for you yet. +func Dial(network, addr string, ctx *Ctx, flags DialFlags) (*Conn, error) { + return DialSession(network, addr, ctx, flags, nil) +} + +// DialSession will connect to network/address and then wrap the corresponding +// underlying connection with an OpenSSL client connection using context ctx. +// If flags includes InsecureSkipHostVerification, the server certificate's +// hostname will not be checked to match the hostname in addr. Otherwise, flags +// should be 0. +// +// Dial probably won't work for you unless you set a verify location or add +// some certs to the certificate store of the client context you're using. +// This library is not nice enough to use the system certificate store by +// default for you yet. +// +// If session is not nil it will be used to resume the tls state. The session +// can be retrieved from the GetSession method on the Conn. 
+func DialSession(network, addr string, ctx *Ctx, flags DialFlags, + session []byte) (*Conn, error) { + + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + if ctx == nil { + var err error + ctx, err = NewCtx() + if err != nil { + return nil, err + } + // TODO: use operating system default certificate chain? + } + c, err := net.Dial(network, addr) + if err != nil { + return nil, err + } + conn, err := Client(c, ctx) + if err != nil { + c.Close() + return nil, err + } + if session != nil { + err := conn.setSession(session) + if err != nil { + c.Close() + return nil, err + } + } + if flags&DisableSNI == 0 { + err = conn.SetTlsExtHostName(host) + if err != nil { + conn.Close() + return nil, err + } + } + err = conn.Handshake() + if err != nil { + conn.Close() + return nil, err + } + if flags&InsecureSkipHostVerification == 0 { + err = conn.VerifyHostname(host) + if err != nil { + conn.Close() + return nil, err + } + } + return conn, nil +} diff --git a/vendor/github.com/libp2p/go-openssl/nid.go b/vendor/github.com/libp2p/go-openssl/nid.go new file mode 100644 index 0000000000..936a52e779 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/nid.go @@ -0,0 +1,210 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package openssl + +type NID int + +const ( + NID_undef NID = 0 + NID_rsadsi NID = 1 + NID_pkcs NID = 2 + NID_md2 NID = 3 + NID_md5 NID = 4 + NID_rc4 NID = 5 + NID_rsaEncryption NID = 6 + NID_md2WithRSAEncryption NID = 7 + NID_md5WithRSAEncryption NID = 8 + NID_pbeWithMD2AndDES_CBC NID = 9 + NID_pbeWithMD5AndDES_CBC NID = 10 + NID_X500 NID = 11 + NID_X509 NID = 12 + NID_commonName NID = 13 + NID_countryName NID = 14 + NID_localityName NID = 15 + NID_stateOrProvinceName NID = 16 + NID_organizationName NID = 17 + NID_organizationalUnitName NID = 18 + NID_rsa NID = 19 + NID_pkcs7 NID = 20 + NID_pkcs7_data NID = 21 + NID_pkcs7_signed NID = 22 + NID_pkcs7_enveloped NID = 23 + NID_pkcs7_signedAndEnveloped NID = 24 + NID_pkcs7_digest NID = 25 + NID_pkcs7_encrypted NID = 26 + NID_pkcs3 NID = 27 + NID_dhKeyAgreement NID = 28 + NID_des_ecb NID = 29 + NID_des_cfb64 NID = 30 + NID_des_cbc NID = 31 + NID_des_ede NID = 32 + NID_des_ede3 NID = 33 + NID_idea_cbc NID = 34 + NID_idea_cfb64 NID = 35 + NID_idea_ecb NID = 36 + NID_rc2_cbc NID = 37 + NID_rc2_ecb NID = 38 + NID_rc2_cfb64 NID = 39 + NID_rc2_ofb64 NID = 40 + NID_sha NID = 41 + NID_shaWithRSAEncryption NID = 42 + NID_des_ede_cbc NID = 43 + NID_des_ede3_cbc NID = 44 + NID_des_ofb64 NID = 45 + NID_idea_ofb64 NID = 46 + NID_pkcs9 NID = 47 + NID_pkcs9_emailAddress NID = 48 + NID_pkcs9_unstructuredName NID = 49 + NID_pkcs9_contentType NID = 50 + NID_pkcs9_messageDigest NID = 51 + NID_pkcs9_signingTime NID = 52 + NID_pkcs9_countersignature NID = 53 + NID_pkcs9_challengePassword NID = 54 + NID_pkcs9_unstructuredAddress NID = 55 + NID_pkcs9_extCertAttributes NID = 56 + NID_netscape NID = 57 + NID_netscape_cert_extension NID = 58 + NID_netscape_data_type NID = 59 + NID_des_ede_cfb64 NID = 60 + NID_des_ede3_cfb64 NID = 61 + NID_des_ede_ofb64 NID = 62 + NID_des_ede3_ofb64 NID = 63 + NID_sha1 NID = 64 + NID_sha1WithRSAEncryption NID = 65 + NID_dsaWithSHA NID = 66 + NID_dsa_2 NID = 67 + NID_pbeWithSHA1AndRC2_CBC NID = 68 + 
NID_id_pbkdf2 NID = 69 + NID_dsaWithSHA1_2 NID = 70 + NID_netscape_cert_type NID = 71 + NID_netscape_base_url NID = 72 + NID_netscape_revocation_url NID = 73 + NID_netscape_ca_revocation_url NID = 74 + NID_netscape_renewal_url NID = 75 + NID_netscape_ca_policy_url NID = 76 + NID_netscape_ssl_server_name NID = 77 + NID_netscape_comment NID = 78 + NID_netscape_cert_sequence NID = 79 + NID_desx_cbc NID = 80 + NID_id_ce NID = 81 + NID_subject_key_identifier NID = 82 + NID_key_usage NID = 83 + NID_private_key_usage_period NID = 84 + NID_subject_alt_name NID = 85 + NID_issuer_alt_name NID = 86 + NID_basic_constraints NID = 87 + NID_crl_number NID = 88 + NID_certificate_policies NID = 89 + NID_authority_key_identifier NID = 90 + NID_bf_cbc NID = 91 + NID_bf_ecb NID = 92 + NID_bf_cfb64 NID = 93 + NID_bf_ofb64 NID = 94 + NID_mdc2 NID = 95 + NID_mdc2WithRSA NID = 96 + NID_rc4_40 NID = 97 + NID_rc2_40_cbc NID = 98 + NID_givenName NID = 99 + NID_surname NID = 100 + NID_initials NID = 101 + NID_uniqueIdentifier NID = 102 + NID_crl_distribution_points NID = 103 + NID_md5WithRSA NID = 104 + NID_serialNumber NID = 105 + NID_title NID = 106 + NID_description NID = 107 + NID_cast5_cbc NID = 108 + NID_cast5_ecb NID = 109 + NID_cast5_cfb64 NID = 110 + NID_cast5_ofb64 NID = 111 + NID_pbeWithMD5AndCast5_CBC NID = 112 + NID_dsaWithSHA1 NID = 113 + NID_md5_sha1 NID = 114 + NID_sha1WithRSA NID = 115 + NID_dsa NID = 116 + NID_ripemd160 NID = 117 + NID_ripemd160WithRSA NID = 119 + NID_rc5_cbc NID = 120 + NID_rc5_ecb NID = 121 + NID_rc5_cfb64 NID = 122 + NID_rc5_ofb64 NID = 123 + NID_rle_compression NID = 124 + NID_zlib_compression NID = 125 + NID_ext_key_usage NID = 126 + NID_id_pkix NID = 127 + NID_id_kp NID = 128 + NID_server_auth NID = 129 + NID_client_auth NID = 130 + NID_code_sign NID = 131 + NID_email_protect NID = 132 + NID_time_stamp NID = 133 + NID_ms_code_ind NID = 134 + NID_ms_code_com NID = 135 + NID_ms_ctl_sign NID = 136 + NID_ms_sgc NID = 137 + NID_ms_efs NID = 138 + NID_ns_sgc 
NID = 139 + NID_delta_crl NID = 140 + NID_crl_reason NID = 141 + NID_invalidity_date NID = 142 + NID_sxnet NID = 143 + NID_pbe_WithSHA1And128BitRC4 NID = 144 + NID_pbe_WithSHA1And40BitRC4 NID = 145 + NID_pbe_WithSHA1And3_Key_TripleDES_CBC NID = 146 + NID_pbe_WithSHA1And2_Key_TripleDES_CBC NID = 147 + NID_pbe_WithSHA1And128BitRC2_CBC NID = 148 + NID_pbe_WithSHA1And40BitRC2_CBC NID = 149 + NID_keyBag NID = 150 + NID_pkcs8ShroudedKeyBag NID = 151 + NID_certBag NID = 152 + NID_crlBag NID = 153 + NID_secretBag NID = 154 + NID_safeContentsBag NID = 155 + NID_friendlyName NID = 156 + NID_localKeyID NID = 157 + NID_x509Certificate NID = 158 + NID_sdsiCertificate NID = 159 + NID_x509Crl NID = 160 + NID_pbes2 NID = 161 + NID_pbmac1 NID = 162 + NID_hmacWithSHA1 NID = 163 + NID_id_qt_cps NID = 164 + NID_id_qt_unotice NID = 165 + NID_rc2_64_cbc NID = 166 + NID_SMIMECapabilities NID = 167 + NID_pbeWithMD2AndRC2_CBC NID = 168 + NID_pbeWithMD5AndRC2_CBC NID = 169 + NID_pbeWithSHA1AndDES_CBC NID = 170 + NID_ms_ext_req NID = 171 + NID_ext_req NID = 172 + NID_name NID = 173 + NID_dnQualifier NID = 174 + NID_id_pe NID = 175 + NID_id_ad NID = 176 + NID_info_access NID = 177 + NID_ad_OCSP NID = 178 + NID_ad_ca_issuers NID = 179 + NID_OCSP_sign NID = 180 + NID_X9_62_id_ecPublicKey NID = 408 + NID_hmac NID = 855 + NID_cmac NID = 894 + NID_dhpublicnumber NID = 920 + NID_tls1_prf NID = 1021 + NID_hkdf NID = 1036 + NID_X25519 NID = 1034 + NID_X448 NID = 1035 + NID_ED25519 NID = 1087 + NID_ED448 NID = 1088 +) diff --git a/vendor/github.com/libp2p/go-openssl/object.go b/vendor/github.com/libp2p/go-openssl/object.go new file mode 100644 index 0000000000..86ab1e4c00 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/object.go @@ -0,0 +1,24 @@ +// Copyright (C) 2020. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openssl + +// #include "shim.h" +import "C" + +// CreateObjectIdentifier creates ObjectIdentifier and returns NID for the created +// ObjectIdentifier +func CreateObjectIdentifier(oid string, shortName string, longName string) int { + return int(C.OBJ_create(C.CString(oid), C.CString(shortName), C.CString(longName))) +} diff --git a/vendor/github.com/libp2p/go-openssl/pem.go b/vendor/github.com/libp2p/go-openssl/pem.go new file mode 100644 index 0000000000..c8b0c1cf19 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/pem.go @@ -0,0 +1,32 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package openssl + +import ( + "regexp" +) + +var pemSplit *regexp.Regexp = regexp.MustCompile(`(?sm)` + + `(^-----[\s-]*?BEGIN.*?-----$` + + `.*?` + + `^-----[\s-]*?END.*?-----$)`) + +func SplitPEM(data []byte) [][]byte { + var results [][]byte + for _, block := range pemSplit.FindAll(data, -1) { + results = append(results, block) + } + return results +} diff --git a/vendor/github.com/libp2p/go-openssl/sha1.go b/vendor/github.com/libp2p/go-openssl/sha1.go new file mode 100644 index 0000000000..c227bee846 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/sha1.go @@ -0,0 +1,96 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package openssl + +// #include "shim.h" +import "C" + +import ( + "errors" + "runtime" + "unsafe" +) + +type SHA1Hash struct { + ctx *C.EVP_MD_CTX + engine *Engine +} + +func NewSHA1Hash() (*SHA1Hash, error) { return NewSHA1HashWithEngine(nil) } + +func NewSHA1HashWithEngine(e *Engine) (*SHA1Hash, error) { + hash := &SHA1Hash{engine: e} + hash.ctx = C.X_EVP_MD_CTX_new() + if hash.ctx == nil { + return nil, errors.New("openssl: sha1: unable to allocate ctx") + } + runtime.SetFinalizer(hash, func(hash *SHA1Hash) { hash.Close() }) + if err := hash.Reset(); err != nil { + return nil, err + } + return hash, nil +} + +func (s *SHA1Hash) Close() { + if s.ctx != nil { + C.X_EVP_MD_CTX_free(s.ctx) + s.ctx = nil + } +} + +func engineRef(e *Engine) *C.ENGINE { + if e == nil { + return nil + } + return e.e +} + +func (s *SHA1Hash) Reset() error { + if 1 != C.X_EVP_DigestInit_ex(s.ctx, C.X_EVP_sha1(), engineRef(s.engine)) { + return errors.New("openssl: sha1: cannot init digest ctx") + } + return nil +} + +func (s *SHA1Hash) Write(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + if 1 != C.X_EVP_DigestUpdate(s.ctx, unsafe.Pointer(&p[0]), + C.size_t(len(p))) { + return 0, errors.New("openssl: sha1: cannot update digest") + } + return len(p), nil +} + +func (s *SHA1Hash) Sum() (result [20]byte, err error) { + if 1 != C.X_EVP_DigestFinal_ex(s.ctx, + (*C.uchar)(unsafe.Pointer(&result[0])), nil) { + return result, errors.New("openssl: sha1: cannot finalize ctx") + } + return result, s.Reset() +} + +func SHA1(data []byte) (result [20]byte, err error) { + hash, err := NewSHA1Hash() + if err != nil { + return result, err + } + defer hash.Close() + if _, err := hash.Write(data); err != nil { + return result, err + } + return hash.Sum() +} diff --git a/vendor/github.com/libp2p/go-openssl/sha256.go b/vendor/github.com/libp2p/go-openssl/sha256.go new file mode 100644 index 0000000000..d25c7a959d --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/sha256.go @@ 
-0,0 +1,89 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openssl + +// #include "shim.h" +import "C" + +import ( + "errors" + "runtime" + "unsafe" +) + +type SHA256Hash struct { + ctx *C.EVP_MD_CTX + engine *Engine +} + +func NewSHA256Hash() (*SHA256Hash, error) { return NewSHA256HashWithEngine(nil) } + +func NewSHA256HashWithEngine(e *Engine) (*SHA256Hash, error) { + hash := &SHA256Hash{engine: e} + hash.ctx = C.X_EVP_MD_CTX_new() + if hash.ctx == nil { + return nil, errors.New("openssl: sha256: unable to allocate ctx") + } + runtime.SetFinalizer(hash, func(hash *SHA256Hash) { hash.Close() }) + if err := hash.Reset(); err != nil { + return nil, err + } + return hash, nil +} + +func (s *SHA256Hash) Close() { + if s.ctx != nil { + C.X_EVP_MD_CTX_free(s.ctx) + s.ctx = nil + } +} + +func (s *SHA256Hash) Reset() error { + if 1 != C.X_EVP_DigestInit_ex(s.ctx, C.X_EVP_sha256(), engineRef(s.engine)) { + return errors.New("openssl: sha256: cannot init digest ctx") + } + return nil +} + +func (s *SHA256Hash) Write(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + if 1 != C.X_EVP_DigestUpdate(s.ctx, unsafe.Pointer(&p[0]), + C.size_t(len(p))) { + return 0, errors.New("openssl: sha256: cannot update digest") + } + return len(p), nil +} + +func (s *SHA256Hash) Sum() (result [32]byte, err error) { + if 1 != C.X_EVP_DigestFinal_ex(s.ctx, + (*C.uchar)(unsafe.Pointer(&result[0])), nil) { + return 
result, errors.New("openssl: sha256: cannot finalize ctx") + } + return result, s.Reset() +} + +func SHA256(data []byte) (result [32]byte, err error) { + hash, err := NewSHA256Hash() + if err != nil { + return result, err + } + defer hash.Close() + if _, err := hash.Write(data); err != nil { + return result, err + } + return hash.Sum() +} diff --git a/vendor/github.com/libp2p/go-openssl/shim.c b/vendor/github.com/libp2p/go-openssl/shim.c new file mode 100644 index 0000000000..6e680841cb --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/shim.c @@ -0,0 +1,770 @@ +/* + * Copyright (C) 2014 Space Monkey, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include "_cgo_export.h" + +/* + * Functions defined in other .c files + */ +extern int go_init_locks(); +extern void go_thread_locking_callback(int, int, const char*, int); +extern unsigned long go_thread_id_callback(); +static int go_write_bio_puts(BIO *b, const char *str) { + return go_write_bio_write(b, (char*)str, (int)strlen(str)); +} + +/* + ************************************************ + * v1.1.1 and later implementation + ************************************************ + */ +#if OPENSSL_VERSION_NUMBER >= 0x1010100fL + +const int X_ED25519_SUPPORT = 1; +int X_EVP_PKEY_ED25519 = EVP_PKEY_ED25519; + +int X_EVP_DigestSignInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, + const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey){ + return EVP_DigestSignInit(ctx, pctx, type, e, pkey); +} + +int X_EVP_DigestSign(EVP_MD_CTX *ctx, unsigned char *sigret, + size_t *siglen, const unsigned char *tbs, size_t tbslen) { + return EVP_DigestSign(ctx, sigret, siglen, tbs, tbslen); +} + + +int X_EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, + const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey){ + return EVP_DigestVerifyInit(ctx, pctx, type, e, pkey); +} + +int X_EVP_DigestVerify(EVP_MD_CTX *ctx, const unsigned char *sigret, + size_t siglen, const unsigned char *tbs, size_t tbslen){ + return EVP_DigestVerify(ctx, sigret, siglen, tbs, tbslen); +} + +#else + +const int X_ED25519_SUPPORT = 0; +int X_EVP_PKEY_ED25519 = EVP_PKEY_NONE; + +int X_EVP_DigestSignInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, + const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey){ + return 0; +} + +int X_EVP_DigestSign(EVP_MD_CTX *ctx, unsigned char *sigret, + size_t *siglen, const unsigned char *tbs, size_t tbslen) { + return 0; +} + + +int X_EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, + const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey){ + return 0; +} + +int X_EVP_DigestVerify(EVP_MD_CTX *ctx, const unsigned 
char *sigret, + size_t siglen, const unsigned char *tbs, size_t tbslen){ + return 0; +} + +#endif + +/* + ************************************************ + * v1.1.X and later implementation + ************************************************ + */ +#if OPENSSL_VERSION_NUMBER >= 0x1010000fL + +void X_BIO_set_data(BIO* bio, void* data) { + BIO_set_data(bio, data); +} + +void* X_BIO_get_data(BIO* bio) { + return BIO_get_data(bio); +} + +EVP_MD_CTX* X_EVP_MD_CTX_new() { + return EVP_MD_CTX_new(); +} + +void X_EVP_MD_CTX_free(EVP_MD_CTX* ctx) { + EVP_MD_CTX_free(ctx); +} + +static int x_bio_create(BIO *b) { + BIO_set_shutdown(b, 1); + BIO_set_init(b, 1); + BIO_set_data(b, NULL); + BIO_clear_flags(b, ~0); + return 1; +} + +static int x_bio_free(BIO *b) { + return 1; +} + +static BIO_METHOD *writeBioMethod; +static BIO_METHOD *readBioMethod; + +BIO_METHOD* BIO_s_readBio() { return readBioMethod; } +BIO_METHOD* BIO_s_writeBio() { return writeBioMethod; } + +int x_bio_init_methods() { + writeBioMethod = BIO_meth_new(BIO_TYPE_SOURCE_SINK, "Go Write BIO"); + if (!writeBioMethod) { + return 1; + } + if (1 != BIO_meth_set_write(writeBioMethod, + (int (*)(BIO *, const char *, int))go_write_bio_write)) { + return 2; + } + if (1 != BIO_meth_set_puts(writeBioMethod, go_write_bio_puts)) { + return 3; + } + if (1 != BIO_meth_set_ctrl(writeBioMethod, go_write_bio_ctrl)) { + return 4; + } + if (1 != BIO_meth_set_create(writeBioMethod, x_bio_create)) { + return 5; + } + if (1 != BIO_meth_set_destroy(writeBioMethod, x_bio_free)) { + return 6; + } + + readBioMethod = BIO_meth_new(BIO_TYPE_SOURCE_SINK, "Go Read BIO"); + if (!readBioMethod) { + return 7; + } + if (1 != BIO_meth_set_read(readBioMethod, go_read_bio_read)) { + return 8; + } + if (1 != BIO_meth_set_ctrl(readBioMethod, go_read_bio_ctrl)) { + return 9; + } + if (1 != BIO_meth_set_create(readBioMethod, x_bio_create)) { + return 10; + } + if (1 != BIO_meth_set_destroy(readBioMethod, x_bio_free)) { + return 11; + } + + return 0; +} + 
+const EVP_MD *X_EVP_dss() { + return NULL; +} + +const EVP_MD *X_EVP_dss1() { + return NULL; +} + +const EVP_MD *X_EVP_sha() { + return NULL; +} + +int X_EVP_CIPHER_CTX_encrypting(const EVP_CIPHER_CTX *ctx) { + return EVP_CIPHER_CTX_encrypting(ctx); +} + +int X_X509_add_ref(X509* x509) { + return X509_up_ref(x509); +} + +const ASN1_TIME *X_X509_get0_notBefore(const X509 *x) { + return X509_get0_notBefore(x); +} + +const ASN1_TIME *X_X509_get0_notAfter(const X509 *x) { + return X509_get0_notAfter(x); +} + +HMAC_CTX *X_HMAC_CTX_new(void) { + return HMAC_CTX_new(); +} + +void X_HMAC_CTX_free(HMAC_CTX *ctx) { + HMAC_CTX_free(ctx); +} + +int X_PEM_write_bio_PrivateKey_traditional(BIO *bio, EVP_PKEY *key, const EVP_CIPHER *enc, unsigned char *kstr, int klen, pem_password_cb *cb, void *u) { + return PEM_write_bio_PrivateKey_traditional(bio, key, enc, kstr, klen, cb, u); +} + +#endif + +/* + ************************************************ + * v1.0.X implementation + ************************************************ + */ +#if OPENSSL_VERSION_NUMBER < 0x1010000fL + +static int x_bio_create(BIO *b) { + b->shutdown = 1; + b->init = 1; + b->num = -1; + b->ptr = NULL; + b->flags = 0; + return 1; +} + +static int x_bio_free(BIO *b) { + return 1; +} + +static BIO_METHOD writeBioMethod = { + BIO_TYPE_SOURCE_SINK, + "Go Write BIO", + (int (*)(BIO *, const char *, int))go_write_bio_write, + NULL, + go_write_bio_puts, + NULL, + go_write_bio_ctrl, + x_bio_create, + x_bio_free, + NULL}; + +static BIO_METHOD* BIO_s_writeBio() { return &writeBioMethod; } + +static BIO_METHOD readBioMethod = { + BIO_TYPE_SOURCE_SINK, + "Go Read BIO", + NULL, + go_read_bio_read, + NULL, + NULL, + go_read_bio_ctrl, + x_bio_create, + x_bio_free, + NULL}; + +static BIO_METHOD* BIO_s_readBio() { return &readBioMethod; } + +int x_bio_init_methods() { + /* statically initialized above */ + return 0; +} + +void X_BIO_set_data(BIO* bio, void* data) { + bio->ptr = data; +} + +void* X_BIO_get_data(BIO* bio) { + 
return bio->ptr; +} + +EVP_MD_CTX* X_EVP_MD_CTX_new() { + return EVP_MD_CTX_create(); +} + +void X_EVP_MD_CTX_free(EVP_MD_CTX* ctx) { + EVP_MD_CTX_destroy(ctx); +} + +int X_X509_add_ref(X509* x509) { + CRYPTO_add(&x509->references, 1, CRYPTO_LOCK_X509); + return 1; +} + +const ASN1_TIME *X_X509_get0_notBefore(const X509 *x) { + return x->cert_info->validity->notBefore; +} + +const ASN1_TIME *X_X509_get0_notAfter(const X509 *x) { + return x->cert_info->validity->notAfter; +} + +const EVP_MD *X_EVP_dss() { + return EVP_dss(); +} + +const EVP_MD *X_EVP_dss1() { + return EVP_dss1(); +} + +const EVP_MD *X_EVP_sha() { + return EVP_sha(); +} + +int X_EVP_CIPHER_CTX_encrypting(const EVP_CIPHER_CTX *ctx) { + return ctx->encrypt; +} + +HMAC_CTX *X_HMAC_CTX_new(void) { + /* v1.1.0 uses a OPENSSL_zalloc to allocate the memory which does not exist + * in previous versions. malloc+memset to get the same behavior */ + HMAC_CTX *ctx = (HMAC_CTX *)OPENSSL_malloc(sizeof(HMAC_CTX)); + if (ctx) { + memset(ctx, 0, sizeof(HMAC_CTX)); + HMAC_CTX_init(ctx); + } + return ctx; +} + +void X_HMAC_CTX_free(HMAC_CTX *ctx) { + if (ctx) { + HMAC_CTX_cleanup(ctx); + OPENSSL_free(ctx); + } +} + +int X_PEM_write_bio_PrivateKey_traditional(BIO *bio, EVP_PKEY *key, const EVP_CIPHER *enc, unsigned char *kstr, int klen, pem_password_cb *cb, void *u) { + /* PEM_write_bio_PrivateKey always tries to use the PKCS8 format if it + * is available, instead of using the "traditional" format as stated in the + * OpenSSL man page. + * i2d_PrivateKey should give us the correct DER encoding, so we'll just + * use PEM_ASN1_write_bio directly to write the DER encoding with the correct + * type header. 
*/ + + int ppkey_id, pkey_base_id, ppkey_flags; + const char *pinfo, *ppem_str; + char pem_type_str[80]; + + // Lookup the ASN1 method information to get the pem type + if (EVP_PKEY_asn1_get0_info(&ppkey_id, &pkey_base_id, &ppkey_flags, &pinfo, &ppem_str, key->ameth) != 1) { + return 0; + } + // Set up the PEM type string + if (BIO_snprintf(pem_type_str, 80, "%s PRIVATE KEY", ppem_str) <= 0) { + // Failed to write out the pem type string, something is really wrong. + return 0; + } + // Write out everything to the BIO + return PEM_ASN1_write_bio((i2d_of_void *)i2d_PrivateKey, + pem_type_str, bio, key, enc, kstr, klen, cb, u); +} + +#endif + +/* + ************************************************ + * common implementation + ************************************************ + */ + +int X_shim_init() { + int rc = 0; + + OPENSSL_config(NULL); + ENGINE_load_builtin_engines(); + SSL_load_error_strings(); + SSL_library_init(); + OpenSSL_add_all_algorithms(); + // + // Set up OPENSSL thread safety callbacks. 
+ rc = go_init_locks(); + if (rc != 0) { + return rc; + } + CRYPTO_set_locking_callback(go_thread_locking_callback); + CRYPTO_set_id_callback(go_thread_id_callback); + + rc = x_bio_init_methods(); + if (rc != 0) { + return rc; + } + + return 0; +} + +void * X_OPENSSL_malloc(size_t size) { + return OPENSSL_malloc(size); +} + +void X_OPENSSL_free(void *ref) { + OPENSSL_free(ref); +} + +long X_SSL_set_options(SSL* ssl, long options) { + return SSL_set_options(ssl, options); +} + +long X_SSL_get_options(SSL* ssl) { + return SSL_get_options(ssl); +} + +long X_SSL_clear_options(SSL* ssl, long options) { + return SSL_clear_options(ssl, options); +} + +long X_SSL_set_tlsext_host_name(SSL *ssl, const char *name) { + return SSL_set_tlsext_host_name(ssl, name); +} +const char * X_SSL_get_cipher_name(const SSL *ssl) { + return SSL_get_cipher_name(ssl); +} +int X_SSL_session_reused(SSL *ssl) { + return SSL_session_reused(ssl); +} + +int X_SSL_new_index() { + return SSL_get_ex_new_index(0, NULL, NULL, NULL, NULL); +} + +int X_SSL_verify_cb(int ok, X509_STORE_CTX* store) { + SSL* ssl = (SSL *)X509_STORE_CTX_get_ex_data(store, + SSL_get_ex_data_X509_STORE_CTX_idx()); + void* p = SSL_get_ex_data(ssl, get_ssl_idx()); + // get the pointer to the go Ctx object and pass it back into the thunk + return go_ssl_verify_cb_thunk(p, ok, store); +} + +const SSL_METHOD *X_SSLv23_method() { + return SSLv23_method(); +} + +const SSL_METHOD *X_SSLv3_method() { +#ifndef OPENSSL_NO_SSL3_METHOD + return SSLv3_method(); +#else + return NULL; +#endif +} + +const SSL_METHOD *X_TLSv1_method() { + return TLSv1_method(); +} + +const SSL_METHOD *X_TLSv1_1_method() { +#if defined(TLS1_1_VERSION) && !defined(OPENSSL_SYSNAME_MACOSX) + return TLSv1_1_method(); +#else + return NULL; +#endif +} + +const SSL_METHOD *X_TLSv1_2_method() { +#if defined(TLS1_2_VERSION) && !defined(OPENSSL_SYSNAME_MACOSX) + return TLSv1_2_method(); +#else + return NULL; +#endif +} + +int X_SSL_CTX_new_index() { + return 
SSL_CTX_get_ex_new_index(0, NULL, NULL, NULL, NULL); +} + +long X_SSL_CTX_set_options(SSL_CTX* ctx, long options) { + return SSL_CTX_set_options(ctx, options); +} + +long X_SSL_CTX_clear_options(SSL_CTX* ctx, long options) { + return SSL_CTX_clear_options(ctx, options); +} + +long X_SSL_CTX_get_options(SSL_CTX* ctx) { + return SSL_CTX_get_options(ctx); +} + +long X_SSL_CTX_set_mode(SSL_CTX* ctx, long modes) { + return SSL_CTX_set_mode(ctx, modes); +} + +long X_SSL_CTX_get_mode(SSL_CTX* ctx) { + return SSL_CTX_get_mode(ctx); +} + +long X_SSL_CTX_set_session_cache_mode(SSL_CTX* ctx, long modes) { + return SSL_CTX_set_session_cache_mode(ctx, modes); +} + +long X_SSL_CTX_sess_set_cache_size(SSL_CTX* ctx, long t) { + return SSL_CTX_sess_set_cache_size(ctx, t); +} + +long X_SSL_CTX_sess_get_cache_size(SSL_CTX* ctx) { + return SSL_CTX_sess_get_cache_size(ctx); +} + +long X_SSL_CTX_set_timeout(SSL_CTX* ctx, long t) { + return SSL_CTX_set_timeout(ctx, t); +} + +long X_SSL_CTX_get_timeout(SSL_CTX* ctx) { + return SSL_CTX_get_timeout(ctx); +} + +long X_SSL_CTX_add_extra_chain_cert(SSL_CTX* ctx, X509 *cert) { + return SSL_CTX_add_extra_chain_cert(ctx, cert); +} + +long X_SSL_CTX_set_tmp_ecdh(SSL_CTX* ctx, EC_KEY *key) { + return SSL_CTX_set_tmp_ecdh(ctx, key); +} + +long X_SSL_CTX_set_tlsext_servername_callback( + SSL_CTX* ctx, int (*cb)(SSL *con, int *ad, void *args)) { + return SSL_CTX_set_tlsext_servername_callback(ctx, cb); +} + +int X_SSL_CTX_verify_cb(int ok, X509_STORE_CTX* store) { + SSL* ssl = (SSL *)X509_STORE_CTX_get_ex_data(store, + SSL_get_ex_data_X509_STORE_CTX_idx()); + SSL_CTX* ssl_ctx = SSL_get_SSL_CTX(ssl); + void* p = SSL_CTX_get_ex_data(ssl_ctx, get_ssl_ctx_idx()); + // get the pointer to the go Ctx object and pass it back into the thunk + return go_ssl_ctx_verify_cb_thunk(p, ok, store); +} + +long X_SSL_CTX_set_tmp_dh(SSL_CTX* ctx, DH *dh) { + return SSL_CTX_set_tmp_dh(ctx, dh); +} + +long X_PEM_read_DHparams(SSL_CTX* ctx, DH *dh) { + return 
SSL_CTX_set_tmp_dh(ctx, dh); +} + +int X_SSL_CTX_set_tlsext_ticket_key_cb(SSL_CTX *sslctx, + int (*cb)(SSL *s, unsigned char key_name[16], + unsigned char iv[EVP_MAX_IV_LENGTH], + EVP_CIPHER_CTX *ctx, HMAC_CTX *hctx, int enc)) { + return SSL_CTX_set_tlsext_ticket_key_cb(sslctx, cb); +} + +int X_SSL_CTX_ticket_key_cb(SSL *s, unsigned char key_name[16], + unsigned char iv[EVP_MAX_IV_LENGTH], + EVP_CIPHER_CTX *cctx, HMAC_CTX *hctx, int enc) { + + SSL_CTX* ssl_ctx = SSL_get_SSL_CTX(s); + void* p = SSL_CTX_get_ex_data(ssl_ctx, get_ssl_ctx_idx()); + // get the pointer to the go Ctx object and pass it back into the thunk + return go_ticket_key_cb_thunk(p, s, key_name, iv, cctx, hctx, enc); +} + +int X_BIO_get_flags(BIO *b) { + return BIO_get_flags(b); +} + +void X_BIO_set_flags(BIO *b, int flags) { + return BIO_set_flags(b, flags); +} + +void X_BIO_clear_flags(BIO *b, int flags) { + BIO_clear_flags(b, flags); +} + +int X_BIO_read(BIO *b, void *buf, int len) { + return BIO_read(b, buf, len); +} + +int X_BIO_write(BIO *b, const void *buf, int len) { + return BIO_write(b, buf, len); +} + +BIO *X_BIO_new_write_bio() { + return BIO_new(BIO_s_writeBio()); +} + +BIO *X_BIO_new_read_bio() { + return BIO_new(BIO_s_readBio()); +} + +const EVP_MD *X_EVP_get_digestbyname(const char *name) { + return EVP_get_digestbyname(name); +} + +const EVP_MD *X_EVP_md_null() { + return EVP_md_null(); +} + +const EVP_MD *X_EVP_md5() { + return EVP_md5(); +} + +const EVP_MD *X_EVP_md4() { + return EVP_md4(); +} + +const EVP_MD *X_EVP_ripemd160() { + return EVP_ripemd160(); +} + +const EVP_MD *X_EVP_sha224() { + return EVP_sha224(); +} + +const EVP_MD *X_EVP_sha1() { + return EVP_sha1(); +} + +const EVP_MD *X_EVP_sha256() { + return EVP_sha256(); +} + +const EVP_MD *X_EVP_sha384() { + return EVP_sha384(); +} + +const EVP_MD *X_EVP_sha512() { + return EVP_sha512(); +} + +int X_EVP_MD_size(const EVP_MD *md) { + return EVP_MD_size(md); +} + +int X_EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, 
ENGINE *impl) { + return EVP_DigestInit_ex(ctx, type, impl); +} + +int X_EVP_DigestUpdate(EVP_MD_CTX *ctx, const void *d, size_t cnt) { + return EVP_DigestUpdate(ctx, d, cnt); +} + +int X_EVP_DigestFinal_ex(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *s) { + return EVP_DigestFinal_ex(ctx, md, s); +} + +int X_EVP_SignInit(EVP_MD_CTX *ctx, const EVP_MD *type) { + return EVP_SignInit(ctx, type); +} + +int X_EVP_SignUpdate(EVP_MD_CTX *ctx, const void *d, unsigned int cnt) { + return EVP_SignUpdate(ctx, d, cnt); +} + +EVP_PKEY *X_EVP_PKEY_new(void) { + return EVP_PKEY_new(); +} + +void X_EVP_PKEY_free(EVP_PKEY *pkey) { + EVP_PKEY_free(pkey); +} + +int X_EVP_PKEY_size(EVP_PKEY *pkey) { + return EVP_PKEY_size(pkey); +} + +struct rsa_st *X_EVP_PKEY_get1_RSA(EVP_PKEY *pkey) { + return EVP_PKEY_get1_RSA(pkey); +} + +int X_EVP_PKEY_set1_RSA(EVP_PKEY *pkey, struct rsa_st *key) { + return EVP_PKEY_set1_RSA(pkey, key); +} + +int X_EVP_PKEY_assign_charp(EVP_PKEY *pkey, int type, char *key) { + return EVP_PKEY_assign(pkey, type, key); +} + +int X_EVP_SignFinal(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *s, EVP_PKEY *pkey) { + return EVP_SignFinal(ctx, md, s, pkey); +} + +int X_EVP_VerifyInit(EVP_MD_CTX *ctx, const EVP_MD *type) { + return EVP_VerifyInit(ctx, type); +} + +int X_EVP_VerifyUpdate(EVP_MD_CTX *ctx, const void *d, + unsigned int cnt) { + return EVP_VerifyUpdate(ctx, d, cnt); +} + +int X_EVP_VerifyFinal(EVP_MD_CTX *ctx, const unsigned char *sigbuf, unsigned int siglen, EVP_PKEY *pkey) { + return EVP_VerifyFinal(ctx, sigbuf, siglen, pkey); +} + +int X_EVP_CIPHER_block_size(EVP_CIPHER *c) { + return EVP_CIPHER_block_size(c); +} + +int X_EVP_CIPHER_key_length(EVP_CIPHER *c) { + return EVP_CIPHER_key_length(c); +} + +int X_EVP_CIPHER_iv_length(EVP_CIPHER *c) { + return EVP_CIPHER_iv_length(c); +} + +int X_EVP_CIPHER_nid(EVP_CIPHER *c) { + return EVP_CIPHER_nid(c); +} + +int X_EVP_CIPHER_CTX_block_size(EVP_CIPHER_CTX *ctx) { + return EVP_CIPHER_CTX_block_size(ctx); 
+} + +int X_EVP_CIPHER_CTX_key_length(EVP_CIPHER_CTX *ctx) { + return EVP_CIPHER_CTX_key_length(ctx); +} + +int X_EVP_CIPHER_CTX_iv_length(EVP_CIPHER_CTX *ctx) { + return EVP_CIPHER_CTX_iv_length(ctx); +} + +void X_EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *ctx, int padding) { + //openssl always returns 1 for set_padding + //hence return value is not checked + EVP_CIPHER_CTX_set_padding(ctx, padding); +} + +const EVP_CIPHER *X_EVP_CIPHER_CTX_cipher(EVP_CIPHER_CTX *ctx) { + return EVP_CIPHER_CTX_cipher(ctx); +} + +int X_EVP_PKEY_CTX_set_ec_paramgen_curve_nid(EVP_PKEY_CTX *ctx, int nid) { + return EVP_PKEY_CTX_set_ec_paramgen_curve_nid(ctx, nid); +} + +size_t X_HMAC_size(const HMAC_CTX *e) { + return HMAC_size(e); +} + +int X_HMAC_Init_ex(HMAC_CTX *ctx, const void *key, int len, const EVP_MD *md, ENGINE *impl) { + return HMAC_Init_ex(ctx, key, len, md, impl); +} + +int X_HMAC_Update(HMAC_CTX *ctx, const unsigned char *data, size_t len) { + return HMAC_Update(ctx, data, len); +} + +int X_HMAC_Final(HMAC_CTX *ctx, unsigned char *md, unsigned int *len) { + return HMAC_Final(ctx, md, len); +} + +int X_sk_X509_num(STACK_OF(X509) *sk) { + return sk_X509_num(sk); +} + +X509 *X_sk_X509_value(STACK_OF(X509)* sk, int i) { + return sk_X509_value(sk, i); +} + +long X_X509_get_version(const X509 *x) { + return X509_get_version(x); +} + +int X_X509_set_version(X509 *x, long version) { + return X509_set_version(x, version); +} diff --git a/vendor/github.com/libp2p/go-openssl/shim.h b/vendor/github.com/libp2p/go-openssl/shim.h new file mode 100644 index 0000000000..75ee0b19f0 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/shim.h @@ -0,0 +1,176 @@ +/* + * Copyright (C) 2014 Space Monkey, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#ifndef SSL_MODE_RELEASE_BUFFERS +#define SSL_MODE_RELEASE_BUFFERS 0 +#endif + +#ifndef SSL_OP_NO_COMPRESSION +#define SSL_OP_NO_COMPRESSION 0 +#endif + +/* shim methods */ +extern int X_shim_init(); + +/* Library methods */ +extern void X_OPENSSL_free(void *ref); +extern void *X_OPENSSL_malloc(size_t size); + +/* SSL methods */ +extern long X_SSL_set_options(SSL* ssl, long options); +extern long X_SSL_get_options(SSL* ssl); +extern long X_SSL_clear_options(SSL* ssl, long options); +extern long X_SSL_set_tlsext_host_name(SSL *ssl, const char *name); +extern const char * X_SSL_get_cipher_name(const SSL *ssl); +extern int X_SSL_session_reused(SSL *ssl); +extern int X_SSL_new_index(); + +extern const SSL_METHOD *X_SSLv23_method(); +extern const SSL_METHOD *X_SSLv3_method(); +extern const SSL_METHOD *X_TLSv1_method(); +extern const SSL_METHOD *X_TLSv1_1_method(); +extern const SSL_METHOD *X_TLSv1_2_method(); + +#if defined SSL_CTRL_SET_TLSEXT_HOSTNAME +extern int sni_cb(SSL *ssl_conn, int *ad, void *arg); +#endif +extern int X_SSL_verify_cb(int ok, X509_STORE_CTX* store); + +/* SSL_CTX methods */ +extern int X_SSL_CTX_new_index(); +extern long X_SSL_CTX_set_options(SSL_CTX* ctx, long options); +extern long X_SSL_CTX_clear_options(SSL_CTX* ctx, long options); +extern long X_SSL_CTX_get_options(SSL_CTX* ctx); +extern long X_SSL_CTX_set_mode(SSL_CTX* ctx, long modes); +extern long X_SSL_CTX_get_mode(SSL_CTX* ctx); 
+extern long X_SSL_CTX_set_session_cache_mode(SSL_CTX* ctx, long modes); +extern long X_SSL_CTX_sess_set_cache_size(SSL_CTX* ctx, long t); +extern long X_SSL_CTX_sess_get_cache_size(SSL_CTX* ctx); +extern long X_SSL_CTX_set_timeout(SSL_CTX* ctx, long t); +extern long X_SSL_CTX_get_timeout(SSL_CTX* ctx); +extern long X_SSL_CTX_add_extra_chain_cert(SSL_CTX* ctx, X509 *cert); +extern long X_SSL_CTX_set_tmp_ecdh(SSL_CTX* ctx, EC_KEY *key); +extern long X_SSL_CTX_set_tlsext_servername_callback(SSL_CTX* ctx, int (*cb)(SSL *con, int *ad, void *args)); +extern int X_SSL_CTX_verify_cb(int ok, X509_STORE_CTX* store); +extern long X_SSL_CTX_set_tmp_dh(SSL_CTX* ctx, DH *dh); +extern long X_PEM_read_DHparams(SSL_CTX* ctx, DH *dh); +extern int X_SSL_CTX_set_tlsext_ticket_key_cb(SSL_CTX *sslctx, + int (*cb)(SSL *s, unsigned char key_name[16], + unsigned char iv[EVP_MAX_IV_LENGTH], + EVP_CIPHER_CTX *ctx, HMAC_CTX *hctx, int enc)); +extern int X_SSL_CTX_ticket_key_cb(SSL *s, unsigned char key_name[16], + unsigned char iv[EVP_MAX_IV_LENGTH], + EVP_CIPHER_CTX *cctx, HMAC_CTX *hctx, int enc); + +/* BIO methods */ +extern int X_BIO_get_flags(BIO *b); +extern void X_BIO_set_flags(BIO *bio, int flags); +extern void X_BIO_clear_flags(BIO *bio, int flags); +extern void X_BIO_set_data(BIO *bio, void* data); +extern void *X_BIO_get_data(BIO *bio); +extern int X_BIO_read(BIO *b, void *buf, int len); +extern int X_BIO_write(BIO *b, const void *buf, int len); +extern BIO *X_BIO_new_write_bio(); +extern BIO *X_BIO_new_read_bio(); + +/* EVP methods */ +extern const int X_ED25519_SUPPORT; +extern int X_EVP_PKEY_ED25519; +extern const EVP_MD *X_EVP_get_digestbyname(const char *name); +extern EVP_MD_CTX *X_EVP_MD_CTX_new(); +extern void X_EVP_MD_CTX_free(EVP_MD_CTX *ctx); +extern const EVP_MD *X_EVP_md_null(); +extern const EVP_MD *X_EVP_md5(); +extern const EVP_MD *X_EVP_md4(); +extern const EVP_MD *X_EVP_sha(); +extern const EVP_MD *X_EVP_sha1(); +extern const EVP_MD *X_EVP_dss(); +extern const 
EVP_MD *X_EVP_dss1(); +extern const EVP_MD *X_EVP_ripemd160(); +extern const EVP_MD *X_EVP_sha224(); +extern const EVP_MD *X_EVP_sha256(); +extern const EVP_MD *X_EVP_sha384(); +extern const EVP_MD *X_EVP_sha512(); +extern int X_EVP_MD_size(const EVP_MD *md); +extern int X_EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *impl); +extern int X_EVP_DigestUpdate(EVP_MD_CTX *ctx, const void *d, size_t cnt); +extern int X_EVP_DigestFinal_ex(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *s); +extern int X_EVP_SignInit(EVP_MD_CTX *ctx, const EVP_MD *type); +extern int X_EVP_SignUpdate(EVP_MD_CTX *ctx, const void *d, unsigned int cnt); +extern int X_EVP_DigestSignInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey); +extern int X_EVP_DigestSign(EVP_MD_CTX *ctx, unsigned char *sigret, size_t *siglen, const unsigned char *tbs, size_t tbslen); +extern EVP_PKEY *X_EVP_PKEY_new(void); +extern void X_EVP_PKEY_free(EVP_PKEY *pkey); +extern int X_EVP_PKEY_size(EVP_PKEY *pkey); +extern struct rsa_st *X_EVP_PKEY_get1_RSA(EVP_PKEY *pkey); +extern int X_EVP_PKEY_set1_RSA(EVP_PKEY *pkey, struct rsa_st *key); +extern int X_EVP_PKEY_assign_charp(EVP_PKEY *pkey, int type, char *key); +extern int X_EVP_SignFinal(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *s, EVP_PKEY *pkey); +extern int X_EVP_VerifyInit(EVP_MD_CTX *ctx, const EVP_MD *type); +extern int X_EVP_VerifyUpdate(EVP_MD_CTX *ctx, const void *d, unsigned int cnt); +extern int X_EVP_VerifyFinal(EVP_MD_CTX *ctx, const unsigned char *sigbuf, unsigned int siglen, EVP_PKEY *pkey); +extern int X_EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey); +extern int X_EVP_DigestVerify(EVP_MD_CTX *ctx, const unsigned char *sigret, size_t siglen, const unsigned char *tbs, size_t tbslen); +extern int X_EVP_CIPHER_block_size(EVP_CIPHER *c); +extern int X_EVP_CIPHER_key_length(EVP_CIPHER *c); +extern int X_EVP_CIPHER_iv_length(EVP_CIPHER 
*c); +extern int X_EVP_CIPHER_nid(EVP_CIPHER *c); +extern int X_EVP_CIPHER_CTX_block_size(EVP_CIPHER_CTX *ctx); +extern int X_EVP_CIPHER_CTX_key_length(EVP_CIPHER_CTX *ctx); +extern int X_EVP_CIPHER_CTX_iv_length(EVP_CIPHER_CTX *ctx); +extern void X_EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *ctx, int padding); +extern const EVP_CIPHER *X_EVP_CIPHER_CTX_cipher(EVP_CIPHER_CTX *ctx); +extern int X_EVP_CIPHER_CTX_encrypting(const EVP_CIPHER_CTX *ctx); +extern int X_EVP_PKEY_CTX_set_ec_paramgen_curve_nid(EVP_PKEY_CTX *ctx, int nid); + +/* HMAC methods */ +extern size_t X_HMAC_size(const HMAC_CTX *e); +extern HMAC_CTX *X_HMAC_CTX_new(void); +extern void X_HMAC_CTX_free(HMAC_CTX *ctx); +extern int X_HMAC_Init_ex(HMAC_CTX *ctx, const void *key, int len, const EVP_MD *md, ENGINE *impl); +extern int X_HMAC_Update(HMAC_CTX *ctx, const unsigned char *data, size_t len); +extern int X_HMAC_Final(HMAC_CTX *ctx, unsigned char *md, unsigned int *len); + +/* X509 methods */ +extern int X_X509_add_ref(X509* x509); +extern const ASN1_TIME *X_X509_get0_notBefore(const X509 *x); +extern const ASN1_TIME *X_X509_get0_notAfter(const X509 *x); +extern int X_sk_X509_num(STACK_OF(X509) *sk); +extern X509 *X_sk_X509_value(STACK_OF(X509)* sk, int i); +extern long X_X509_get_version(const X509 *x); +extern int X_X509_set_version(X509 *x, long version); + +/* PEM methods */ +extern int X_PEM_write_bio_PrivateKey_traditional(BIO *bio, EVP_PKEY *key, const EVP_CIPHER *enc, unsigned char *kstr, int klen, pem_password_cb *cb, void *u); + +/* Object methods */ +extern int OBJ_create(const char *oid,const char *sn,const char *ln); \ No newline at end of file diff --git a/vendor/github.com/libp2p/go-openssl/sni.c b/vendor/github.com/libp2p/go-openssl/sni.c new file mode 100644 index 0000000000..f9e8d16b0e --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/sni.c @@ -0,0 +1,23 @@ +// Copyright (C) 2017. See AUTHORS. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include "_cgo_export.h" +#include + +int sni_cb(SSL *con, int *ad, void *arg) { + SSL_CTX* ssl_ctx = ssl_ctx = SSL_get_SSL_CTX(con); + void* p = SSL_CTX_get_ex_data(ssl_ctx, get_ssl_ctx_idx()); + return sni_cb_thunk(p, con, ad, arg); +} diff --git a/vendor/github.com/libp2p/go-openssl/ssl.go b/vendor/github.com/libp2p/go-openssl/ssl.go new file mode 100644 index 0000000000..117c30c0f9 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/ssl.go @@ -0,0 +1,170 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package openssl + +// #include "shim.h" +import "C" + +import ( + "os" + "unsafe" +) + +type SSLTLSExtErr int + +const ( + SSLTLSExtErrOK SSLTLSExtErr = C.SSL_TLSEXT_ERR_OK + SSLTLSExtErrAlertWarning SSLTLSExtErr = C.SSL_TLSEXT_ERR_ALERT_WARNING + SSLTLSEXTErrAlertFatal SSLTLSExtErr = C.SSL_TLSEXT_ERR_ALERT_FATAL + SSLTLSEXTErrNoAck SSLTLSExtErr = C.SSL_TLSEXT_ERR_NOACK +) + +var ( + ssl_idx = C.X_SSL_new_index() +) + +//export get_ssl_idx +func get_ssl_idx() C.int { + return ssl_idx +} + +type SSL struct { + ssl *C.SSL + verify_cb VerifyCallback +} + +//export go_ssl_verify_cb_thunk +func go_ssl_verify_cb_thunk(p unsafe.Pointer, ok C.int, ctx *C.X509_STORE_CTX) C.int { + defer func() { + if err := recover(); err != nil { + logger.Critf("openssl: verify callback panic'd: %v", err) + os.Exit(1) + } + }() + verify_cb := (*SSL)(p).verify_cb + // set up defaults just in case verify_cb is nil + if verify_cb != nil { + store := &CertificateStoreCtx{ctx: ctx} + if verify_cb(ok == 1, store) { + ok = 1 + } else { + ok = 0 + } + } + return ok +} + +// Wrapper around SSL_get_servername. Returns server name according to rfc6066 +// http://tools.ietf.org/html/rfc6066. +func (s *SSL) GetServername() string { + return C.GoString(C.SSL_get_servername(s.ssl, C.TLSEXT_NAMETYPE_host_name)) +} + +// GetOptions returns SSL options. See +// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html +func (s *SSL) GetOptions() Options { + return Options(C.X_SSL_get_options(s.ssl)) +} + +// SetOptions sets SSL options. See +// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html +func (s *SSL) SetOptions(options Options) Options { + return Options(C.X_SSL_set_options(s.ssl, C.long(options))) +} + +// ClearOptions clear SSL options. See +// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html +func (s *SSL) ClearOptions(options Options) Options { + return Options(C.X_SSL_clear_options(s.ssl, C.long(options))) +} + +// SetVerify controls peer verification settings. 
See +// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html +func (s *SSL) SetVerify(options VerifyOptions, verify_cb VerifyCallback) { + s.verify_cb = verify_cb + if verify_cb != nil { + C.SSL_set_verify(s.ssl, C.int(options), (*[0]byte)(C.X_SSL_verify_cb)) + } else { + C.SSL_set_verify(s.ssl, C.int(options), nil) + } +} + +// SetVerifyMode controls peer verification setting. See +// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html +func (s *SSL) SetVerifyMode(options VerifyOptions) { + s.SetVerify(options, s.verify_cb) +} + +// SetVerifyCallback controls peer verification setting. See +// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html +func (s *SSL) SetVerifyCallback(verify_cb VerifyCallback) { + s.SetVerify(s.VerifyMode(), verify_cb) +} + +// GetVerifyCallback returns callback function. See +// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html +func (s *SSL) GetVerifyCallback() VerifyCallback { + return s.verify_cb +} + +// VerifyMode returns peer verification setting. See +// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html +func (s *SSL) VerifyMode() VerifyOptions { + return VerifyOptions(C.SSL_get_verify_mode(s.ssl)) +} + +// SetVerifyDepth controls how many certificates deep the certificate +// verification logic is willing to follow a certificate chain. See +// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html +func (s *SSL) SetVerifyDepth(depth int) { + C.SSL_set_verify_depth(s.ssl, C.int(depth)) +} + +// GetVerifyDepth controls how many certificates deep the certificate +// verification logic is willing to follow a certificate chain. See +// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html +func (s *SSL) GetVerifyDepth() int { + return int(C.SSL_get_verify_depth(s.ssl)) +} + +// SetSSLCtx changes context to new one. Useful for Server Name Indication (SNI) +// rfc6066 http://tools.ietf.org/html/rfc6066. 
See +// http://stackoverflow.com/questions/22373332/serving-multiple-domains-in-one-box-with-sni +func (s *SSL) SetSSLCtx(ctx *Ctx) { + /* + * SSL_set_SSL_CTX() only changes certs as of 1.0.0d + * adjust other things we care about + */ + C.SSL_set_SSL_CTX(s.ssl, ctx.ctx) +} + +//export sni_cb_thunk +func sni_cb_thunk(p unsafe.Pointer, con *C.SSL, ad unsafe.Pointer, arg unsafe.Pointer) C.int { + defer func() { + if err := recover(); err != nil { + logger.Critf("openssl: verify callback sni panic'd: %v", err) + os.Exit(1) + } + }() + + sni_cb := (*Ctx)(p).sni_cb + + s := &SSL{ssl: con} + // This attaches a pointer to our SSL struct into the SNI callback. + C.SSL_set_ex_data(s.ssl, get_ssl_idx(), unsafe.Pointer(s)) + + // Note: this is ctx.sni_cb, not C.sni_cb + return C.int(sni_cb(s)) +} diff --git a/vendor/github.com/libp2p/go-openssl/tickets.go b/vendor/github.com/libp2p/go-openssl/tickets.go new file mode 100644 index 0000000000..a064d38592 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/tickets.go @@ -0,0 +1,222 @@ +// Copyright (C) 2017. See AUTHORS. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openssl + +// #include "shim.h" +import "C" + +import ( + "os" + "unsafe" +) + +const ( + KeyNameSize = 16 +) + +// TicketCipherCtx describes the cipher that will be used by the ticket store +// for encrypting the tickets. Engine may be nil if no engine is desired. 
+type TicketCipherCtx struct { + Cipher *Cipher + Engine *Engine +} + +// TicketDigestCtx describes the digest that will be used by the ticket store +// to authenticate the data. Engine may be nil if no engine is desired. +type TicketDigestCtx struct { + Digest *Digest + Engine *Engine +} + +// TicketName is an identifier for the key material for a ticket. +type TicketName [KeyNameSize]byte + +// TicketKey is the key material for a ticket. If this is lost, forward secrecy +// is lost as it allows decrypting TLS sessions retroactively. +type TicketKey struct { + Name TicketName + CipherKey []byte + HMACKey []byte + IV []byte +} + +// TicketKeyManager is a manager for TicketKeys. It allows one to control the +// lifetime of tickets, causing renewals and expirations for keys that are +// created. Calls to the manager are serialized. +type TicketKeyManager interface { + // New should create a brand new TicketKey with a new name. + New() *TicketKey + + // Current should return a key that is still valid. + Current() *TicketKey + + // Lookup should return a key with the given name, or nil if no name + // exists. + Lookup(name TicketName) *TicketKey + + // Expired should return if the key with the given name is expired and + // should not be used any more. + Expired(name TicketName) bool + + // ShouldRenew should return if the key is still ok to use for the current + // session, but we should send a new key for the client. + ShouldRenew(name TicketName) bool +} + +// TicketStore descibes the encryption and authentication methods the tickets +// will use along with a key manager for generating and keeping track of the +// secrets. 
+type TicketStore struct { + CipherCtx TicketCipherCtx + DigestCtx TicketDigestCtx + Keys TicketKeyManager +} + +func (t *TicketStore) cipherEngine() *C.ENGINE { + if t.CipherCtx.Engine == nil { + return nil + } + return t.CipherCtx.Engine.e +} + +func (t *TicketStore) digestEngine() *C.ENGINE { + if t.DigestCtx.Engine == nil { + return nil + } + return t.DigestCtx.Engine.e +} + +const ( + // instruct to do a handshake + ticket_resp_requireHandshake = 0 + // crypto context is set up correctly + ticket_resp_sessionOk = 1 + // crypto context is ok, but the ticket should be reissued + ticket_resp_renewSession = 2 + // we had a problem that shouldn't fall back to doing a handshake + ticket_resp_error = -1 + + // asked to create session crypto context + ticket_req_newSession = 1 + // asked to load crypto context for a previous session + ticket_req_lookupSession = 0 +) + +//export go_ticket_key_cb_thunk +func go_ticket_key_cb_thunk(p unsafe.Pointer, s *C.SSL, key_name *C.uchar, + iv *C.uchar, cctx *C.EVP_CIPHER_CTX, hctx *C.HMAC_CTX, enc C.int) C.int { + + // no panic's allowed. it's super hard to guarantee any state at this point + // so just abort everything. + defer func() { + if err := recover(); err != nil { + logger.Critf("openssl: ticket key callback panic'd: %v", err) + os.Exit(1) + } + }() + + ctx := (*Ctx)(p) + store := ctx.ticket_store + if store == nil { + // TODO(jeff): should this be an error condition? it doesn't make sense + // to be called if we don't have a store I believe, but that's probably + // not worth aborting the handshake which is what I believe returning + // an error would do. 
+ return ticket_resp_requireHandshake + } + + ctx.ticket_store_mu.Lock() + defer ctx.ticket_store_mu.Unlock() + + switch enc { + case ticket_req_newSession: + key := store.Keys.Current() + if key == nil { + key = store.Keys.New() + if key == nil { + return ticket_resp_requireHandshake + } + } + + C.memcpy( + unsafe.Pointer(key_name), + unsafe.Pointer(&key.Name[0]), + KeyNameSize) + C.EVP_EncryptInit_ex( + cctx, + store.CipherCtx.Cipher.ptr, + store.cipherEngine(), + (*C.uchar)(&key.CipherKey[0]), + (*C.uchar)(&key.IV[0])) + C.HMAC_Init_ex( + hctx, + unsafe.Pointer(&key.HMACKey[0]), + C.int(len(key.HMACKey)), + store.DigestCtx.Digest.ptr, + store.digestEngine()) + + return ticket_resp_sessionOk + + case ticket_req_lookupSession: + var name TicketName + C.memcpy( + unsafe.Pointer(&name[0]), + unsafe.Pointer(key_name), + KeyNameSize) + + key := store.Keys.Lookup(name) + if key == nil { + return ticket_resp_requireHandshake + } + if store.Keys.Expired(name) { + return ticket_resp_requireHandshake + } + + C.EVP_DecryptInit_ex( + cctx, + store.CipherCtx.Cipher.ptr, + store.cipherEngine(), + (*C.uchar)(&key.CipherKey[0]), + (*C.uchar)(&key.IV[0])) + C.HMAC_Init_ex( + hctx, + unsafe.Pointer(&key.HMACKey[0]), + C.int(len(key.HMACKey)), + store.DigestCtx.Digest.ptr, + store.digestEngine()) + + if store.Keys.ShouldRenew(name) { + return ticket_resp_renewSession + } + + return ticket_resp_sessionOk + + default: + return ticket_resp_error + } +} + +// SetTicketStore sets the ticket store for the context so that clients can do +// ticket based session resumption. 
If the store is nil, the +func (c *Ctx) SetTicketStore(store *TicketStore) { + c.ticket_store = store + + if store == nil { + C.X_SSL_CTX_set_tlsext_ticket_key_cb(c.ctx, nil) + } else { + C.X_SSL_CTX_set_tlsext_ticket_key_cb(c.ctx, + (*[0]byte)(C.X_SSL_CTX_ticket_key_cb)) + } +} diff --git a/vendor/github.com/libp2p/go-openssl/utils/errors.go b/vendor/github.com/libp2p/go-openssl/utils/errors.go new file mode 100644 index 0000000000..bab314c95d --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/utils/errors.go @@ -0,0 +1,50 @@ +// Copyright (C) 2014 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "errors" + "strings" +) + +// ErrorGroup collates errors +type ErrorGroup struct { + Errors []error +} + +// Add adds an error to an existing error group +func (e *ErrorGroup) Add(err error) { + if err != nil { + e.Errors = append(e.Errors, err) + } +} + +// Finalize returns an error corresponding to the ErrorGroup state. If there's +// no errors in the group, finalize returns nil. If there's only one error, +// Finalize returns that error. Otherwise, Finalize will make a new error +// consisting of the messages from the constituent errors. 
+func (e *ErrorGroup) Finalize() error { + if len(e.Errors) == 0 { + return nil + } + if len(e.Errors) == 1 { + return e.Errors[0] + } + msgs := make([]string, 0, len(e.Errors)) + for _, err := range e.Errors { + msgs = append(msgs, err.Error()) + } + return errors.New(strings.Join(msgs, "\n")) +} diff --git a/vendor/github.com/libp2p/go-openssl/utils/future.go b/vendor/github.com/libp2p/go-openssl/utils/future.go new file mode 100644 index 0000000000..fa1bbbfb86 --- /dev/null +++ b/vendor/github.com/libp2p/go-openssl/utils/future.go @@ -0,0 +1,79 @@ +// Copyright (C) 2014 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "sync" +) + +// Future is a type that is essentially the inverse of a channel. With a +// channel, you have multiple senders and one receiver. With a future, you can +// have multiple receivers and one sender. Additionally, a future protects +// against double-sends. Since this is usually used for returning function +// results, we also capture and return error values as well. Use NewFuture +// to initialize. +type Future struct { + mutex *sync.Mutex + cond *sync.Cond + received bool + val interface{} + err error +} + +// NewFuture returns an initialized and ready Future. +func NewFuture() *Future { + mutex := &sync.Mutex{} + return &Future{ + mutex: mutex, + cond: sync.NewCond(mutex), + received: false, + val: nil, + err: nil, + } +} + +// Get blocks until the Future has a value set. 
+func (self *Future) Get() (interface{}, error) { + self.mutex.Lock() + defer self.mutex.Unlock() + for { + if self.received { + return self.val, self.err + } + self.cond.Wait() + } +} + +// Fired returns whether or not a value has been set. If Fired is true, Get +// won't block. +func (self *Future) Fired() bool { + self.mutex.Lock() + defer self.mutex.Unlock() + return self.received +} + +// Set provides the value to present and future Get calls. If Set has already +// been called, this is a no-op. +func (self *Future) Set(val interface{}, err error) { + self.mutex.Lock() + defer self.mutex.Unlock() + if self.received { + return + } + self.received = true + self.val = val + self.err = err + self.cond.Broadcast() +} diff --git a/vendor/github.com/minio/sha256-simd/.github/workflows/go.yml b/vendor/github.com/minio/sha256-simd/.github/workflows/go.yml new file mode 100644 index 0000000000..a59b88efe8 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/.github/workflows/go.yml @@ -0,0 +1,39 @@ +name: Go + +on: + pull_request: + branches: + - master + push: + branches: + - master + +jobs: + build: + name: Test on Go ${{ matrix.go-version }} and ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + max-parallel: 4 + matrix: + go-version: [1.13.x, 1.12.x] + os: [ubuntu-latest, windows-latest] + steps: + - name: Set up Go ${{ matrix.go-version }} + uses: actions/setup-go@v1 + with: + go-version: ${{ matrix.go-version }} + id: go + + - name: Check out code into the Go module directory + uses: actions/checkout@v1 + + - name: Build on ${{ matrix.os }} + if: matrix.os == 'windows-latest' + run: go test -race -v ./... + - name: Build on ${{ matrix.os }} + if: matrix.os == 'ubuntu-latest' + run: | + diff -au <(gofmt -d .) <(printf "") + go test -race -v ./... + go vet -asmdecl . 
+ ./test-architectures.sh diff --git a/vendor/github.com/minio/sha256-simd/.gitignore b/vendor/github.com/minio/sha256-simd/.gitignore new file mode 100644 index 0000000000..c56069fe26 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/.gitignore @@ -0,0 +1 @@ +*.test \ No newline at end of file diff --git a/vendor/github.com/minio/sha256-simd/LICENSE b/vendor/github.com/minio/sha256-simd/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/minio/sha256-simd/README.md b/vendor/github.com/minio/sha256-simd/README.md new file mode 100644 index 0000000000..5282d83ad7 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/README.md @@ -0,0 +1,133 @@ +# sha256-simd + +Accelerate SHA256 computations in pure Go using AVX512, SHA Extensions and AVX2 for Intel and ARM64 for ARM. On AVX512 it provides an up to 8x improvement (over 3 GB/s per core) in comparison to AVX2. SHA Extensions give a performance boost of close to 4x over AVX2. + +## Introduction + +This package is designed as a replacement for `crypto/sha256`. For Intel CPUs it has two flavors for AVX512 and AVX2 (AVX/SSE are also supported). For ARM CPUs with the Cryptography Extensions, advantage is taken of the SHA2 instructions resulting in a massive performance improvement. + +This package uses Golang assembly. The AVX512 version is based on the Intel's "multi-buffer crypto library for IPSec" whereas the other Intel implementations are described in "Fast SHA-256 Implementations on Intel Architecture Processors" by J. Guilford et al. + +## New: Support for Intel SHA Extensions + +Support for the Intel SHA Extensions has been added by Kristofer Peterson (@svenski123), originally developed for spacemeshos [here](https://github.com/spacemeshos/POET/issues/23). 
On CPUs that support it (known thus far Intel Celeron J3455 and AMD Ryzen) it gives a significant boost in performance (with thanks to @AudriusButkevicius for reporting the results; full results [here](https://github.com/minio/sha256-simd/pull/37#issuecomment-451607827)). + +``` +$ benchcmp avx2.txt sha-ext.txt +benchmark AVX2 MB/s SHA Ext MB/s speedup +BenchmarkHash5M 514.40 1975.17 3.84x +``` + +Thanks to Kristofer Peterson, we also added additional performance changes such as optimized padding, endian conversions which sped up all implementations i.e. Intel SHA alone while doubled performance for small sizes, the other changes increased everything roughly 50%. + +## Support for AVX512 + +We have added support for AVX512 which results in an up to 8x performance improvement over AVX2 (3.0 GHz Xeon Platinum 8124M CPU): + +``` +$ benchcmp avx2.txt avx512.txt +benchmark AVX2 MB/s AVX512 MB/s speedup +BenchmarkHash5M 448.62 3498.20 7.80x +``` + +The original code was developed by Intel as part of the [multi-buffer crypto library](https://github.com/intel/intel-ipsec-mb) for IPSec or more specifically this [AVX512](https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm) implementation. The key idea behind it is to process a total of 16 checksums in parallel by “transposing” 16 (independent) messages of 64 bytes between a total of 16 ZMM registers (each 64 bytes wide). + +Transposing the input messages means that in order to take full advantage of the speedup you need to have a (server) workload where multiple threads are doing SHA256 calculations in parallel. Unfortunately for this algorithm it is not possible for two message blocks processed in parallel to be dependent on one another — because then the (interim) result of the first part of the message has to be an input into the processing of the second part of the message. 
+ +Whereas the original Intel C implementation requires some sort of explicit scheduling of messages to be processed in parallel, for Golang it makes sense to take advantage of channels in order to group messages together and use channels as well for sending back the results (thereby effectively decoupling the calculations). We have implemented a fairly simple scheduling mechanism that seems to work well in practice. + +Due to this different way of scheduling, we decided to use an explicit method to instantiate the AVX512 version. Essentially one or more AVX512 processing servers ([`Avx512Server`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L294)) have to be created whereby each server can hash over 3 GB/s on a single core. An `hash.Hash` object ([`Avx512Digest`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L45)) is then instantiated using one of these servers and used in the regular fashion: + +```go +import "github.com/minio/sha256-simd" + +func main() { + server := sha256.NewAvx512Server() + h512 := sha256.NewAvx512(server) + h512.Write(fileBlock) + digest := h512.Sum([]byte{}) +} +``` + +Note that, because of the scheduling overhead, for small messages (< 1 MB) you will be better off using the regular SHA256 hashing (but those are typically not performance critical anyway). Some other tips to get the best performance: +* Have many go routines doing SHA256 calculations in parallel. +* Try to Write() messages in multiples of 64 bytes. +* Try to keep the overall length of messages to a roughly similar size ie. 5 MB (this way all 16 ‘lanes’ in the AVX512 computations are contributing as much as possible). + +More detailed information can be found in this [blog](https://blog.minio.io/accelerate-sha256-up-to-8x-over-3-gb-s-per-core-with-avx512-a0b1d64f78f) post including scaling across cores. + +## Drop-In Replacement + +The following code snippet shows how you can use `github.com/minio/sha256-simd`. 
This will automatically select the fastest method for the architecture on which it will be executed. + +```go +import "github.com/minio/sha256-simd" + +func main() { + ... + shaWriter := sha256.New() + io.Copy(shaWriter, file) + ... +} +``` + +## Performance + +Below is the speed in MB/s for a single core (ranked fast to slow) for blocks larger than 1 MB. + +| Processor | SIMD | Speed (MB/s) | +| --------------------------------- | ------- | ------------:| +| 3.0 GHz Intel Xeon Platinum 8124M | AVX512 | 3498 | +| 3.7 GHz AMD Ryzen 7 2700X | SHA Ext | 1979 | +| 1.2 GHz ARM Cortex-A53 | ARM64 | 638 | +| 3.0 GHz Intel Xeon Platinum 8124M | AVX2 | 449 | +| 3.1 GHz Intel Core i7 | AVX | 362 | +| 3.1 GHz Intel Core i7 | SSE | 299 | + +## asm2plan9s + +In order to be able to work more easily with AVX512/AVX2 instructions, a separate tool was developed to convert SIMD instructions into the corresponding BYTE sequence as accepted by Go assembly. See [asm2plan9s](https://github.com/minio/asm2plan9s) for more information. + +## Why and benefits + +One of the most performance sensitive parts of the [Minio](https://github.com/minio/minio) object storage server is related to SHA256 hash sums calculations. For instance during multi part uploads each part that is uploaded needs to be verified for data integrity by the server. + +Other applications that can benefit from enhanced SHA256 performance are deduplication in storage systems, intrusion detection, version control systems, integrity checking, etc. + +## ARM SHA Extensions + +The 64-bit ARMv8 core has introduced new instructions for SHA1 and SHA2 acceleration as part of the [Cryptography Extensions](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0501f/CHDFJBCJ.html). Below you can see a small excerpt highlighting one of the rounds as is done for the SHA256 calculation process (for full code see [sha256block_arm64.s](https://github.com/minio/sha256-simd/blob/master/sha256block_arm64.s)). 
+ + ``` + sha256h q2, q3, v9.4s + sha256h2 q3, q4, v9.4s + sha256su0 v5.4s, v6.4s + rev32 v8.16b, v8.16b + add v9.4s, v7.4s, v18.4s + mov v4.16b, v2.16b + sha256h q2, q3, v10.4s + sha256h2 q3, q4, v10.4s + sha256su0 v6.4s, v7.4s + sha256su1 v5.4s, v7.4s, v8.4s + ``` + +### Detailed benchmarks + +Benchmarks generated on a 1.2 Ghz Quad-Core ARM Cortex A53 equipped [Pine64](https://www.pine64.com/). + +``` +minio@minio-arm:$ benchcmp golang.txt arm64.txt +benchmark golang arm64 speedup +BenchmarkHash8Bytes-4 0.68 MB/s 5.70 MB/s 8.38x +BenchmarkHash1K-4 5.65 MB/s 326.30 MB/s 57.75x +BenchmarkHash8K-4 6.00 MB/s 570.63 MB/s 95.11x +BenchmarkHash1M-4 6.05 MB/s 638.23 MB/s 105.49x +``` + +## License + +Released under the Apache License v2.0. You can find the complete text in the file LICENSE. + +## Contributing + +Contributions are welcome, please send PRs for any enhancements. diff --git a/vendor/github.com/minio/sha256-simd/cpuid.go b/vendor/github.com/minio/sha256-simd/cpuid.go new file mode 100644 index 0000000000..878ad4638c --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid.go @@ -0,0 +1,119 @@ +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package sha256 + +// True when SIMD instructions are available. 
+var avx512 bool +var avx2 bool +var avx bool +var sse bool +var sse2 bool +var sse3 bool +var ssse3 bool +var sse41 bool +var sse42 bool +var popcnt bool +var sha bool +var armSha = haveArmSha() + +func init() { + var _xsave bool + var _osxsave bool + var _avx bool + var _avx2 bool + var _avx512f bool + var _avx512dq bool + // var _avx512pf bool + // var _avx512er bool + // var _avx512cd bool + var _avx512bw bool + var _avx512vl bool + var _sseState bool + var _avxState bool + var _opmaskState bool + var _zmmHI256State bool + var _hi16ZmmState bool + + mfi, _, _, _ := cpuid(0) + + if mfi >= 1 { + _, _, c, d := cpuid(1) + + sse = (d & (1 << 25)) != 0 + sse2 = (d & (1 << 26)) != 0 + sse3 = (c & (1 << 0)) != 0 + ssse3 = (c & (1 << 9)) != 0 + sse41 = (c & (1 << 19)) != 0 + sse42 = (c & (1 << 20)) != 0 + popcnt = (c & (1 << 23)) != 0 + _xsave = (c & (1 << 26)) != 0 + _osxsave = (c & (1 << 27)) != 0 + _avx = (c & (1 << 28)) != 0 + } + + if mfi >= 7 { + _, b, _, _ := cpuid(7) + + _avx2 = (b & (1 << 5)) != 0 + _avx512f = (b & (1 << 16)) != 0 + _avx512dq = (b & (1 << 17)) != 0 + // _avx512pf = (b & (1 << 26)) != 0 + // _avx512er = (b & (1 << 27)) != 0 + // _avx512cd = (b & (1 << 28)) != 0 + _avx512bw = (b & (1 << 30)) != 0 + _avx512vl = (b & (1 << 31)) != 0 + sha = (b & (1 << 29)) != 0 + } + + // Stop here if XSAVE unsupported or not enabled + if !_xsave || !_osxsave { + return + } + + if _xsave && _osxsave { + a, _ := xgetbv(0) + + _sseState = (a & (1 << 1)) != 0 + _avxState = (a & (1 << 2)) != 0 + _opmaskState = (a & (1 << 5)) != 0 + _zmmHI256State = (a & (1 << 6)) != 0 + _hi16ZmmState = (a & (1 << 7)) != 0 + } else { + _sseState = true + } + + // Very unlikely that OS would enable XSAVE and then disable SSE + if !_sseState { + sse = false + sse2 = false + sse3 = false + ssse3 = false + sse41 = false + sse42 = false + } + + if _avxState { + avx = _avx + avx2 = _avx2 + } + + if _opmaskState && _zmmHI256State && _hi16ZmmState { + avx512 = (_avx512f && + _avx512dq && + 
_avx512bw && + _avx512vl) + } +} diff --git a/vendor/github.com/minio/sha256-simd/cpuid_386.go b/vendor/github.com/minio/sha256-simd/cpuid_386.go new file mode 100644 index 0000000000..c9890be478 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid_386.go @@ -0,0 +1,24 @@ +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package sha256 + +func cpuid(op uint32) (eax, ebx, ecx, edx uint32) +func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +func xgetbv(index uint32) (eax, edx uint32) + +func haveArmSha() bool { + return false +} diff --git a/vendor/github.com/minio/sha256-simd/cpuid_386.s b/vendor/github.com/minio/sha256-simd/cpuid_386.s new file mode 100644 index 0000000000..1511cd6f60 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid_386.s @@ -0,0 +1,53 @@ +// The MIT License (MIT) +// +// Copyright (c) 2015 Klaus Post +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or 
substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +// +build 386,!gccgo + +// func cpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuid(SB), 7, $0 + XORL CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+4(FP) + MOVL BX, ebx+8(FP) + MOVL CX, ecx+12(FP) + MOVL DX, edx+16(FP) + RET + +// func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv(index uint32) (eax, edx uint32) +TEXT ·xgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+4(FP) + MOVL DX, edx+8(FP) + RET diff --git a/vendor/github.com/minio/sha256-simd/cpuid_amd64.go b/vendor/github.com/minio/sha256-simd/cpuid_amd64.go new file mode 100644 index 0000000000..c9890be478 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid_amd64.go @@ -0,0 +1,24 @@ +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package sha256 + +func cpuid(op uint32) (eax, ebx, ecx, edx uint32) +func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +func xgetbv(index uint32) (eax, edx uint32) + +func haveArmSha() bool { + return false +} diff --git a/vendor/github.com/minio/sha256-simd/cpuid_amd64.s b/vendor/github.com/minio/sha256-simd/cpuid_amd64.s new file mode 100644 index 0000000000..b0f414748a --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid_amd64.s @@ -0,0 +1,53 @@ +// The MIT License (MIT) +// +// Copyright (c) 2015 Klaus Post +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +// +build amd64,!gccgo + +// func cpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuid(SB), 7, $0 + XORQ CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv(index uint32) (eax, edx uint32) +TEXT ·xgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+8(FP) + MOVL DX, edx+12(FP) + RET diff --git a/vendor/github.com/minio/sha256-simd/cpuid_arm.go b/vendor/github.com/minio/sha256-simd/cpuid_arm.go new file mode 100644 index 0000000000..351dff4b6b --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid_arm.go @@ -0,0 +1,32 @@ +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package sha256 + +func cpuid(op uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func xgetbv(index uint32) (eax, edx uint32) { + return 0, 0 +} + +func haveArmSha() bool { + return false +} diff --git a/vendor/github.com/minio/sha256-simd/cpuid_linux_arm64.go b/vendor/github.com/minio/sha256-simd/cpuid_linux_arm64.go new file mode 100644 index 0000000000..e739996d91 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid_linux_arm64.go @@ -0,0 +1,49 @@ +// +build arm64,linux + +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package sha256 + +import ( + "bytes" + "io/ioutil" +) + +func cpuid(op uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func xgetbv(index uint32) (eax, edx uint32) { + return 0, 0 +} + +// File to check for cpu capabilities. +const procCPUInfo = "/proc/cpuinfo" + +// Feature to check for. 
+const sha256Feature = "sha2" + +func haveArmSha() bool { + cpuInfo, err := ioutil.ReadFile(procCPUInfo) + if err != nil { + return false + } + return bytes.Contains(cpuInfo, []byte(sha256Feature)) +} diff --git a/vendor/github.com/minio/sha256-simd/cpuid_other.go b/vendor/github.com/minio/sha256-simd/cpuid_other.go new file mode 100644 index 0000000000..3e44158282 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid_other.go @@ -0,0 +1,34 @@ +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// +build !386,!amd64,!arm,!arm64 arm64,!linux + +package sha256 + +func cpuid(op uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func xgetbv(index uint32) (eax, edx uint32) { + return 0, 0 +} + +func haveArmSha() bool { + return false +} diff --git a/vendor/github.com/minio/sha256-simd/go.mod b/vendor/github.com/minio/sha256-simd/go.mod new file mode 100644 index 0000000000..4451e9eb21 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/go.mod @@ -0,0 +1,3 @@ +module github.com/minio/sha256-simd + +go 1.12 diff --git a/vendor/github.com/minio/sha256-simd/sha256.go b/vendor/github.com/minio/sha256-simd/sha256.go new file mode 100644 index 0000000000..4e1f6d2f76 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256.go @@ -0,0 +1,409 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sha256 + +import ( + "crypto/sha256" + "encoding/binary" + "hash" + "runtime" +) + +// Size - The size of a SHA256 checksum in bytes. +const Size = 32 + +// BlockSize - The blocksize of SHA256 in bytes. +const BlockSize = 64 + +const ( + chunk = BlockSize + init0 = 0x6A09E667 + init1 = 0xBB67AE85 + init2 = 0x3C6EF372 + init3 = 0xA54FF53A + init4 = 0x510E527F + init5 = 0x9B05688C + init6 = 0x1F83D9AB + init7 = 0x5BE0CD19 +) + +// digest represents the partial evaluation of a checksum. 
+type digest struct { + h [8]uint32 + x [chunk]byte + nx int + len uint64 +} + +// Reset digest back to default +func (d *digest) Reset() { + d.h[0] = init0 + d.h[1] = init1 + d.h[2] = init2 + d.h[3] = init3 + d.h[4] = init4 + d.h[5] = init5 + d.h[6] = init6 + d.h[7] = init7 + d.nx = 0 + d.len = 0 +} + +type blockfuncType int + +const ( + blockfuncGeneric blockfuncType = iota + blockfuncAvx512 blockfuncType = iota + blockfuncAvx2 blockfuncType = iota + blockfuncAvx blockfuncType = iota + blockfuncSsse blockfuncType = iota + blockfuncSha blockfuncType = iota + blockfuncArm blockfuncType = iota +) + +var blockfunc blockfuncType + +func init() { + is386bit := runtime.GOARCH == "386" + isARM := runtime.GOARCH == "arm" + switch { + case is386bit || isARM: + blockfunc = blockfuncGeneric + case sha && ssse3 && sse41: + blockfunc = blockfuncSha + case avx2: + blockfunc = blockfuncAvx2 + case avx: + blockfunc = blockfuncAvx + case ssse3: + blockfunc = blockfuncSsse + case armSha: + blockfunc = blockfuncArm + default: + blockfunc = blockfuncGeneric + } +} + +// New returns a new hash.Hash computing the SHA256 checksum. +func New() hash.Hash { + if blockfunc != blockfuncGeneric { + d := new(digest) + d.Reset() + return d + } + // Fallback to the standard golang implementation + // if no features were found. 
+ return sha256.New() +} + +// Sum256 - single caller sha256 helper +func Sum256(data []byte) (result [Size]byte) { + var d digest + d.Reset() + d.Write(data) + result = d.checkSum() + return +} + +// Return size of checksum +func (d *digest) Size() int { return Size } + +// Return blocksize of checksum +func (d *digest) BlockSize() int { return BlockSize } + +// Write to digest +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := copy(d.x[d.nx:], p) + d.nx += n + if d.nx == chunk { + block(d, d.x[:]) + d.nx = 0 + } + p = p[n:] + } + if len(p) >= chunk { + n := len(p) &^ (chunk - 1) + block(d, p[:n]) + p = p[n:] + } + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +// Return sha256 sum in bytes +func (d *digest) Sum(in []byte) []byte { + // Make a copy of d0 so that caller can keep writing and summing. + d0 := *d + hash := d0.checkSum() + return append(in, hash[:]...) +} + +// Intermediate checksum function +func (d *digest) checkSum() (digest [Size]byte) { + n := d.nx + + var k [64]byte + copy(k[:], d.x[:n]) + + k[n] = 0x80 + + if n >= 56 { + block(d, k[:]) + + // clear block buffer - go compiles this to optimal 1x xorps + 4x movups + // unfortunately expressing this more succinctly results in much worse code + k[0] = 0 + k[1] = 0 + k[2] = 0 + k[3] = 0 + k[4] = 0 + k[5] = 0 + k[6] = 0 + k[7] = 0 + k[8] = 0 + k[9] = 0 + k[10] = 0 + k[11] = 0 + k[12] = 0 + k[13] = 0 + k[14] = 0 + k[15] = 0 + k[16] = 0 + k[17] = 0 + k[18] = 0 + k[19] = 0 + k[20] = 0 + k[21] = 0 + k[22] = 0 + k[23] = 0 + k[24] = 0 + k[25] = 0 + k[26] = 0 + k[27] = 0 + k[28] = 0 + k[29] = 0 + k[30] = 0 + k[31] = 0 + k[32] = 0 + k[33] = 0 + k[34] = 0 + k[35] = 0 + k[36] = 0 + k[37] = 0 + k[38] = 0 + k[39] = 0 + k[40] = 0 + k[41] = 0 + k[42] = 0 + k[43] = 0 + k[44] = 0 + k[45] = 0 + k[46] = 0 + k[47] = 0 + k[48] = 0 + k[49] = 0 + k[50] = 0 + k[51] = 0 + k[52] = 0 + k[53] = 0 + k[54] = 0 + k[55] = 0 + k[56] = 0 + k[57] = 0 + k[58] = 
0 + k[59] = 0 + k[60] = 0 + k[61] = 0 + k[62] = 0 + k[63] = 0 + } + binary.BigEndian.PutUint64(k[56:64], uint64(d.len)<<3) + block(d, k[:]) + + { + const i = 0 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 1 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 2 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 3 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 4 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 5 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 6 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 7 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + + return +} + +func block(dig *digest, p []byte) { + if blockfunc == blockfuncSha { + blockShaGo(dig, p) + } else if blockfunc == blockfuncAvx2 { + blockAvx2Go(dig, p) + } else if blockfunc == blockfuncAvx { + blockAvxGo(dig, p) + } else if blockfunc == blockfuncSsse { + blockSsseGo(dig, p) + } else if blockfunc == blockfuncArm { + blockArmGo(dig, p) + } else if blockfunc == blockfuncGeneric { + blockGeneric(dig, p) + } +} + +func blockGeneric(dig *digest, p []byte) { + var w [64]uint32 + h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] + for len(p) >= chunk { + // Can interlace the computation of w with the + // rounds below if needed for speed. 
+ for i := 0; i < 16; i++ { + j := i * 4 + w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3]) + } + for i := 16; i < 64; i++ { + v1 := w[i-2] + t1 := (v1>>17 | v1<<(32-17)) ^ (v1>>19 | v1<<(32-19)) ^ (v1 >> 10) + v2 := w[i-15] + t2 := (v2>>7 | v2<<(32-7)) ^ (v2>>18 | v2<<(32-18)) ^ (v2 >> 3) + w[i] = t1 + w[i-7] + t2 + w[i-16] + } + + a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7 + + for i := 0; i < 64; i++ { + t1 := h + ((e>>6 | e<<(32-6)) ^ (e>>11 | e<<(32-11)) ^ (e>>25 | e<<(32-25))) + ((e & f) ^ (^e & g)) + _K[i] + w[i] + + t2 := ((a>>2 | a<<(32-2)) ^ (a>>13 | a<<(32-13)) ^ (a>>22 | a<<(32-22))) + ((a & b) ^ (a & c) ^ (b & c)) + + h = g + g = f + f = e + e = d + t1 + d = c + c = b + b = a + a = t1 + t2 + } + + h0 += a + h1 += b + h2 += c + h3 += d + h4 += e + h5 += f + h6 += g + h7 += h + + p = p[chunk:] + } + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 +} + +var _K = []uint32{ + 0x428a2f98, + 0x71374491, + 0xb5c0fbcf, + 0xe9b5dba5, + 0x3956c25b, + 0x59f111f1, + 0x923f82a4, + 0xab1c5ed5, + 0xd807aa98, + 0x12835b01, + 0x243185be, + 0x550c7dc3, + 0x72be5d74, + 0x80deb1fe, + 0x9bdc06a7, + 0xc19bf174, + 0xe49b69c1, + 0xefbe4786, + 0x0fc19dc6, + 0x240ca1cc, + 0x2de92c6f, + 0x4a7484aa, + 0x5cb0a9dc, + 0x76f988da, + 0x983e5152, + 0xa831c66d, + 0xb00327c8, + 0xbf597fc7, + 0xc6e00bf3, + 0xd5a79147, + 0x06ca6351, + 0x14292967, + 0x27b70a85, + 0x2e1b2138, + 0x4d2c6dfc, + 0x53380d13, + 0x650a7354, + 0x766a0abb, + 0x81c2c92e, + 0x92722c85, + 0xa2bfe8a1, + 0xa81a664b, + 0xc24b8b70, + 0xc76c51a3, + 0xd192e819, + 0xd6990624, + 0xf40e3585, + 0x106aa070, + 0x19a4c116, + 0x1e376c08, + 0x2748774c, + 0x34b0bcb5, + 0x391c0cb3, + 0x4ed8aa4a, + 0x5b9cca4f, + 0x682e6ff3, + 0x748f82ee, + 0x78a5636f, + 0x84c87814, + 0x8cc70208, + 0x90befffa, + 0xa4506ceb, + 0xbef9a3f7, + 0xc67178f2, +} diff --git a/vendor/github.com/minio/sha256-simd/sha256_test.go 
b/vendor/github.com/minio/sha256-simd/sha256_test.go new file mode 100644 index 0000000000..89499e377b --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256_test.go @@ -0,0 +1,2335 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Using this part of Minio codebase under the license +// Apache License Version 2.0 with modifications + +// SHA256 hash algorithm. See FIPS 180-2. + +package sha256 + +import ( + "encoding/hex" + "fmt" + "runtime" + "strings" + "testing" +) + +type sha256Test struct { + out [32]byte + in string +} + +var golden = []sha256Test{ + {[32]byte{(1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 
128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128)}, + ""}, + {[32]byte{(0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 
128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 
* 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128)}, + "a"}, + {[32]byte{(1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 
2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128)}, + "ab"}, + {[32]byte{(0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 
2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128)}, + "abc"}, + {[32]byte{(0 * 1) + (0 * 
2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 
8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128)}, + "abcd"}, + {[32]byte{(0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 
8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128)}, + "abcde"}, + {[32]byte{(0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 
* 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 
* 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128)}, + "abcdef"}, + {[32]byte{(1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + 
(1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128)}, + "abcdefg"}, + {[32]byte{(0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 
16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128)}, + "abcdefgh"}, + {[32]byte{(1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + 
(0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + 
(0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128)}, + "abcdefghi"}, + {[32]byte{(0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 
32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128)}, + "abcdefghij"}, + {[32]byte{(1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) 
+ (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) 
+ (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128)}, + "Discard medicine more than two years old."}, + {[32]byte{(1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 
* 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128)}, + "He who has a shady past knows that nice guys finish last."}, + {[32]byte{(0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 
* 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128)}, + "I wouldn't marry him with a ten foot pole."}, + {[32]byte{(1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 
4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 
16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128)}, + "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"}, + {[32]byte{(0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 
128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128)}, + "The days of the digital watch are numbered. 
-Tom Stoppard"}, + {[32]byte{(1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 
128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128)}, + "Nepal premier won't resign."}, + {[32]byte{(1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 
* 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128)}, + "For every action there is an equal and opposite government program."}, + {[32]byte{(0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 
* 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + 
(1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128)}, + "His money is twice tainted: 'taint yours and 'taint mine."}, + {[32]byte{(0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 
* 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128)}, + "There is no reason for any individual to have a computer in their home. 
-Ken Olsen, 1977"}, + {[32]byte{(1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 
* 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128)}, + "It's a tiny change to the code and not completely disgusting. - Bob Manchek"}, + {[32]byte{(1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 
1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128)}, + "size: a.out: bad magic"}, + {[32]byte{(1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + 
(1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) 
+ (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128)}, + "The major problem is with sendmail. -Mark Horton"}, + {[32]byte{(0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) 
+ (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128)}, + "Give me a rock, paper and scissors and I will move the world. 
CCFestoon"}, + {[32]byte{(0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 
128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128)}, + "If the enemy is within range, then so are you."}, + {[32]byte{(0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 
8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128)}, + "It's well we cannot hear the screams/That we create in others' dreams."}, + {[32]byte{(1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 
* 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 
* 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128)}, + "You remind me of a TV show, but that's all right: I watch it anyway."}, + {[32]byte{(1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + 
(1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128)}, + "C is as portable as Stonehedge!!"}, + {[32]byte{(1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 
64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 
* 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128)}, + "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"}, + {[32]byte{(1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 
1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128)}, + "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. 
Lewis-Randall Rule"}, + {[32]byte{(1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + 
(0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128)}, + "How can you write a big system without C++? -Paul Glick"}, + // $ echo -n "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz123" | sha256sum + // 13d8b6bf5cc79c03c07c719c48597bd33b79677e65098589b1580fca7f22bb22 + {[32]byte{(1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) 
+ (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128)}, + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz123"}, + // $ echo -n "BCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234" | sha256sum + // 624ddef3009879c6874da2dd771d54f7330781b60e1955ceff5f9dce8bf4ea43 + {[32]byte{(0 
* 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 
4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128)}, + "BCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234"}, + // $ echo -n "CDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz12345" | sha256sum + // cc031589b70dd4b24dc6def2121835ef1aa8074ff6952cdd3f81b5099a93c58d + {[32]byte{(0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 
* 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128)}, + "CDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz12345"}, + // $ echo -n "DEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz123456" | sha256sum + // d354abb6d538402db3d73daf95537a255ebaf3a943c80205be163e044fc46a70 + {[32]byte{(1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + 
(1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + 
(0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128)}, + "DEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz123456"}, + // $ echo -n "EFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567" | sha256sum + // f78410b90a20b521afb28f41d6388482afab7265ff8884aa6290cc9f9ada30d3 + {[32]byte{(1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) 
+ (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128)}, + "EFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567"}, + // $ echo -n "FGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz12345678" | sha256sum + // c93a8cb7ed80166b15b79c8617410ca69e46fa1e3c1d14876699d3ce6090384f + {[32]byte{(1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 
128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 
* 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128)}, + "FGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz12345678"}, + // $ echo -n "GHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz123456789" | sha256sum + // 6cb808e9a7fb53fa680824f08554b660d29a4afc9a101f990b4bae3a12b7fbd8 + {[32]byte{(0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 
* 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128)}, + "GHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz123456789"}, + // $ echo -n "HIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890" | sha256sum + // 84e8dd1afa78db222860ed40b6fcfc7a269469365f81f5712fb589555bdb01fe + {[32]byte{(0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + 
(1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + 
(1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128)}, + "HIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890"}, + // $ echo -n "IJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890A" | sha256sum + // accab8e85b6bd178e975aaaa354aed8258bcd6af3e61bd4f12267635856cab0b + {[32]byte{(0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 
2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128)}, + "IJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890A"}, + // $ echo -n "JKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890AB" | sha256sum + // 107f5ad8bc5d427246fc5f9c581134b61d8ba447e877df56cddad2bf53789172 + {[32]byte{(0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 
64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 
* 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128)}, + "JKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890AB"}, + // $ echo -n "KLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABC" | sha256sum + // 7666f65b234f78aa537c8d098b181091ce8b7866a0285b52e6bf31b6f21ca9bb + {[32]byte{(0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 
* 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128)}, + "KLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABC"}, + // $ echo -n "LMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCD" | sha256sum + // 4eba948ccee7289ab1f01628a1ab756dee39a6894aed217edc9a91a8b35e50ca + {[32]byte{(0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + 
(1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + 
(1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128)}, + "LMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCD"}, + // $ echo -n "MNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCDE" | sha256sum + // 5011218873e7ca84871668d26461e449e7033b7959d69cfb5c2fee773c3d432d + {[32]byte{(0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 
1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128)}, + "MNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCDE"}, + // $ echo -n "NOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCDEF" | sha256sum + // 6932b4ddaf3696e5d5270739bdbe6ab120bb8034b877bd3a8e5a5d5ca263e1c5 + {[32]byte{(1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 
32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 
128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128)}, + "NOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCDEF"}, + // $ echo -n "OPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCDEFG" | sha256sum + // 91bb1bcbfcb4c093aab255a0b8c8b5b93605e2f51dd6b0898b70b9f3c10fc1f9 + {[32]byte{(1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 
* 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128)}, + "OPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCDEFG"}, + // $ echo -n "PQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCDEFGH" | sha256sum + // 0d1fa5355388e361c4591bd49c004e3d99044be274db43e91036611365aead02 + {[32]byte{(1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) 
+ (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + 
(0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128)}, + "PQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCDEFGH"}, + // $ echo -n "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" | sha256sum + // b6ac3cc10386331c765f04f041c147d0f278f2aed8eaa021e2d0057fc6f6ff9e + {[32]byte{(0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) 
+ (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128)}, + strings.Repeat("A", 128)}, + // $ echo -n "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB" | sha256sum + // 7abaa701a6f4bb8d9ea3872a315597eb6f2ccfd03392d8d10560837f6136d06a + {[32]byte{(0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + 
(1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + 
(0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128)}, + strings.Repeat("B", 128)}, + // $ echo -n "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC" | sha256sum + // 6e8b9325f779dba60c4c148dee5ded43b19ed20d25d66e338abec53b99174fe8 + {[32]byte{(0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + 
(0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128)}, + strings.Repeat("C", 128)}, + // $ echo -n "DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD" | sha256sum + // 7aa020c91ac4d32e17efd9b64648b92e375987e0eae7d0a58544ca1e4fc32c3c + {[32]byte{(0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + 
(0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), 
+ (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128)}, + strings.Repeat("D", 128)}, + // $ echo -n "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE" | sha256sum + // 997f6a2fc44f1400e9f64d7eac11fe99e21f4b7a3fc2ff3ec95c2ef016abb9e5 + {[32]byte{(1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + 
(0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128)}, + strings.Repeat("E", 128)}, + // $ echo -n "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" | sha256sum + // 5c6cdeb9ccaa1d9c57662605ab738ec4ecf0467f576d4c2d7fae48710215582a + {[32]byte{(0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + 
(1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + 
(1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128)}, + strings.Repeat("F", 128)}, + // $ echo -n "GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG" | sha256sum + // 394394b5f0e91a21d1e932f9ed55e098c8b05f3668f77134eeee843fef1d1758 + {[32]byte{(1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + 
(1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128)}, + strings.Repeat("G", 128)}, + // $ echo -n "HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH" | sha256sum + // cab546612de68eaa849487342baadbac2561df6380ddac66137ef649e0cdfd0a + {[32]byte{(0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + 
(1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), 
+ (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128)}, + strings.Repeat("H", 128)}, + // $ echo -n "IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII" | sha256sum + // 2be96cc28445876429be3005db465d1b9c8ed1432e3ac6f1514b6e9eee725ad8 + {[32]byte{(1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + 
(1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128)}, + strings.Repeat("I", 128)}, + // $ echo -n "JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ" | sha256sum + // 238e5f81d54f2af58049b944c4a1b9516a36c2ef1e20887450b3482045714444 + {[32]byte{(1 * 1) + (1 * 2) + 
(0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + 
(1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128)}, + strings.Repeat("J", 128)}, + // $ echo -n "KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK" | sha256sum + // f3a5b826c64951661ce22dc67f0f79d13f633f0601aca2f5e1cf1a9f17dffd4f + {[32]byte{(1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + 
(0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128)}, + strings.Repeat("K", 128)}, + // $ echo -n "LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL" | sha256sum + // 
1e90c05bedd24dc3e297d5b8fb215b95d8b7f4a040ee912069614c7a3382725d + {[32]byte{(0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + 
(0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128)}, + strings.Repeat("L", 128)}, + // $ echo -n "MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM" | sha256sum + // 96239ac6fb99822797308f18d8455778fb5885103aa5ff59afe2219df657df99 + {[32]byte{(0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + 
(0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128)}, + strings.Repeat("M", 128)}, + // $ echo -n 
"NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN" | sha256sum + // 11e7f5a6f15a4addba9b6b21bc4f8ecbdd969e179335269fc68d3a05f0f3da4a + {[32]byte{(1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 
* 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128)}, + strings.Repeat("N", 128)}, + // $ echo -n "OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO" | sha256sum + // ae843b7e4e00afeb972bf948a345b319cca8bd0bcaa1428c1c67c88ea663c1e0 + {[32]byte{(0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 
* 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128)}, + 
strings.Repeat("O", 128)}, + // $ echo -n "PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP" | sha256sum + // f16ef3e254ffb74b7e3c97d99486ef8c549e4c80bc6dfed7fe8c5e7e76f4fbcd + {[32]byte{(1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 
* 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128)}, + strings.Repeat("P", 128)}, +} + +func TestGolden(t *testing.T) { + blockfuncSaved := blockfunc + + defer func() { + blockfunc = blockfuncSaved + }() + + if true { + blockfunc = blockfuncGeneric + for _, g := range golden { + s := fmt.Sprintf("%x", Sum256([]byte(g.in))) + if Sum256([]byte(g.in)) != g.out { + t.Fatalf("Generic: Sum256 function: sha256(%s) = %s want %s", g.in, s, hex.EncodeToString(g.out[:])) + } + } + } + + if runtime.GOARCH == "386" || runtime.GOARCH == "arm" { + // doesn't support anything but the generic version. 
+ return + } + + if sha && ssse3 && sse41 { + blockfunc = blockfuncSha + for _, g := range golden { + s := fmt.Sprintf("%x", Sum256([]byte(g.in))) + if Sum256([]byte(g.in)) != g.out { + t.Fatalf("SHA: Sum256 function: sha256(%s) = %s want %s", g.in, s, hex.EncodeToString(g.out[:])) + } + } + } + if avx2 { + blockfunc = blockfuncAvx2 + for _, g := range golden { + s := fmt.Sprintf("%x", Sum256([]byte(g.in))) + if Sum256([]byte(g.in)) != g.out { + t.Fatalf("AVX2: Sum256 function: sha256(%s) = %s want %s", g.in, s, hex.EncodeToString(g.out[:])) + } + } + } + if avx { + blockfunc = blockfuncAvx + for _, g := range golden { + s := fmt.Sprintf("%x", Sum256([]byte(g.in))) + if Sum256([]byte(g.in)) != g.out { + t.Fatalf("AVX: Sum256 function: sha256(%s) = %s want %s", g.in, s, hex.EncodeToString(g.out[:])) + } + } + } + if ssse3 { + blockfunc = blockfuncSsse + for _, g := range golden { + s := fmt.Sprintf("%x", Sum256([]byte(g.in))) + if Sum256([]byte(g.in)) != g.out { + t.Fatalf("SSSE3: Sum256 function: sha256(%s) = %s want %s", g.in, s, hex.EncodeToString(g.out[:])) + } + } + } +} + +func TestSize(t *testing.T) { + c := New() + if got := c.Size(); got != Size { + t.Errorf("Size = %d; want %d", got, Size) + } +} + +func TestBlockSize(t *testing.T) { + c := New() + if got := c.BlockSize(); got != BlockSize { + t.Errorf("BlockSize = %d want %d", got, BlockSize) + } +} + +func benchmarkSize(b *testing.B, size int) { + var bench = New() + var buf = make([]byte, size) + b.SetBytes(int64(size)) + sum := make([]byte, bench.Size()) + b.ResetTimer() + for i := 0; i < b.N; i++ { + bench.Reset() + bench.Write(buf[:size]) + bench.Sum(sum[:0]) + } +} + +func BenchmarkHash(b *testing.B) { + algos := []struct { + n string + t blockfuncType + f bool + }{ + {"SHA_", blockfuncSha, sha && sse41 && ssse3}, + {"AVX2", blockfuncAvx2, avx2}, + {"AVX_", blockfuncAvx, avx}, + {"SSSE", blockfuncSsse, ssse3}, + {"GEN_", blockfuncGeneric, true}, + } + + sizes := []struct { + n string + f 
func(*testing.B, int) + s int + }{ + {"8Bytes", benchmarkSize, 1 << 3}, + {"1K", benchmarkSize, 1 << 10}, + {"8K", benchmarkSize, 1 << 13}, + {"1M", benchmarkSize, 1 << 20}, + {"5M", benchmarkSize, 5 << 20}, + {"10M", benchmarkSize, 5 << 21}, + } + + for _, a := range algos { + if a.f { + blockfuncSaved := blockfunc + blockfunc = a.t + for _, y := range sizes { + s := a.n + "/" + y.n + b.Run(s, func(b *testing.B) { y.f(b, y.s) }) + } + blockfunc = blockfuncSaved + } + } +} diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.go new file mode 100644 index 0000000000..52fcaee6d2 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.go @@ -0,0 +1,22 @@ +//+build !noasm,!appengine + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sha256 + +//go:noescape +func blockAvx2(h []uint32, message []uint8) diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.s new file mode 100644 index 0000000000..80b0b739be --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.s @@ -0,0 +1,1449 @@ +//+build !noasm,!appengine + +// SHA256 implementation for AVX2 + +// +// Minio Cloud Storage, (C) 2016 Minio, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// +// This code is based on an Intel White-Paper: +// "Fast SHA-256 Implementations on Intel Architecture Processors" +// +// together with the reference implementation from the following authors: +// James Guilford +// Kirk Yap +// Tim Chen +// +// For Golang it has been converted to Plan 9 assembly with the help of +// github.com/minio/asm2plan9s to assemble Intel instructions to their Plan9 +// equivalents +// + +DATA K256<>+0x000(SB)/8, $0x71374491428a2f98 +DATA K256<>+0x008(SB)/8, $0xe9b5dba5b5c0fbcf +DATA K256<>+0x010(SB)/8, $0x71374491428a2f98 +DATA K256<>+0x018(SB)/8, $0xe9b5dba5b5c0fbcf +DATA K256<>+0x020(SB)/8, $0x59f111f13956c25b +DATA K256<>+0x028(SB)/8, $0xab1c5ed5923f82a4 +DATA K256<>+0x030(SB)/8, $0x59f111f13956c25b +DATA K256<>+0x038(SB)/8, $0xab1c5ed5923f82a4 +DATA K256<>+0x040(SB)/8, $0x12835b01d807aa98 +DATA K256<>+0x048(SB)/8, $0x550c7dc3243185be +DATA K256<>+0x050(SB)/8, $0x12835b01d807aa98 +DATA K256<>+0x058(SB)/8, $0x550c7dc3243185be +DATA K256<>+0x060(SB)/8, $0x80deb1fe72be5d74 +DATA K256<>+0x068(SB)/8, $0xc19bf1749bdc06a7 +DATA K256<>+0x070(SB)/8, $0x80deb1fe72be5d74 +DATA K256<>+0x078(SB)/8, $0xc19bf1749bdc06a7 +DATA K256<>+0x080(SB)/8, $0xefbe4786e49b69c1 +DATA K256<>+0x088(SB)/8, $0x240ca1cc0fc19dc6 +DATA K256<>+0x090(SB)/8, $0xefbe4786e49b69c1 +DATA K256<>+0x098(SB)/8, $0x240ca1cc0fc19dc6 +DATA K256<>+0x0a0(SB)/8, $0x4a7484aa2de92c6f +DATA K256<>+0x0a8(SB)/8, 
$0x76f988da5cb0a9dc +DATA K256<>+0x0b0(SB)/8, $0x4a7484aa2de92c6f +DATA K256<>+0x0b8(SB)/8, $0x76f988da5cb0a9dc +DATA K256<>+0x0c0(SB)/8, $0xa831c66d983e5152 +DATA K256<>+0x0c8(SB)/8, $0xbf597fc7b00327c8 +DATA K256<>+0x0d0(SB)/8, $0xa831c66d983e5152 +DATA K256<>+0x0d8(SB)/8, $0xbf597fc7b00327c8 +DATA K256<>+0x0e0(SB)/8, $0xd5a79147c6e00bf3 +DATA K256<>+0x0e8(SB)/8, $0x1429296706ca6351 +DATA K256<>+0x0f0(SB)/8, $0xd5a79147c6e00bf3 +DATA K256<>+0x0f8(SB)/8, $0x1429296706ca6351 +DATA K256<>+0x100(SB)/8, $0x2e1b213827b70a85 +DATA K256<>+0x108(SB)/8, $0x53380d134d2c6dfc +DATA K256<>+0x110(SB)/8, $0x2e1b213827b70a85 +DATA K256<>+0x118(SB)/8, $0x53380d134d2c6dfc +DATA K256<>+0x120(SB)/8, $0x766a0abb650a7354 +DATA K256<>+0x128(SB)/8, $0x92722c8581c2c92e +DATA K256<>+0x130(SB)/8, $0x766a0abb650a7354 +DATA K256<>+0x138(SB)/8, $0x92722c8581c2c92e +DATA K256<>+0x140(SB)/8, $0xa81a664ba2bfe8a1 +DATA K256<>+0x148(SB)/8, $0xc76c51a3c24b8b70 +DATA K256<>+0x150(SB)/8, $0xa81a664ba2bfe8a1 +DATA K256<>+0x158(SB)/8, $0xc76c51a3c24b8b70 +DATA K256<>+0x160(SB)/8, $0xd6990624d192e819 +DATA K256<>+0x168(SB)/8, $0x106aa070f40e3585 +DATA K256<>+0x170(SB)/8, $0xd6990624d192e819 +DATA K256<>+0x178(SB)/8, $0x106aa070f40e3585 +DATA K256<>+0x180(SB)/8, $0x1e376c0819a4c116 +DATA K256<>+0x188(SB)/8, $0x34b0bcb52748774c +DATA K256<>+0x190(SB)/8, $0x1e376c0819a4c116 +DATA K256<>+0x198(SB)/8, $0x34b0bcb52748774c +DATA K256<>+0x1a0(SB)/8, $0x4ed8aa4a391c0cb3 +DATA K256<>+0x1a8(SB)/8, $0x682e6ff35b9cca4f +DATA K256<>+0x1b0(SB)/8, $0x4ed8aa4a391c0cb3 +DATA K256<>+0x1b8(SB)/8, $0x682e6ff35b9cca4f +DATA K256<>+0x1c0(SB)/8, $0x78a5636f748f82ee +DATA K256<>+0x1c8(SB)/8, $0x8cc7020884c87814 +DATA K256<>+0x1d0(SB)/8, $0x78a5636f748f82ee +DATA K256<>+0x1d8(SB)/8, $0x8cc7020884c87814 +DATA K256<>+0x1e0(SB)/8, $0xa4506ceb90befffa +DATA K256<>+0x1e8(SB)/8, $0xc67178f2bef9a3f7 +DATA K256<>+0x1f0(SB)/8, $0xa4506ceb90befffa +DATA K256<>+0x1f8(SB)/8, $0xc67178f2bef9a3f7 + +DATA K256<>+0x200(SB)/8, $0x0405060700010203 
+DATA K256<>+0x208(SB)/8, $0x0c0d0e0f08090a0b +DATA K256<>+0x210(SB)/8, $0x0405060700010203 +DATA K256<>+0x218(SB)/8, $0x0c0d0e0f08090a0b +DATA K256<>+0x220(SB)/8, $0x0b0a090803020100 +DATA K256<>+0x228(SB)/8, $0xffffffffffffffff +DATA K256<>+0x230(SB)/8, $0x0b0a090803020100 +DATA K256<>+0x238(SB)/8, $0xffffffffffffffff +DATA K256<>+0x240(SB)/8, $0xffffffffffffffff +DATA K256<>+0x248(SB)/8, $0x0b0a090803020100 +DATA K256<>+0x250(SB)/8, $0xffffffffffffffff +DATA K256<>+0x258(SB)/8, $0x0b0a090803020100 + +GLOBL K256<>(SB), 8, $608 + +// We need 0x220 stack space aligned on a 512 boundary, so for the +// worstcase-aligned SP we need twice this amount, being 1088 (=0x440) +// +// SP aligned end-aligned stacksize +// 100013d0 10001400 10001620 592 +// 100013d8 10001400 10001620 584 +// 100013e0 10001600 10001820 1088 +// 100013e8 10001600 10001820 1080 + +// func blockAvx2(h []uint32, message []uint8) +TEXT ·blockAvx2(SB),$1088-48 + + MOVQ h+0(FP), DI // DI: &h + MOVQ message_base+24(FP), SI // SI: &message + MOVQ message_len+32(FP), DX // len(message) + ADDQ SI, DX // end pointer of input + MOVQ SP, R11 // copy stack pointer + ADDQ $0x220, SP // sp += 0x220 + ANDQ $0xfffffffffffffe00, SP // align stack frame + ADDQ $0x1c0, SP + MOVQ DI, 0x40(SP) // save ctx + MOVQ SI, 0x48(SP) // save input + MOVQ DX, 0x50(SP) // save end pointer + MOVQ R11, 0x58(SP) // save copy of stack pointer + + WORD $0xf8c5; BYTE $0x77 // vzeroupper + ADDQ $0x40, SI // input++ + MOVL (DI), AX + MOVQ SI, R12 // borrow $T1 + MOVL 4(DI), BX + CMPQ SI, DX // $_end + MOVL 8(DI), CX + LONG $0xe4440f4c // cmove r12,rsp /* next block or random data */ + MOVL 12(DI), DX + MOVL 16(DI), R8 + MOVL 20(DI), R9 + MOVL 24(DI), R10 + MOVL 28(DI), R11 + + LEAQ K256<>(SB), BP + LONG $0x856f7dc5; LONG $0x00000220 // VMOVDQA YMM8, 0x220[rbp] /* vmovdqa ymm8,YMMWORD PTR [rip+0x220] */ + LONG $0x8d6f7dc5; LONG $0x00000240 // VMOVDQA YMM9, 0x240[rbp] /* vmovdqa ymm9,YMMWORD PTR [rip+0x240] */ + LONG $0x956f7dc5; LONG 
$0x00000200 // VMOVDQA YMM10, 0x200[rbp] /* vmovdqa ymm7,YMMWORD PTR [rip+0x200] */ + +loop0: + LONG $0x6f7dc1c4; BYTE $0xfa // VMOVDQA YMM7, YMM10 + + // Load first 16 dwords from two blocks + MOVOU -64(SI), X0 // vmovdqu xmm0,XMMWORD PTR [rsi-0x40] + MOVOU -48(SI), X1 // vmovdqu xmm1,XMMWORD PTR [rsi-0x30] + MOVOU -32(SI), X2 // vmovdqu xmm2,XMMWORD PTR [rsi-0x20] + MOVOU -16(SI), X3 // vmovdqu xmm3,XMMWORD PTR [rsi-0x10] + + // Byte swap data and transpose data into high/low + LONG $0x387dc3c4; WORD $0x2404; BYTE $0x01 // vinserti128 ymm0,ymm0,[r12],0x1 + LONG $0x3875c3c4; LONG $0x0110244c // vinserti128 ymm1,ymm1,0x10[r12],0x1 + LONG $0x007de2c4; BYTE $0xc7 // vpshufb ymm0,ymm0,ymm7 + LONG $0x386dc3c4; LONG $0x01202454 // vinserti128 ymm2,ymm2,0x20[r12],0x1 + LONG $0x0075e2c4; BYTE $0xcf // vpshufb ymm1,ymm1,ymm7 + LONG $0x3865c3c4; LONG $0x0130245c // vinserti128 ymm3,ymm3,0x30[r12],0x1 + + LEAQ K256<>(SB), BP + LONG $0x006de2c4; BYTE $0xd7 // vpshufb ymm2,ymm2,ymm7 + LONG $0x65fefdc5; BYTE $0x00 // vpaddd ymm4,ymm0,[rbp] + LONG $0x0065e2c4; BYTE $0xdf // vpshufb ymm3,ymm3,ymm7 + LONG $0x6dfef5c5; BYTE $0x20 // vpaddd ymm5,ymm1,0x20[rbp] + LONG $0x75feedc5; BYTE $0x40 // vpaddd ymm6,ymm2,0x40[rbp] + LONG $0x7dfee5c5; BYTE $0x60 // vpaddd ymm7,ymm3,0x60[rbp] + + LONG $0x247ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm4 + XORQ R14, R14 + LONG $0x6c7ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm5 + + ADDQ $-0x40, SP + MOVQ BX, DI + LONG $0x347ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm6 + XORQ CX, DI // magic + LONG $0x7c7ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm7 + MOVQ R9, R12 + ADDQ $0x80, BP + +loop1: + // Schedule 48 input dwords, by doing 3 rounds of 12 each + // Note: SIMD instructions are interleaved with the SHA calculations + ADDQ $-0x40, SP + LONG $0x0f75e3c4; WORD $0x04e0 // vpalignr ymm4,ymm1,ymm0,0x4 + + // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x80) + LONG $0x249c0344; LONG $0x00000080 // add r11d,[rsp+0x80] + WORD $0x2145; 
BYTE $0xc4 // and r12d,r8d + LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 + LONG $0x0f65e3c4; WORD $0x04fa // vpalignr ymm7,ymm3,ymm2,0x4 + LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb + LONG $0x30048d42 // lea eax,[rax+r14*1] + LONG $0x231c8d47 // lea r11d,[r11+r12*1] + LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7 + LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6 + LONG $0xc7fefdc5 // vpaddd ymm0,ymm0,ymm7 + LONG $0x231c8d47 // lea r11d,[r11+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xc7 // mov r15d,eax + LONG $0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3 + LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 + LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] + WORD $0x3141; BYTE $0xdf // xor r15d,ebx + LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe + LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd + LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 + LONG $0x1a148d42 // lea edx,[rdx+r11*1] + LONG $0xe6efc5c5 // vpxor ymm4,ymm7,ymm6 + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xdf31 // xor edi,ebx + LONG $0xfb70fdc5; BYTE $0xfa // vpshufd ymm7,ymm3,0xfa + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] + WORD $0x8945; BYTE $0xc4 // mov r12d,r8d + LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb + + // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x84) + LONG $0x24940344; LONG $0x00000084 // add r10d,[rsp+0x84] + WORD $0x2141; BYTE $0xd4 // and r12d,edx + LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 + LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 + LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb + LONG $0x331c8d47 // lea r11d,[r11+r14*1] + LONG $0x22148d47 // lea r10d,[r10+r12*1] + LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb + LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d + 
WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 + LONG $0xe6efddc5 // vpxor ymm4,ymm4,ymm6 + LONG $0x22148d47 // lea r10d,[r10+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xdf // mov edi,r11d + LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa + LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 + LONG $0x2a148d47 // lea r10d,[r10+r13*1] + WORD $0xc731 // xor edi,eax + LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 + LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd + LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 + LONG $0x110c8d42 // lea ecx,[rcx+r10*1] + LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xc7 // xor r15d,eax + LONG $0xc4fefdc5 // vpaddd ymm0,ymm0,ymm4 + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3a148d47 // lea r10d,[r10+r15*1] + WORD $0x8941; BYTE $0xd4 // mov r12d,edx + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + + // ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x88) + LONG $0x248c0344; LONG $0x00000088 // add r9d,[rsp+0x88] + WORD $0x2141; BYTE $0xcc // and r12d,ecx + LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 + LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 + LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb + LONG $0x32148d47 // lea r10d,[r10+r14*1] + LONG $0x210c8d47 // lea r9d,[r9+r12*1] + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6 + LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8 + LONG $0x210c8d47 // lea r9d,[r9+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8945; BYTE $0xd7 // mov r15d,r10d + LONG $0xc6fefdc5 // vpaddd ymm0,ymm0,ymm6 + LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 + LONG $0x290c8d47 // lea 
r9d,[r9+r13*1] + WORD $0x3145; BYTE $0xdf // xor r15d,r11d + LONG $0xf870fdc5; BYTE $0x50 // vpshufd ymm7,ymm0,0x50 + LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd + LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 + LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] + LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xdf // xor edi,r11d + LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x390c8d45 // lea r9d,[r9+rdi*1] + WORD $0x8941; BYTE $0xcc // mov r12d,ecx + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + + // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x8c) + LONG $0x24840344; LONG $0x0000008c // add r8d,[rsp+0x8c] + WORD $0x2141; BYTE $0xdc // and r12d,ebx + LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 + LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 + LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb + LONG $0x310c8d47 // lea r9d,[r9+r14*1] + LONG $0x20048d47 // lea r8d,[r8+r12*1] + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6 + LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9 + LONG $0x20048d47 // lea r8d,[r8+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xcf // mov edi,r9d + LONG $0xc6fefdc5 // vpaddd ymm0,ymm0,ymm6 + LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 + LONG $0x28048d47 // lea r8d,[r8+r13*1] + WORD $0x3144; BYTE $0xd7 // xor edi,r10d + LONG $0x75fefdc5; BYTE $0x00 // vpaddd ymm6,ymm0,[rbp+0x0] + LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd + LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 + LONG $0x00048d42 // lea eax,[rax+r8*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3145; BYTE $0xd7 // xor 
r15d,r10d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d47 // lea r8d,[r8+r15*1] + WORD $0x8941; BYTE $0xdc // mov r12d,ebx + + LONG $0x347ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm6 + LONG $0x0f6de3c4; WORD $0x04e1 // vpalignr ymm4,ymm2,ymm1,0x4 + + // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0xa0) + LONG $0xa0249403; WORD $0x0000; BYTE $0x00 // add edx,[rsp+0xa0] + WORD $0x2141; BYTE $0xc4 // and r12d,eax + LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19 + LONG $0x0f7de3c4; WORD $0x04fb // vpalignr ymm7,ymm0,ymm3,0x4 + LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb + LONG $0x30048d47 // lea r8d,[r8+r14*1] + LONG $0x22148d42 // lea edx,[rdx+r12*1] + LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7 + LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 + LONG $0xcffef5c5 // vpaddd ymm1,ymm1,ymm7 + LONG $0x22148d42 // lea edx,[rdx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8945; BYTE $0xc7 // mov r15d,r8d + LONG $0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3 + LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 + LONG $0x2a148d42 // lea edx,[rdx+r13*1] + WORD $0x3145; BYTE $0xcf // xor r15d,r9d + LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe + LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd + LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 + LONG $0x131c8d45 // lea r11d,[r11+rdx*1] + LONG $0xe6efc5c5 // vpxor ymm4,ymm7,ymm6 + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xcf // xor edi,r9d + LONG $0xf870fdc5; BYTE $0xfa // vpshufd ymm7,ymm0,0xfa + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] + WORD $0x8941; BYTE $0xc4 // mov r12d,eax + LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb + + // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0xa4) + LONG 
$0xa4248c03; WORD $0x0000; BYTE $0x00 // add ecx,[rsp+0xa4] + WORD $0x2145; BYTE $0xdc // and r12d,r11d + LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 + LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 + LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb + LONG $0x32148d42 // lea edx,[rdx+r14*1] + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb + LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 + LONG $0xe6efddc5 // vpxor ymm4,ymm4,ymm6 + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0xd789 // mov edi,edx + LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa + LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 + LONG $0x290c8d42 // lea ecx,[rcx+r13*1] + WORD $0x3144; BYTE $0xc7 // xor edi,r8d + LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 + LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd + LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 + LONG $0x0a148d45 // lea r10d,[r10+rcx*1] + LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3145; BYTE $0xc7 // xor r15d,r8d + LONG $0xccfef5c5 // vpaddd ymm1,ymm1,ymm4 + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x390c8d42 // lea ecx,[rcx+r15*1] + WORD $0x8945; BYTE $0xdc // mov r12d,r11d + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + + // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0xa8) + LONG $0xa8249c03; WORD $0x0000; BYTE $0x00 // add ebx,[rsp+0xa8] + WORD $0x2145; BYTE $0xd4 // and r12d,r10d + LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 + LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 + LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb + LONG $0x310c8d42 // lea ecx,[rcx+r14*1] + LONG $0x231c8d42 // lea ebx,[rbx+r12*1] + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + LONG 
$0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 + LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8 + LONG $0x231c8d42 // lea ebx,[rbx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xcf // mov r15d,ecx + LONG $0xcefef5c5 // vpaddd ymm1,ymm1,ymm6 + LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 + LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] + WORD $0x3141; BYTE $0xd7 // xor r15d,edx + LONG $0xf970fdc5; BYTE $0x50 // vpshufd ymm7,ymm1,0x50 + LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd + LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 + LONG $0x190c8d45 // lea r9d,[r9+rbx*1] + LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xd731 // xor edi,edx + LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] + WORD $0x8945; BYTE $0xd4 // mov r12d,r10d + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + + // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 0xac) + LONG $0xac248403; WORD $0x0000; BYTE $0x00 // add eax,[rsp+0xac] + WORD $0x2145; BYTE $0xcc // and r12d,r9d + LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 + LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 + LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb + LONG $0x331c8d42 // lea ebx,[rbx+r14*1] + LONG $0x20048d42 // lea eax,[rax+r12*1] + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 + LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9 + LONG $0x20048d42 // lea eax,[rax+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0xdf89 // mov edi,ebx + LONG $0xcefef5c5 // vpaddd ymm1,ymm1,ymm6 + LONG $0xf07b63c4; 
WORD $0x16e3 // rorx r12d,ebx,0x16 + LONG $0x28048d42 // lea eax,[rax+r13*1] + WORD $0xcf31 // xor edi,ecx + LONG $0x75fef5c5; BYTE $0x20 // vpaddd ymm6,ymm1,[rbp+0x20] + LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd + LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 + LONG $0x00048d45 // lea r8d,[r8+rax*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xcf // xor r15d,ecx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d42 // lea eax,[rax+r15*1] + WORD $0x8945; BYTE $0xcc // mov r12d,r9d + + LONG $0x747ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm6 + + LONG $0x24648d48; BYTE $0xc0 // lea rsp,[rsp-0x40] + LONG $0x0f65e3c4; WORD $0x04e2 // vpalignr ymm4,ymm3,ymm2,0x4 + + // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x80) + LONG $0x249c0344; LONG $0x00000080 // add r11d,[rsp+0x80] + WORD $0x2145; BYTE $0xc4 // and r12d,r8d + LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 + LONG $0x0f75e3c4; WORD $0x04f8 // vpalignr ymm7,ymm1,ymm0,0x4 + LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb + LONG $0x30048d42 // lea eax,[rax+r14*1] + LONG $0x231c8d47 // lea r11d,[r11+r12*1] + LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7 + LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6 + LONG $0xd7feedc5 // vpaddd ymm2,ymm2,ymm7 + LONG $0x231c8d47 // lea r11d,[r11+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xc7 // mov r15d,eax + LONG $0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3 + LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 + LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] + WORD $0x3141; BYTE $0xdf // xor r15d,ebx + LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe + LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd + LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 + LONG $0x1a148d42 // lea edx,[rdx+r11*1] + LONG $0xe6efc5c5 
// vpxor ymm4,ymm7,ymm6 + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xdf31 // xor edi,ebx + LONG $0xf970fdc5; BYTE $0xfa // vpshufd ymm7,ymm1,0xfa + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] + WORD $0x8945; BYTE $0xc4 // mov r12d,r8d + LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb + + // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x84) + LONG $0x24940344; LONG $0x00000084 // add r10d,[rsp+0x84] + WORD $0x2141; BYTE $0xd4 // and r12d,edx + LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 + LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 + LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb + LONG $0x331c8d47 // lea r11d,[r11+r14*1] + LONG $0x22148d47 // lea r10d,[r10+r12*1] + LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb + LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 + LONG $0xe6efddc5 // vpxor ymm4,ymm4,ymm6 + LONG $0x22148d47 // lea r10d,[r10+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xdf // mov edi,r11d + LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa + LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 + LONG $0x2a148d47 // lea r10d,[r10+r13*1] + WORD $0xc731 // xor edi,eax + LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 + LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd + LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 + LONG $0x110c8d42 // lea ecx,[rcx+r10*1] + LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xc7 // xor r15d,eax + LONG $0xd4feedc5 // vpaddd ymm2,ymm2,ymm4 + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3a148d47 // lea r10d,[r10+r15*1] + WORD $0x8941; BYTE $0xd4 // mov r12d,edx + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + + // ROUND(R10, R11, AX, BX, CX, 
DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x88) + LONG $0x248c0344; LONG $0x00000088 // add r9d,[rsp+0x88] + WORD $0x2141; BYTE $0xcc // and r12d,ecx + LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 + LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 + LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb + LONG $0x32148d47 // lea r10d,[r10+r14*1] + LONG $0x210c8d47 // lea r9d,[r9+r12*1] + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6 + LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8 + LONG $0x210c8d47 // lea r9d,[r9+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8945; BYTE $0xd7 // mov r15d,r10d + LONG $0xd6feedc5 // vpaddd ymm2,ymm2,ymm6 + LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 + LONG $0x290c8d47 // lea r9d,[r9+r13*1] + WORD $0x3145; BYTE $0xdf // xor r15d,r11d + LONG $0xfa70fdc5; BYTE $0x50 // vpshufd ymm7,ymm2,0x50 + LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd + LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 + LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] + LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xdf // xor edi,r11d + LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x390c8d45 // lea r9d,[r9+rdi*1] + WORD $0x8941; BYTE $0xcc // mov r12d,ecx + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + + // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x8c) + LONG $0x24840344; LONG $0x0000008c // add r8d,[rsp+0x8c] + WORD $0x2141; BYTE $0xdc // and r12d,ebx + LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 + LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 + LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb + LONG $0x310c8d47 // lea r9d,[r9+r14*1] + LONG $0x20048d47 // lea 
r8d,[r8+r12*1] + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6 + LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9 + LONG $0x20048d47 // lea r8d,[r8+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xcf // mov edi,r9d + LONG $0xd6feedc5 // vpaddd ymm2,ymm2,ymm6 + LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 + LONG $0x28048d47 // lea r8d,[r8+r13*1] + WORD $0x3144; BYTE $0xd7 // xor edi,r10d + LONG $0x75feedc5; BYTE $0x40 // vpaddd ymm6,ymm2,[rbp+0x40] + LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd + LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 + LONG $0x00048d42 // lea eax,[rax+r8*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3145; BYTE $0xd7 // xor r15d,r10d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d47 // lea r8d,[r8+r15*1] + WORD $0x8941; BYTE $0xdc // mov r12d,ebx + + LONG $0x347ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm6 + LONG $0x0f7de3c4; WORD $0x04e3 // vpalignr ymm4,ymm0,ymm3,0x4 + + // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0xa0) + LONG $0xa0249403; WORD $0x0000; BYTE $0x00 // add edx,[rsp+0xa0] + WORD $0x2141; BYTE $0xc4 // and r12d,eax + LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19 + LONG $0x0f6de3c4; WORD $0x04f9 // vpalignr ymm7,ymm2,ymm1,0x4 + LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb + LONG $0x30048d47 // lea r8d,[r8+r14*1] + LONG $0x22148d42 // lea edx,[rdx+r12*1] + LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7 + LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 + LONG $0xdffee5c5 // vpaddd ymm3,ymm3,ymm7 + LONG $0x22148d42 // lea edx,[rdx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8945; BYTE $0xc7 // mov r15d,r8d + LONG 
$0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3 + LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 + LONG $0x2a148d42 // lea edx,[rdx+r13*1] + WORD $0x3145; BYTE $0xcf // xor r15d,r9d + LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe + LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd + LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 + LONG $0x131c8d45 // lea r11d,[r11+rdx*1] + LONG $0xe6efc5c5 // vpxor ymm4,ymm7,ymm6 + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xcf // xor edi,r9d + LONG $0xfa70fdc5; BYTE $0xfa // vpshufd ymm7,ymm2,0xfa + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] + WORD $0x8941; BYTE $0xc4 // mov r12d,eax + LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb + + // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0xa4) + LONG $0xa4248c03; WORD $0x0000; BYTE $0x00 // add ecx,[rsp+0xa4] + WORD $0x2145; BYTE $0xdc // and r12d,r11d + LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 + LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 + LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb + LONG $0x32148d42 // lea edx,[rdx+r14*1] + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb + LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 + LONG $0xe6efddc5 // vpxor ymm4,ymm4,ymm6 + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0xd789 // mov edi,edx + LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa + LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 + LONG $0x290c8d42 // lea ecx,[rcx+r13*1] + WORD $0x3144; BYTE $0xc7 // xor edi,r8d + LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 + LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd + LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 + LONG $0x0a148d45 // lea r10d,[r10+rcx*1] + 
LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3145; BYTE $0xc7 // xor r15d,r8d + LONG $0xdcfee5c5 // vpaddd ymm3,ymm3,ymm4 + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x390c8d42 // lea ecx,[rcx+r15*1] + WORD $0x8945; BYTE $0xdc // mov r12d,r11d + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + + // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0xa8) + LONG $0xa8249c03; WORD $0x0000; BYTE $0x00 // add ebx,[rsp+0xa8] + WORD $0x2145; BYTE $0xd4 // and r12d,r10d + LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 + LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 + LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb + LONG $0x310c8d42 // lea ecx,[rcx+r14*1] + LONG $0x231c8d42 // lea ebx,[rbx+r12*1] + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 + LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8 + LONG $0x231c8d42 // lea ebx,[rbx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xcf // mov r15d,ecx + LONG $0xdefee5c5 // vpaddd ymm3,ymm3,ymm6 + LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 + LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] + WORD $0x3141; BYTE $0xd7 // xor r15d,edx + LONG $0xfb70fdc5; BYTE $0x50 // vpshufd ymm7,ymm3,0x50 + LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd + LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 + LONG $0x190c8d45 // lea r9d,[r9+rbx*1] + LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xd731 // xor edi,edx + LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] + WORD $0x8945; BYTE $0xd4 // mov r12d,r10d + LONG $0xf7efcdc5 
// vpxor ymm6,ymm6,ymm7 + + // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 0xac) + LONG $0xac248403; WORD $0x0000; BYTE $0x00 // add eax,[rsp+0xac] + WORD $0x2145; BYTE $0xcc // and r12d,r9d + LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 + LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 + LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb + LONG $0x331c8d42 // lea ebx,[rbx+r14*1] + LONG $0x20048d42 // lea eax,[rax+r12*1] + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 + LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9 + LONG $0x20048d42 // lea eax,[rax+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0xdf89 // mov edi,ebx + LONG $0xdefee5c5 // vpaddd ymm3,ymm3,ymm6 + LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16 + LONG $0x28048d42 // lea eax,[rax+r13*1] + WORD $0xcf31 // xor edi,ecx + LONG $0x75fee5c5; BYTE $0x60 // vpaddd ymm6,ymm3,[rbp+0x60] + LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd + LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 + LONG $0x00048d45 // lea r8d,[r8+rax*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xcf // xor r15d,ecx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d42 // lea eax,[rax+r15*1] + WORD $0x8945; BYTE $0xcc // mov r12d,r9d + + LONG $0x747ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm6 + ADDQ $0x80, BP + + CMPB 0x3(BP), $0x0 + JNE loop1 + + // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x40) + LONG $0x245c0344; BYTE $0x40 // add r11d,[rsp+0x40] + WORD $0x2145; BYTE $0xc4 // and r12d,r8d + LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 + LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb + LONG $0x30048d42 // lea eax,[rax+r14*1] + LONG $0x231c8d47 // lea r11d,[r11+r12*1] + LONG $0xf23842c4; BYTE $0xe2 // andn 
r12d,r8d,r10d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6 + LONG $0x231c8d47 // lea r11d,[r11+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xc7 // mov r15d,eax + LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 + LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] + WORD $0x3141; BYTE $0xdf // xor r15d,ebx + LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd + LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 + LONG $0x1a148d42 // lea edx,[rdx+r11*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xdf31 // xor edi,ebx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] + WORD $0x8945; BYTE $0xc4 // mov r12d,r8d + + // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x44) + LONG $0x24540344; BYTE $0x44 // add r10d,[rsp+0x44] + WORD $0x2141; BYTE $0xd4 // and r12d,edx + LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 + LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb + LONG $0x331c8d47 // lea r11d,[r11+r14*1] + LONG $0x22148d47 // lea r10d,[r10+r12*1] + LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 + LONG $0x22148d47 // lea r10d,[r10+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xdf // mov edi,r11d + LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 + LONG $0x2a148d47 // lea r10d,[r10+r13*1] + WORD $0xc731 // xor edi,eax + LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd + LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 + LONG $0x110c8d42 // lea ecx,[rcx+r10*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xc7 // xor r15d,eax + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3a148d47 // lea r10d,[r10+r15*1] + WORD $0x8941; BYTE $0xd4 // mov r12d,edx + + // ROUND(R10, R11, AX, 
BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x48) + LONG $0x244c0344; BYTE $0x48 // add r9d,[rsp+0x48] + WORD $0x2141; BYTE $0xcc // and r12d,ecx + LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 + LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb + LONG $0x32148d47 // lea r10d,[r10+r14*1] + LONG $0x210c8d47 // lea r9d,[r9+r12*1] + LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6 + LONG $0x210c8d47 // lea r9d,[r9+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8945; BYTE $0xd7 // mov r15d,r10d + LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 + LONG $0x290c8d47 // lea r9d,[r9+r13*1] + WORD $0x3145; BYTE $0xdf // xor r15d,r11d + LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd + LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 + LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xdf // xor edi,r11d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x390c8d45 // lea r9d,[r9+rdi*1] + WORD $0x8941; BYTE $0xcc // mov r12d,ecx + + // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x4c) + LONG $0x24440344; BYTE $0x4c // add r8d,[rsp+0x4c] + WORD $0x2141; BYTE $0xdc // and r12d,ebx + LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 + LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb + LONG $0x310c8d47 // lea r9d,[r9+r14*1] + LONG $0x20048d47 // lea r8d,[r8+r12*1] + LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6 + LONG $0x20048d47 // lea r8d,[r8+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xcf // mov edi,r9d + LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 + LONG $0x28048d47 // lea r8d,[r8+r13*1] + WORD $0x3144; BYTE $0xd7 // xor edi,r10d + LONG $0xf07b43c4; WORD $0x0df1 // rorx 
r14d,r9d,0xd + LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 + LONG $0x00048d42 // lea eax,[rax+r8*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3145; BYTE $0xd7 // xor r15d,r10d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d47 // lea r8d,[r8+r15*1] + WORD $0x8941; BYTE $0xdc // mov r12d,ebx + + // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0x60) + LONG $0x60245403 // add edx,[rsp+0x60] + WORD $0x2141; BYTE $0xc4 // and r12d,eax + LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19 + LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb + LONG $0x30048d47 // lea r8d,[r8+r14*1] + LONG $0x22148d42 // lea edx,[rdx+r12*1] + LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 + LONG $0x22148d42 // lea edx,[rdx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8945; BYTE $0xc7 // mov r15d,r8d + LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 + LONG $0x2a148d42 // lea edx,[rdx+r13*1] + WORD $0x3145; BYTE $0xcf // xor r15d,r9d + LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd + LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 + LONG $0x131c8d45 // lea r11d,[r11+rdx*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xcf // xor edi,r9d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] + WORD $0x8941; BYTE $0xc4 // mov r12d,eax + + // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0x64) + LONG $0x64244c03 // add ecx,[rsp+0x64] + WORD $0x2145; BYTE $0xdc // and r12d,r11d + LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 + LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb + LONG $0x32148d42 // lea edx,[rdx+r14*1] + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx + WORD $0x3141; BYTE $0xfd 
// xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0xd789 // mov edi,edx + LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 + LONG $0x290c8d42 // lea ecx,[rcx+r13*1] + WORD $0x3144; BYTE $0xc7 // xor edi,r8d + LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd + LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 + LONG $0x0a148d45 // lea r10d,[r10+rcx*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3145; BYTE $0xc7 // xor r15d,r8d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x390c8d42 // lea ecx,[rcx+r15*1] + WORD $0x8945; BYTE $0xdc // mov r12d,r11d + + // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0x68) + LONG $0x68245c03 // add ebx,[rsp+0x68] + WORD $0x2145; BYTE $0xd4 // and r12d,r10d + LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 + LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb + LONG $0x310c8d42 // lea ecx,[rcx+r14*1] + LONG $0x231c8d42 // lea ebx,[rbx+r12*1] + LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 + LONG $0x231c8d42 // lea ebx,[rbx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xcf // mov r15d,ecx + LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 + LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] + WORD $0x3141; BYTE $0xd7 // xor r15d,edx + LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd + LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 + LONG $0x190c8d45 // lea r9d,[r9+rbx*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xd731 // xor edi,edx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] + WORD $0x8945; BYTE $0xd4 // mov r12d,r10d + + // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 
0x6c) + LONG $0x6c244403 // add eax,[rsp+0x6c] + WORD $0x2145; BYTE $0xcc // and r12d,r9d + LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 + LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb + LONG $0x331c8d42 // lea ebx,[rbx+r14*1] + LONG $0x20048d42 // lea eax,[rax+r12*1] + LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 + LONG $0x20048d42 // lea eax,[rax+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0xdf89 // mov edi,ebx + LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16 + LONG $0x28048d42 // lea eax,[rax+r13*1] + WORD $0xcf31 // xor edi,ecx + LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd + LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 + LONG $0x00048d45 // lea r8d,[r8+rax*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xcf // xor r15d,ecx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d42 // lea eax,[rax+r15*1] + WORD $0x8945; BYTE $0xcc // mov r12d,r9d + + // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x00) + LONG $0x241c0344 // add r11d,[rsp] + WORD $0x2145; BYTE $0xc4 // and r12d,r8d + LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 + LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb + LONG $0x30048d42 // lea eax,[rax+r14*1] + LONG $0x231c8d47 // lea r11d,[r11+r12*1] + LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6 + LONG $0x231c8d47 // lea r11d,[r11+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xc7 // mov r15d,eax + LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 + LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] + WORD $0x3141; BYTE $0xdf // xor r15d,ebx + LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd + LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 + LONG $0x1a148d42 // lea 
edx,[rdx+r11*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xdf31 // xor edi,ebx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] + WORD $0x8945; BYTE $0xc4 // mov r12d,r8d + + // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x04) + LONG $0x24540344; BYTE $0x04 // add r10d,[rsp+0x4] + WORD $0x2141; BYTE $0xd4 // and r12d,edx + LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 + LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb + LONG $0x331c8d47 // lea r11d,[r11+r14*1] + LONG $0x22148d47 // lea r10d,[r10+r12*1] + LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 + LONG $0x22148d47 // lea r10d,[r10+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xdf // mov edi,r11d + LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 + LONG $0x2a148d47 // lea r10d,[r10+r13*1] + WORD $0xc731 // xor edi,eax + LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd + LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 + LONG $0x110c8d42 // lea ecx,[rcx+r10*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xc7 // xor r15d,eax + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3a148d47 // lea r10d,[r10+r15*1] + WORD $0x8941; BYTE $0xd4 // mov r12d,edx + + // ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x08) + LONG $0x244c0344; BYTE $0x08 // add r9d,[rsp+0x8] + WORD $0x2141; BYTE $0xcc // and r12d,ecx + LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 + LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb + LONG $0x32148d47 // lea r10d,[r10+r14*1] + LONG $0x210c8d47 // lea r9d,[r9+r12*1] + LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6 + LONG $0x210c8d47 // lea 
r9d,[r9+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8945; BYTE $0xd7 // mov r15d,r10d + LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 + LONG $0x290c8d47 // lea r9d,[r9+r13*1] + WORD $0x3145; BYTE $0xdf // xor r15d,r11d + LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd + LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 + LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xdf // xor edi,r11d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x390c8d45 // lea r9d,[r9+rdi*1] + WORD $0x8941; BYTE $0xcc // mov r12d,ecx + + // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x0c) + LONG $0x24440344; BYTE $0x0c // add r8d,[rsp+0xc] + WORD $0x2141; BYTE $0xdc // and r12d,ebx + LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 + LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb + LONG $0x310c8d47 // lea r9d,[r9+r14*1] + LONG $0x20048d47 // lea r8d,[r8+r12*1] + LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6 + LONG $0x20048d47 // lea r8d,[r8+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xcf // mov edi,r9d + LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 + LONG $0x28048d47 // lea r8d,[r8+r13*1] + WORD $0x3144; BYTE $0xd7 // xor edi,r10d + LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd + LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 + LONG $0x00048d42 // lea eax,[rax+r8*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3145; BYTE $0xd7 // xor r15d,r10d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d47 // lea r8d,[r8+r15*1] + WORD $0x8941; BYTE $0xdc // mov r12d,ebx + + // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0x20) + LONG $0x20245403 // add edx,[rsp+0x20] + WORD $0x2141; BYTE $0xc4 // and 
r12d,eax + LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19 + LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb + LONG $0x30048d47 // lea r8d,[r8+r14*1] + LONG $0x22148d42 // lea edx,[rdx+r12*1] + LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 + LONG $0x22148d42 // lea edx,[rdx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8945; BYTE $0xc7 // mov r15d,r8d + LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 + LONG $0x2a148d42 // lea edx,[rdx+r13*1] + WORD $0x3145; BYTE $0xcf // xor r15d,r9d + LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd + LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 + LONG $0x131c8d45 // lea r11d,[r11+rdx*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xcf // xor edi,r9d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] + WORD $0x8941; BYTE $0xc4 // mov r12d,eax + + // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0x24) + LONG $0x24244c03 // add ecx,[rsp+0x24] + WORD $0x2145; BYTE $0xdc // and r12d,r11d + LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 + LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb + LONG $0x32148d42 // lea edx,[rdx+r14*1] + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0xd789 // mov edi,edx + LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 + LONG $0x290c8d42 // lea ecx,[rcx+r13*1] + WORD $0x3144; BYTE $0xc7 // xor edi,r8d + LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd + LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 + LONG $0x0a148d45 // lea r10d,[r10+rcx*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + 
WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3145; BYTE $0xc7 // xor r15d,r8d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x390c8d42 // lea ecx,[rcx+r15*1] + WORD $0x8945; BYTE $0xdc // mov r12d,r11d + + // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0x28) + LONG $0x28245c03 // add ebx,[rsp+0x28] + WORD $0x2145; BYTE $0xd4 // and r12d,r10d + LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 + LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb + LONG $0x310c8d42 // lea ecx,[rcx+r14*1] + LONG $0x231c8d42 // lea ebx,[rbx+r12*1] + LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 + LONG $0x231c8d42 // lea ebx,[rbx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xcf // mov r15d,ecx + LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 + LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] + WORD $0x3141; BYTE $0xd7 // xor r15d,edx + LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd + LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 + LONG $0x190c8d45 // lea r9d,[r9+rbx*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xd731 // xor edi,edx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] + WORD $0x8945; BYTE $0xd4 // mov r12d,r10d + + // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 0x2c) + LONG $0x2c244403 // add eax,[rsp+0x2c] + WORD $0x2145; BYTE $0xcc // and r12d,r9d + LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 + LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb + LONG $0x331c8d42 // lea ebx,[rbx+r14*1] + LONG $0x20048d42 // lea eax,[rax+r12*1] + LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 + LONG $0x20048d42 // lea eax,[rax+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + 
WORD $0xdf89 // mov edi,ebx + LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16 + LONG $0x28048d42 // lea eax,[rax+r13*1] + WORD $0xcf31 // xor edi,ecx + LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd + LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 + LONG $0x00048d45 // lea r8d,[r8+rax*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xcf // xor r15d,ecx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d42 // lea eax,[rax+r15*1] + WORD $0x8945; BYTE $0xcc // mov r12d,r9d + + MOVQ 0x200(SP), DI // $_ctx + ADDQ R14, AX + + LEAQ 0x1c0(SP), BP + + ADDL (DI), AX + ADDL 4(DI), BX + ADDL 8(DI), CX + ADDL 12(DI), DX + ADDL 16(DI), R8 + ADDL 20(DI), R9 + ADDL 24(DI), R10 + ADDL 28(DI), R11 + + MOVL AX, (DI) + MOVL BX, 4(DI) + MOVL CX, 8(DI) + MOVL DX, 12(DI) + MOVL R8, 16(DI) + MOVL R9, 20(DI) + MOVL R10, 24(DI) + MOVL R11, 28(DI) + + CMPQ SI, 0x50(BP) // $_end + JE done + + XORQ R14, R14 + MOVQ BX, DI + XORQ CX, DI // magic + MOVQ R9, R12 + +loop2: + // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, BP, 0x10) + LONG $0x105d0344 // add r11d,[rbp+0x10] + WORD $0x2145; BYTE $0xc4 // and r12d,r8d + LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 + LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb + LONG $0x30048d42 // lea eax,[rax+r14*1] + LONG $0x231c8d47 // lea r11d,[r11+r12*1] + LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6 + LONG $0x231c8d47 // lea r11d,[r11+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xc7 // mov r15d,eax + LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 + LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] + WORD $0x3141; BYTE $0xdf // xor r15d,ebx + LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd + LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 + LONG $0x1a148d42 // lea edx,[rdx+r11*1] + WORD $0x2144; BYTE $0xff 
// and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xdf31 // xor edi,ebx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] + WORD $0x8945; BYTE $0xc4 // mov r12d,r8d + + // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, BP, 0x14) + LONG $0x14550344 // add r10d,[rbp+0x14] + WORD $0x2141; BYTE $0xd4 // and r12d,edx + LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 + LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb + LONG $0x331c8d47 // lea r11d,[r11+r14*1] + LONG $0x22148d47 // lea r10d,[r10+r12*1] + LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 + LONG $0x22148d47 // lea r10d,[r10+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xdf // mov edi,r11d + LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 + LONG $0x2a148d47 // lea r10d,[r10+r13*1] + WORD $0xc731 // xor edi,eax + LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd + LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 + LONG $0x110c8d42 // lea ecx,[rcx+r10*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xc7 // xor r15d,eax + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3a148d47 // lea r10d,[r10+r15*1] + WORD $0x8941; BYTE $0xd4 // mov r12d,edx + + // ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, BP, 0x18) + LONG $0x184d0344 // add r9d,[rbp+0x18] + WORD $0x2141; BYTE $0xcc // and r12d,ecx + LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 + LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb + LONG $0x32148d47 // lea r10d,[r10+r14*1] + LONG $0x210c8d47 // lea r9d,[r9+r12*1] + LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6 + LONG $0x210c8d47 // lea r9d,[r9+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD 
$0x8945; BYTE $0xd7 // mov r15d,r10d + LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 + LONG $0x290c8d47 // lea r9d,[r9+r13*1] + WORD $0x3145; BYTE $0xdf // xor r15d,r11d + LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd + LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 + LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xdf // xor edi,r11d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x390c8d45 // lea r9d,[r9+rdi*1] + WORD $0x8941; BYTE $0xcc // mov r12d,ecx + + // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, BP, 0x1c) + LONG $0x1c450344 // add r8d,[rbp+0x1c] + WORD $0x2141; BYTE $0xdc // and r12d,ebx + LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 + LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb + LONG $0x310c8d47 // lea r9d,[r9+r14*1] + LONG $0x20048d47 // lea r8d,[r8+r12*1] + LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6 + LONG $0x20048d47 // lea r8d,[r8+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xcf // mov edi,r9d + LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 + LONG $0x28048d47 // lea r8d,[r8+r13*1] + WORD $0x3144; BYTE $0xd7 // xor edi,r10d + LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd + LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 + LONG $0x00048d42 // lea eax,[rax+r8*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3145; BYTE $0xd7 // xor r15d,r10d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d47 // lea r8d,[r8+r15*1] + WORD $0x8941; BYTE $0xdc // mov r12d,ebx + + // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, BP, 0x30) + WORD $0x5503; BYTE $0x30 // add edx,[rbp+0x30] + WORD $0x2141; BYTE $0xc4 // and r12d,eax + LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19 + LONG 
$0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb + LONG $0x30048d47 // lea r8d,[r8+r14*1] + LONG $0x22148d42 // lea edx,[rdx+r12*1] + LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 + LONG $0x22148d42 // lea edx,[rdx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8945; BYTE $0xc7 // mov r15d,r8d + LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 + LONG $0x2a148d42 // lea edx,[rdx+r13*1] + WORD $0x3145; BYTE $0xcf // xor r15d,r9d + LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd + LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 + LONG $0x131c8d45 // lea r11d,[r11+rdx*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xcf // xor edi,r9d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] + WORD $0x8941; BYTE $0xc4 // mov r12d,eax + + // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, BP, 0x34) + WORD $0x4d03; BYTE $0x34 // add ecx,[rbp+0x34] + WORD $0x2145; BYTE $0xdc // and r12d,r11d + LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 + LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb + LONG $0x32148d42 // lea edx,[rdx+r14*1] + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0xd789 // mov edi,edx + LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 + LONG $0x290c8d42 // lea ecx,[rcx+r13*1] + WORD $0x3144; BYTE $0xc7 // xor edi,r8d + LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd + LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 + LONG $0x0a148d45 // lea r10d,[r10+rcx*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3145; BYTE 
$0xc7 // xor r15d,r8d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x390c8d42 // lea ecx,[rcx+r15*1] + WORD $0x8945; BYTE $0xdc // mov r12d,r11d + + // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, BP, 0x38) + WORD $0x5d03; BYTE $0x38 // add ebx,[rbp+0x38] + WORD $0x2145; BYTE $0xd4 // and r12d,r10d + LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 + LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb + LONG $0x310c8d42 // lea ecx,[rcx+r14*1] + LONG $0x231c8d42 // lea ebx,[rbx+r12*1] + LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 + LONG $0x231c8d42 // lea ebx,[rbx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xcf // mov r15d,ecx + LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 + LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] + WORD $0x3141; BYTE $0xd7 // xor r15d,edx + LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd + LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 + LONG $0x190c8d45 // lea r9d,[r9+rbx*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xd731 // xor edi,edx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] + WORD $0x8945; BYTE $0xd4 // mov r12d,r10d + + // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, BP, 0x3c) + WORD $0x4503; BYTE $0x3c // add eax,[rbp+0x3c] + WORD $0x2145; BYTE $0xcc // and r12d,r9d + LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 + LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb + LONG $0x331c8d42 // lea ebx,[rbx+r14*1] + LONG $0x20048d42 // lea eax,[rax+r12*1] + LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 + LONG $0x20048d42 // lea eax,[rax+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0xdf89 // mov edi,ebx + LONG $0xf07b63c4; 
WORD $0x16e3 // rorx r12d,ebx,0x16 + LONG $0x28048d42 // lea eax,[rax+r13*1] + WORD $0xcf31 // xor edi,ecx + LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd + LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 + LONG $0x00048d45 // lea r8d,[r8+rax*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xcf // xor r15d,ecx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d42 // lea eax,[rax+r15*1] + WORD $0x8945; BYTE $0xcc // mov r12d,r9d + + ADDQ $-0x40, BP + CMPQ BP, SP + JAE loop2 + + MOVQ 0x200(SP), DI // $_ctx + ADDQ R14, AX + + ADDQ $0x1c0, SP + + ADDL (DI), AX + ADDL 4(DI), BX + ADDL 8(DI), CX + ADDL 12(DI), DX + ADDL 16(DI), R8 + ADDL 20(DI), R9 + + ADDQ $0x80, SI // input += 2 + ADDL 24(DI), R10 + MOVQ SI, R12 + ADDL 28(DI), R11 + CMPQ SI, 0x50(SP) // input == _end + + MOVL AX, (DI) + LONG $0xe4440f4c // cmove r12,rsp /* next block or stale data */ + MOVL AX, (DI) + MOVL BX, 4(DI) + MOVL CX, 8(DI) + MOVL DX, 12(DI) + MOVL R8, 16(DI) + MOVL R9, 20(DI) + MOVL R10, 24(DI) + MOVL R11, 28(DI) + + JBE loop0 + LEAQ (SP), BP + +done: + MOVQ BP, SP + MOVQ 0x58(SP), SP // restore saved stack pointer + WORD $0xf8c5; BYTE $0x77 // vzeroupper + + RET + diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm new file mode 100644 index 0000000000..c959b1aa26 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm @@ -0,0 +1,686 @@ + +// 16x Parallel implementation of SHA256 for AVX512 + +// +// Minio Cloud Storage, (C) 2017 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +// This code is based on the Intel Multi-Buffer Crypto for IPSec library +// and more specifically the following implementation: +// https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm +// +// For Golang it has been converted into Plan 9 assembly with the help of +// github.com/minio/asm2plan9s to assemble the AVX512 instructions +// + +// Copyright (c) 2017, Intel Corporation +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of Intel Corporation nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#define SHA256_DIGEST_ROW_SIZE 64 + +// arg1 +#define STATE rdi +#define STATE_P9 DI +// arg2 +#define INP_SIZE rsi +#define INP_SIZE_P9 SI + +#define IDX rcx +#define TBL rdx +#define TBL_P9 DX + +#define INPUT rax +#define INPUT_P9 AX + +#define inp0 r9 +#define SCRATCH_P9 R12 +#define SCRATCH r12 +#define maskp r13 +#define MASKP_P9 R13 +#define mask r14 +#define MASK_P9 R14 + +#define A zmm0 +#define B zmm1 +#define C zmm2 +#define D zmm3 +#define E zmm4 +#define F zmm5 +#define G zmm6 +#define H zmm7 +#define T1 zmm8 +#define TMP0 zmm9 +#define TMP1 zmm10 +#define TMP2 zmm11 +#define TMP3 zmm12 +#define TMP4 zmm13 +#define TMP5 zmm14 +#define TMP6 zmm15 + +#define W0 zmm16 +#define W1 zmm17 +#define W2 zmm18 +#define W3 zmm19 +#define W4 zmm20 +#define W5 zmm21 +#define W6 zmm22 +#define W7 zmm23 +#define W8 zmm24 +#define W9 zmm25 +#define W10 zmm26 +#define W11 zmm27 +#define W12 zmm28 +#define W13 zmm29 +#define W14 zmm30 +#define W15 zmm31 + + +#define TRANSPOSE16(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _r10, _r11, _r12, _r13, _r14, _r15, _t0, _t1) \ + \ + \ // input r0 = {a15 a14 a13 a12 a11 a10 a9 a8 a7 a6 a5 a4 a3 a2 a1 a0} + \ // r1 = {b15 b14 b13 b12 b11 b10 b9 b8 b7 b6 b5 b4 b3 b2 b1 b0} + \ // r2 = {c15 c14 c13 c12 c11 c10 c9 c8 c7 c6 c5 c4 c3 c2 c1 c0} + \ // r3 = {d15 d14 d13 d12 d11 d10 d9 d8 d7 d6 d5 d4 d3 d2 d1 d0} + \ // r4 = {e15 e14 e13 e12 e11 e10 e9 e8 e7 e6 e5 e4 e3 e2 e1 e0} + \ // r5 = {f15 f14 
f13 f12 f11 f10 f9 f8 f7 f6 f5 f4 f3 f2 f1 f0} + \ // r6 = {g15 g14 g13 g12 g11 g10 g9 g8 g7 g6 g5 g4 g3 g2 g1 g0} + \ // r7 = {h15 h14 h13 h12 h11 h10 h9 h8 h7 h6 h5 h4 h3 h2 h1 h0} + \ // r8 = {i15 i14 i13 i12 i11 i10 i9 i8 i7 i6 i5 i4 i3 i2 i1 i0} + \ // r9 = {j15 j14 j13 j12 j11 j10 j9 j8 j7 j6 j5 j4 j3 j2 j1 j0} + \ // r10 = {k15 k14 k13 k12 k11 k10 k9 k8 k7 k6 k5 k4 k3 k2 k1 k0} + \ // r11 = {l15 l14 l13 l12 l11 l10 l9 l8 l7 l6 l5 l4 l3 l2 l1 l0} + \ // r12 = {m15 m14 m13 m12 m11 m10 m9 m8 m7 m6 m5 m4 m3 m2 m1 m0} + \ // r13 = {n15 n14 n13 n12 n11 n10 n9 n8 n7 n6 n5 n4 n3 n2 n1 n0} + \ // r14 = {o15 o14 o13 o12 o11 o10 o9 o8 o7 o6 o5 o4 o3 o2 o1 o0} + \ // r15 = {p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0} + \ + \ // output r0 = { p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0} + \ // r1 = { p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1} + \ // r2 = { p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2} + \ // r3 = { p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3} + \ // r4 = { p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4} + \ // r5 = { p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5} + \ // r6 = { p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6} + \ // r7 = { p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7} + \ // r8 = { p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8} + \ // r9 = { p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9} + \ // r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10} + \ // r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11} + \ // r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12} + \ // r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13} + \ // r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14} + \ // r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15} + \ + \ // process top half + vshufps _t0, _r0, _r1, 0x44 \ // t0 = {b13 b12 a13 a12 b9 b8 a9 a8 b5 b4 a5 a4 b1 b0 a1 a0} + vshufps _r0, _r0, _r1, 0xEE 
\ // r0 = {b15 b14 a15 a14 b11 b10 a11 a10 b7 b6 a7 a6 b3 b2 a3 a2} + vshufps _t1, _r2, _r3, 0x44 \ // t1 = {d13 d12 c13 c12 d9 d8 c9 c8 d5 d4 c5 c4 d1 d0 c1 c0} + vshufps _r2, _r2, _r3, 0xEE \ // r2 = {d15 d14 c15 c14 d11 d10 c11 c10 d7 d6 c7 c6 d3 d2 c3 c2} + \ + vshufps _r3, _t0, _t1, 0xDD \ // r3 = {d13 c13 b13 a13 d9 c9 b9 a9 d5 c5 b5 a5 d1 c1 b1 a1} + vshufps _r1, _r0, _r2, 0x88 \ // r1 = {d14 c14 b14 a14 d10 c10 b10 a10 d6 c6 b6 a6 d2 c2 b2 a2} + vshufps _r0, _r0, _r2, 0xDD \ // r0 = {d15 c15 b15 a15 d11 c11 b11 a11 d7 c7 b7 a7 d3 c3 b3 a3} + vshufps _t0, _t0, _t1, 0x88 \ // t0 = {d12 c12 b12 a12 d8 c8 b8 a8 d4 c4 b4 a4 d0 c0 b0 a0} + \ + \ // use r2 in place of t0 + vshufps _r2, _r4, _r5, 0x44 \ // r2 = {f13 f12 e13 e12 f9 f8 e9 e8 f5 f4 e5 e4 f1 f0 e1 e0} + vshufps _r4, _r4, _r5, 0xEE \ // r4 = {f15 f14 e15 e14 f11 f10 e11 e10 f7 f6 e7 e6 f3 f2 e3 e2} + vshufps _t1, _r6, _r7, 0x44 \ // t1 = {h13 h12 g13 g12 h9 h8 g9 g8 h5 h4 g5 g4 h1 h0 g1 g0} + vshufps _r6, _r6, _r7, 0xEE \ // r6 = {h15 h14 g15 g14 h11 h10 g11 g10 h7 h6 g7 g6 h3 h2 g3 g2} + \ + vshufps _r7, _r2, _t1, 0xDD \ // r7 = {h13 g13 f13 e13 h9 g9 f9 e9 h5 g5 f5 e5 h1 g1 f1 e1} + vshufps _r5, _r4, _r6, 0x88 \ // r5 = {h14 g14 f14 e14 h10 g10 f10 e10 h6 g6 f6 e6 h2 g2 f2 e2} + vshufps _r4, _r4, _r6, 0xDD \ // r4 = {h15 g15 f15 e15 h11 g11 f11 e11 h7 g7 f7 e7 h3 g3 f3 e3} + vshufps _r2, _r2, _t1, 0x88 \ // r2 = {h12 g12 f12 e12 h8 g8 f8 e8 h4 g4 f4 e4 h0 g0 f0 e0} + \ + \ // use r6 in place of t0 + vshufps _r6, _r8, _r9, 0x44 \ // r6 = {j13 j12 i13 i12 j9 j8 i9 i8 j5 j4 i5 i4 j1 j0 i1 i0} + vshufps _r8, _r8, _r9, 0xEE \ // r8 = {j15 j14 i15 i14 j11 j10 i11 i10 j7 j6 i7 i6 j3 j2 i3 i2} + vshufps _t1, _r10, _r11, 0x44 \ // t1 = {l13 l12 k13 k12 l9 l8 k9 k8 l5 l4 k5 k4 l1 l0 k1 k0} + vshufps _r10, _r10, _r11, 0xEE \ // r10 = {l15 l14 k15 k14 l11 l10 k11 k10 l7 l6 k7 k6 l3 l2 k3 k2} + \ + vshufps _r11, _r6, _t1, 0xDD \ // r11 = {l13 k13 j13 113 l9 k9 j9 i9 l5 k5 j5 i5 l1 k1 j1 i1} + vshufps _r9, _r8, 
_r10, 0x88 \ // r9 = {l14 k14 j14 114 l10 k10 j10 i10 l6 k6 j6 i6 l2 k2 j2 i2} + vshufps _r8, _r8, _r10, 0xDD \ // r8 = {l15 k15 j15 115 l11 k11 j11 i11 l7 k7 j7 i7 l3 k3 j3 i3} + vshufps _r6, _r6, _t1, 0x88 \ // r6 = {l12 k12 j12 112 l8 k8 j8 i8 l4 k4 j4 i4 l0 k0 j0 i0} + \ + \ // use r10 in place of t0 + vshufps _r10, _r12, _r13, 0x44 \ // r10 = {n13 n12 m13 m12 n9 n8 m9 m8 n5 n4 m5 m4 n1 n0 a1 m0} + vshufps _r12, _r12, _r13, 0xEE \ // r12 = {n15 n14 m15 m14 n11 n10 m11 m10 n7 n6 m7 m6 n3 n2 a3 m2} + vshufps _t1, _r14, _r15, 0x44 \ // t1 = {p13 p12 013 012 p9 p8 09 08 p5 p4 05 04 p1 p0 01 00} + vshufps _r14, _r14, _r15, 0xEE \ // r14 = {p15 p14 015 014 p11 p10 011 010 p7 p6 07 06 p3 p2 03 02} + \ + vshufps _r15, _r10, _t1, 0xDD \ // r15 = {p13 013 n13 m13 p9 09 n9 m9 p5 05 n5 m5 p1 01 n1 m1} + vshufps _r13, _r12, _r14, 0x88 \ // r13 = {p14 014 n14 m14 p10 010 n10 m10 p6 06 n6 m6 p2 02 n2 m2} + vshufps _r12, _r12, _r14, 0xDD \ // r12 = {p15 015 n15 m15 p11 011 n11 m11 p7 07 n7 m7 p3 03 n3 m3} + vshufps _r10, _r10, _t1, 0x88 \ // r10 = {p12 012 n12 m12 p8 08 n8 m8 p4 04 n4 m4 p0 00 n0 m0} + \ + \ // At this point, the registers that contain interesting data are: + \ // t0, r3, r1, r0, r2, r7, r5, r4, r6, r11, r9, r8, r10, r15, r13, r12 + \ // Can use t1 and r14 as scratch registers + LEAQ PSHUFFLE_TRANSPOSE16_MASK1<>(SB), BX \ + LEAQ PSHUFFLE_TRANSPOSE16_MASK2<>(SB), R8 \ + \ + vmovdqu32 _r14, [rbx] \ + vpermi2q _r14, _t0, _r2 \ // r14 = {h8 g8 f8 e8 d8 c8 b8 a8 h0 g0 f0 e0 d0 c0 b0 a0} + vmovdqu32 _t1, [r8] \ + vpermi2q _t1, _t0, _r2 \ // t1 = {h12 g12 f12 e12 d12 c12 b12 a12 h4 g4 f4 e4 d4 c4 b4 a4} + \ + vmovdqu32 _r2, [rbx] \ + vpermi2q _r2, _r3, _r7 \ // r2 = {h9 g9 f9 e9 d9 c9 b9 a9 h1 g1 f1 e1 d1 c1 b1 a1} + vmovdqu32 _t0, [r8] \ + vpermi2q _t0, _r3, _r7 \ // t0 = {h13 g13 f13 e13 d13 c13 b13 a13 h5 g5 f5 e5 d5 c5 b5 a5} + \ + vmovdqu32 _r3, [rbx] \ + vpermi2q _r3, _r1, _r5 \ // r3 = {h10 g10 f10 e10 d10 c10 b10 a10 h2 g2 f2 e2 d2 c2 b2 a2} + vmovdqu32 _r7, 
[r8] \ + vpermi2q _r7, _r1, _r5 \ // r7 = {h14 g14 f14 e14 d14 c14 b14 a14 h6 g6 f6 e6 d6 c6 b6 a6} + \ + vmovdqu32 _r1, [rbx] \ + vpermi2q _r1, _r0, _r4 \ // r1 = {h11 g11 f11 e11 d11 c11 b11 a11 h3 g3 f3 e3 d3 c3 b3 a3} + vmovdqu32 _r5, [r8] \ + vpermi2q _r5, _r0, _r4 \ // r5 = {h15 g15 f15 e15 d15 c15 b15 a15 h7 g7 f7 e7 d7 c7 b7 a7} + \ + vmovdqu32 _r0, [rbx] \ + vpermi2q _r0, _r6, _r10 \ // r0 = {p8 o8 n8 m8 l8 k8 j8 i8 p0 o0 n0 m0 l0 k0 j0 i0} + vmovdqu32 _r4, [r8] \ + vpermi2q _r4, _r6, _r10 \ // r4 = {p12 o12 n12 m12 l12 k12 j12 i12 p4 o4 n4 m4 l4 k4 j4 i4} + \ + vmovdqu32 _r6, [rbx] \ + vpermi2q _r6, _r11, _r15 \ // r6 = {p9 o9 n9 m9 l9 k9 j9 i9 p1 o1 n1 m1 l1 k1 j1 i1} + vmovdqu32 _r10, [r8] \ + vpermi2q _r10, _r11, _r15 \ // r10 = {p13 o13 n13 m13 l13 k13 j13 i13 p5 o5 n5 m5 l5 k5 j5 i5} + \ + vmovdqu32 _r11, [rbx] \ + vpermi2q _r11, _r9, _r13 \ // r11 = {p10 o10 n10 m10 l10 k10 j10 i10 p2 o2 n2 m2 l2 k2 j2 i2} + vmovdqu32 _r15, [r8] \ + vpermi2q _r15, _r9, _r13 \ // r15 = {p14 o14 n14 m14 l14 k14 j14 i14 p6 o6 n6 m6 l6 k6 j6 i6} + \ + vmovdqu32 _r9, [rbx] \ + vpermi2q _r9, _r8, _r12 \ // r9 = {p11 o11 n11 m11 l11 k11 j11 i11 p3 o3 n3 m3 l3 k3 j3 i3} + vmovdqu32 _r13, [r8] \ + vpermi2q _r13, _r8, _r12 \ // r13 = {p15 o15 n15 m15 l15 k15 j15 i15 p7 o7 n7 m7 l7 k7 j7 i7} + \ + \ // At this point r8 and r12 can be used as scratch registers + vshuff64x2 _r8, _r14, _r0, 0xEE \ // r8 = {p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8} + vshuff64x2 _r0, _r14, _r0, 0x44 \ // r0 = {p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0} + \ + vshuff64x2 _r12, _t1, _r4, 0xEE \ // r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12} + vshuff64x2 _r4, _t1, _r4, 0x44 \ // r4 = {p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4} + \ + vshuff64x2 _r14, _r7, _r15, 0xEE \ // r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14} + vshuff64x2 _t1, _r7, _r15, 0x44 \ // t1 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6} + \ + vshuff64x2 _r15, 
_r5, _r13, 0xEE \ // r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15} + vshuff64x2 _r7, _r5, _r13, 0x44 \ // r7 = {p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7} + \ + vshuff64x2 _r13, _t0, _r10, 0xEE \ // r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13} + vshuff64x2 _r5, _t0, _r10, 0x44 \ // r5 = {p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5} + \ + vshuff64x2 _r10, _r3, _r11, 0xEE \ // r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10} + vshuff64x2 _t0, _r3, _r11, 0x44 \ // t0 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2} + \ + vshuff64x2 _r11, _r1, _r9, 0xEE \ // r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11} + vshuff64x2 _r3, _r1, _r9, 0x44 \ // r3 = {p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3} + \ + vshuff64x2 _r9, _r2, _r6, 0xEE \ // r9 = {p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9} + vshuff64x2 _r1, _r2, _r6, 0x44 \ // r1 = {p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1} + \ + vmovdqu32 _r2, _t0 \ // r2 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2} + vmovdqu32 _r6, _t1 \ // r6 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6} + + +// CH(A, B, C) = (A&B) ^ (~A&C) +// MAJ(E, F, G) = (E&F) ^ (E&G) ^ (F&G) +// SIGMA0 = ROR_2 ^ ROR_13 ^ ROR_22 +// SIGMA1 = ROR_6 ^ ROR_11 ^ ROR_25 +// sigma0 = ROR_7 ^ ROR_18 ^ SHR_3 +// sigma1 = ROR_17 ^ ROR_19 ^ SHR_10 + +// Main processing loop per round +#define PROCESS_LOOP(_WT, _ROUND, _A, _B, _C, _D, _E, _F, _G, _H) \ + \ // T1 = H + SIGMA1(E) + CH(E, F, G) + Kt + Wt + \ // T2 = SIGMA0(A) + MAJ(A, B, C) + \ // H=G, G=F, F=E, E=D+T1, D=C, C=B, B=A, A=T1+T2 + \ + \ // H becomes T2, then add T1 for A + \ // D becomes D + T1 for E + \ + vpaddd T1, _H, TMP3 \ // T1 = H + Kt + vmovdqu32 TMP0, _E \ + vprord TMP1, _E, 6 \ // ROR_6(E) + vprord TMP2, _E, 11 \ // ROR_11(E) + vprord TMP3, _E, 25 \ // ROR_25(E) + vpternlogd TMP0, _F, _G, 0xCA \ // TMP0 = CH(E,F,G) + vpaddd T1, T1, _WT \ // T1 = T1 + Wt + vpternlogd 
TMP1, TMP2, TMP3, 0x96 \ // TMP1 = SIGMA1(E) + vpaddd T1, T1, TMP0 \ // T1 = T1 + CH(E,F,G) + vpaddd T1, T1, TMP1 \ // T1 = T1 + SIGMA1(E) + vpaddd _D, _D, T1 \ // D = D + T1 + \ + vprord _H, _A, 2 \ // ROR_2(A) + vprord TMP2, _A, 13 \ // ROR_13(A) + vprord TMP3, _A, 22 \ // ROR_22(A) + vmovdqu32 TMP0, _A \ + vpternlogd TMP0, _B, _C, 0xE8 \ // TMP0 = MAJ(A,B,C) + vpternlogd _H, TMP2, TMP3, 0x96 \ // H(T2) = SIGMA0(A) + vpaddd _H, _H, TMP0 \ // H(T2) = SIGMA0(A) + MAJ(A,B,C) + vpaddd _H, _H, T1 \ // H(A) = H(T2) + T1 + \ + vmovdqu32 TMP3, [TBL + ((_ROUND+1)*64)] \ // Next Kt + + +#define MSG_SCHED_ROUND_16_63(_WT, _WTp1, _WTp9, _WTp14) \ + vprord TMP4, _WTp14, 17 \ // ROR_17(Wt-2) + vprord TMP5, _WTp14, 19 \ // ROR_19(Wt-2) + vpsrld TMP6, _WTp14, 10 \ // SHR_10(Wt-2) + vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma1(Wt-2) + \ + vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2) + vpaddd _WT, _WT, _WTp9 \ // Wt = Wt-16 + sigma1(Wt-2) + Wt-7 + \ + vprord TMP4, _WTp1, 7 \ // ROR_7(Wt-15) + vprord TMP5, _WTp1, 18 \ // ROR_18(Wt-15) + vpsrld TMP6, _WTp1, 3 \ // SHR_3(Wt-15) + vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma0(Wt-15) + \ + vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2) + + \ // Wt-7 + sigma0(Wt-15) + + + +// Note this is reading in a block of data for one lane +// When all 16 are read, the data must be transposed to build msg schedule +#define MSG_SCHED_ROUND_00_15(_WT, OFFSET, LABEL) \ + TESTQ $(1<(SB), TBL_P9 + vmovdqu32 TMP2, [TBL] + + // Get first K from table + MOVQ table+16(FP), TBL_P9 + vmovdqu32 TMP3, [TBL] + + // Save digests for later addition + vmovdqu32 [SCRATCH + 64*0], A + vmovdqu32 [SCRATCH + 64*1], B + vmovdqu32 [SCRATCH + 64*2], C + vmovdqu32 [SCRATCH + 64*3], D + vmovdqu32 [SCRATCH + 64*4], E + vmovdqu32 [SCRATCH + 64*5], F + vmovdqu32 [SCRATCH + 64*6], G + vmovdqu32 [SCRATCH + 64*7], H + + add IDX, 64 + + // Transpose input data + TRANSPOSE16(W0, W1, W2, W3, W4, W5, W6, W7, W8, W9, W10, W11, W12, W13, W14, W15, TMP0, 
TMP1) + + vpshufb W0, W0, TMP2 + vpshufb W1, W1, TMP2 + vpshufb W2, W2, TMP2 + vpshufb W3, W3, TMP2 + vpshufb W4, W4, TMP2 + vpshufb W5, W5, TMP2 + vpshufb W6, W6, TMP2 + vpshufb W7, W7, TMP2 + vpshufb W8, W8, TMP2 + vpshufb W9, W9, TMP2 + vpshufb W10, W10, TMP2 + vpshufb W11, W11, TMP2 + vpshufb W12, W12, TMP2 + vpshufb W13, W13, TMP2 + vpshufb W14, W14, TMP2 + vpshufb W15, W15, TMP2 + + // MSG Schedule for W0-W15 is now complete in registers + // Process first 48 rounds + // Calculate next Wt+16 after processing is complete and Wt is unneeded + + PROCESS_LOOP( W0, 0, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_16_63( W0, W1, W9, W14) + PROCESS_LOOP( W1, 1, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_16_63( W1, W2, W10, W15) + PROCESS_LOOP( W2, 2, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_16_63( W2, W3, W11, W0) + PROCESS_LOOP( W3, 3, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_16_63( W3, W4, W12, W1) + PROCESS_LOOP( W4, 4, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_16_63( W4, W5, W13, W2) + PROCESS_LOOP( W5, 5, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_16_63( W5, W6, W14, W3) + PROCESS_LOOP( W6, 6, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_16_63( W6, W7, W15, W4) + PROCESS_LOOP( W7, 7, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_16_63( W7, W8, W0, W5) + PROCESS_LOOP( W8, 8, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_16_63( W8, W9, W1, W6) + PROCESS_LOOP( W9, 9, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_16_63( W9, W10, W2, W7) + PROCESS_LOOP(W10, 10, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_16_63(W10, W11, W3, W8) + PROCESS_LOOP(W11, 11, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_16_63(W11, W12, W4, W9) + PROCESS_LOOP(W12, 12, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_16_63(W12, W13, W5, W10) + PROCESS_LOOP(W13, 13, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_16_63(W13, W14, W6, W11) + PROCESS_LOOP(W14, 14, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_16_63(W14, W15, W7, W12) + PROCESS_LOOP(W15, 15, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_16_63(W15, W0, W8, W13) + 
PROCESS_LOOP( W0, 16, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_16_63( W0, W1, W9, W14) + PROCESS_LOOP( W1, 17, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_16_63( W1, W2, W10, W15) + PROCESS_LOOP( W2, 18, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_16_63( W2, W3, W11, W0) + PROCESS_LOOP( W3, 19, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_16_63( W3, W4, W12, W1) + PROCESS_LOOP( W4, 20, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_16_63( W4, W5, W13, W2) + PROCESS_LOOP( W5, 21, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_16_63( W5, W6, W14, W3) + PROCESS_LOOP( W6, 22, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_16_63( W6, W7, W15, W4) + PROCESS_LOOP( W7, 23, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_16_63( W7, W8, W0, W5) + PROCESS_LOOP( W8, 24, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_16_63( W8, W9, W1, W6) + PROCESS_LOOP( W9, 25, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_16_63( W9, W10, W2, W7) + PROCESS_LOOP(W10, 26, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_16_63(W10, W11, W3, W8) + PROCESS_LOOP(W11, 27, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_16_63(W11, W12, W4, W9) + PROCESS_LOOP(W12, 28, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_16_63(W12, W13, W5, W10) + PROCESS_LOOP(W13, 29, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_16_63(W13, W14, W6, W11) + PROCESS_LOOP(W14, 30, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_16_63(W14, W15, W7, W12) + PROCESS_LOOP(W15, 31, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_16_63(W15, W0, W8, W13) + PROCESS_LOOP( W0, 32, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_16_63( W0, W1, W9, W14) + PROCESS_LOOP( W1, 33, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_16_63( W1, W2, W10, W15) + PROCESS_LOOP( W2, 34, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_16_63( W2, W3, W11, W0) + PROCESS_LOOP( W3, 35, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_16_63( W3, W4, W12, W1) + PROCESS_LOOP( W4, 36, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_16_63( W4, W5, W13, W2) + PROCESS_LOOP( W5, 37, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_16_63( W5, W6, W14, W3) + PROCESS_LOOP( 
W6, 38, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_16_63( W6, W7, W15, W4) + PROCESS_LOOP( W7, 39, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_16_63( W7, W8, W0, W5) + PROCESS_LOOP( W8, 40, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_16_63( W8, W9, W1, W6) + PROCESS_LOOP( W9, 41, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_16_63( W9, W10, W2, W7) + PROCESS_LOOP(W10, 42, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_16_63(W10, W11, W3, W8) + PROCESS_LOOP(W11, 43, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_16_63(W11, W12, W4, W9) + PROCESS_LOOP(W12, 44, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_16_63(W12, W13, W5, W10) + PROCESS_LOOP(W13, 45, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_16_63(W13, W14, W6, W11) + PROCESS_LOOP(W14, 46, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_16_63(W14, W15, W7, W12) + PROCESS_LOOP(W15, 47, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_16_63(W15, W0, W8, W13) + + // Check if this is the last block + sub INP_SIZE, 1 + JE lastLoop + + // Load next mask for inputs + ADDQ $8, MASKP_P9 + MOVQ (MASKP_P9), MASK_P9 + + // Process last 16 rounds + // Read in next block msg data for use in first 16 words of msg sched + + PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_00_15( W0, 0, skipNext0) + PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_00_15( W1, 1, skipNext1) + PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_00_15( W2, 2, skipNext2) + PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_00_15( W3, 3, skipNext3) + PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_00_15( W4, 4, skipNext4) + PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_00_15( W5, 5, skipNext5) + PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_00_15( W6, 6, skipNext6) + PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_00_15( W7, 7, skipNext7) + PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_00_15( W8, 8, skipNext8) + PROCESS_LOOP( W9, 57, H, A, B, C, D, 
E, F, G) + MSG_SCHED_ROUND_00_15( W9, 9, skipNext9) + PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_00_15(W10, 10, skipNext10) + PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_00_15(W11, 11, skipNext11) + PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_00_15(W12, 12, skipNext12) + PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_00_15(W13, 13, skipNext13) + PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_00_15(W14, 14, skipNext14) + PROCESS_LOOP(W15, 63, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_00_15(W15, 15, skipNext15) + + // Add old digest + vmovdqu32 TMP2, A + vmovdqu32 A, [SCRATCH + 64*0] + vpaddd A{k1}, A, TMP2 + vmovdqu32 TMP2, B + vmovdqu32 B, [SCRATCH + 64*1] + vpaddd B{k1}, B, TMP2 + vmovdqu32 TMP2, C + vmovdqu32 C, [SCRATCH + 64*2] + vpaddd C{k1}, C, TMP2 + vmovdqu32 TMP2, D + vmovdqu32 D, [SCRATCH + 64*3] + vpaddd D{k1}, D, TMP2 + vmovdqu32 TMP2, E + vmovdqu32 E, [SCRATCH + 64*4] + vpaddd E{k1}, E, TMP2 + vmovdqu32 TMP2, F + vmovdqu32 F, [SCRATCH + 64*5] + vpaddd F{k1}, F, TMP2 + vmovdqu32 TMP2, G + vmovdqu32 G, [SCRATCH + 64*6] + vpaddd G{k1}, G, TMP2 + vmovdqu32 TMP2, H + vmovdqu32 H, [SCRATCH + 64*7] + vpaddd H{k1}, H, TMP2 + + kmovq k1, mask + JMP lloop + +lastLoop: + // Process last 16 rounds + PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H) + PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G) + PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F) + PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E) + PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D) + PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C) + PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B) + PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A) + PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H) + PROCESS_LOOP( W9, 57, H, A, B, C, D, E, F, G) + PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F) + PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E) + PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D) + PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C) + 
PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B) + PROCESS_LOOP(W15, 63, B, C, D, E, F, G, H, A) + + // Add old digest + vmovdqu32 TMP2, A + vmovdqu32 A, [SCRATCH + 64*0] + vpaddd A{k1}, A, TMP2 + vmovdqu32 TMP2, B + vmovdqu32 B, [SCRATCH + 64*1] + vpaddd B{k1}, B, TMP2 + vmovdqu32 TMP2, C + vmovdqu32 C, [SCRATCH + 64*2] + vpaddd C{k1}, C, TMP2 + vmovdqu32 TMP2, D + vmovdqu32 D, [SCRATCH + 64*3] + vpaddd D{k1}, D, TMP2 + vmovdqu32 TMP2, E + vmovdqu32 E, [SCRATCH + 64*4] + vpaddd E{k1}, E, TMP2 + vmovdqu32 TMP2, F + vmovdqu32 F, [SCRATCH + 64*5] + vpaddd F{k1}, F, TMP2 + vmovdqu32 TMP2, G + vmovdqu32 G, [SCRATCH + 64*6] + vpaddd G{k1}, G, TMP2 + vmovdqu32 TMP2, H + vmovdqu32 H, [SCRATCH + 64*7] + vpaddd H{k1}, H, TMP2 + + // Write out digest + vmovdqu32 [STATE + 0*SHA256_DIGEST_ROW_SIZE], A + vmovdqu32 [STATE + 1*SHA256_DIGEST_ROW_SIZE], B + vmovdqu32 [STATE + 2*SHA256_DIGEST_ROW_SIZE], C + vmovdqu32 [STATE + 3*SHA256_DIGEST_ROW_SIZE], D + vmovdqu32 [STATE + 4*SHA256_DIGEST_ROW_SIZE], E + vmovdqu32 [STATE + 5*SHA256_DIGEST_ROW_SIZE], F + vmovdqu32 [STATE + 6*SHA256_DIGEST_ROW_SIZE], G + vmovdqu32 [STATE + 7*SHA256_DIGEST_ROW_SIZE], H + + VZEROUPPER + RET + +// +// Tables +// + +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x000(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x008(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x010(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x018(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x020(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x028(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x030(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x038(SB)/8, $0x0c0d0e0f08090a0b +GLOBL PSHUFFLE_BYTE_FLIP_MASK<>(SB), 8, $64 + +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x000(SB)/8, $0x0000000000000000 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x008(SB)/8, $0x0000000000000001 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x010(SB)/8, $0x0000000000000008 +DATA 
PSHUFFLE_TRANSPOSE16_MASK1<>+0x018(SB)/8, $0x0000000000000009 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x020(SB)/8, $0x0000000000000004 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x028(SB)/8, $0x0000000000000005 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x030(SB)/8, $0x000000000000000C +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x038(SB)/8, $0x000000000000000D +GLOBL PSHUFFLE_TRANSPOSE16_MASK1<>(SB), 8, $64 + +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x000(SB)/8, $0x0000000000000002 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x008(SB)/8, $0x0000000000000003 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x010(SB)/8, $0x000000000000000A +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x018(SB)/8, $0x000000000000000B +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x020(SB)/8, $0x0000000000000006 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x028(SB)/8, $0x0000000000000007 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x030(SB)/8, $0x000000000000000E +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x038(SB)/8, $0x000000000000000F +GLOBL PSHUFFLE_TRANSPOSE16_MASK2<>(SB), 8, $64 diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go new file mode 100644 index 0000000000..db8e48d311 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go @@ -0,0 +1,500 @@ +//+build !noasm,!appengine + +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package sha256 + +import ( + "encoding/binary" + "errors" + "hash" + "sort" + "sync/atomic" + "time" +) + +//go:noescape +func sha256X16Avx512(digests *[512]byte, scratch *[512]byte, table *[512]uint64, mask []uint64, inputs [16][]byte) + +// Avx512ServerUID - Do not start at 0 but next multiple of 16 so as to be able to +// differentiate with default initialiation value of 0 +const Avx512ServerUID = 16 + +var uidCounter uint64 + +// NewAvx512 - initialize sha256 Avx512 implementation. +func NewAvx512(a512srv *Avx512Server) hash.Hash { + uid := atomic.AddUint64(&uidCounter, 1) + return &Avx512Digest{uid: uid, a512srv: a512srv} +} + +// Avx512Digest - Type for computing SHA256 using Avx512 +type Avx512Digest struct { + uid uint64 + a512srv *Avx512Server + x [chunk]byte + nx int + len uint64 + final bool + result [Size]byte +} + +// Size - Return size of checksum +func (d *Avx512Digest) Size() int { return Size } + +// BlockSize - Return blocksize of checksum +func (d Avx512Digest) BlockSize() int { return BlockSize } + +// Reset - reset sha digest to its initial values +func (d *Avx512Digest) Reset() { + d.a512srv.blocksCh <- blockInput{uid: d.uid, reset: true} + d.nx = 0 + d.len = 0 + d.final = false +} + +// Write to digest +func (d *Avx512Digest) Write(p []byte) (nn int, err error) { + + if d.final { + return 0, errors.New("Avx512Digest already finalized. Reset first before writing again") + } + + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := copy(d.x[d.nx:], p) + d.nx += n + if d.nx == chunk { + d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: d.x[:]} + d.nx = 0 + } + p = p[n:] + } + if len(p) >= chunk { + n := len(p) &^ (chunk - 1) + d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: p[:n]} + p = p[n:] + } + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +// Sum - Return sha256 sum in bytes +func (d *Avx512Digest) Sum(in []byte) (result []byte) { + + if d.final { + return append(in, d.result[:]...) 
+ } + + trail := make([]byte, 0, 128) + trail = append(trail, d.x[:d.nx]...) + + len := d.len + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + var tmp [64]byte + tmp[0] = 0x80 + if len%64 < 56 { + trail = append(trail, tmp[0:56-len%64]...) + } else { + trail = append(trail, tmp[0:64+56-len%64]...) + } + d.nx = 0 + + // Length in bits. + len <<= 3 + for i := uint(0); i < 8; i++ { + tmp[i] = byte(len >> (56 - 8*i)) + } + trail = append(trail, tmp[0:8]...) + + sumCh := make(chan [Size]byte) + d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: trail, final: true, sumCh: sumCh} + d.result = <-sumCh + d.final = true + return append(in, d.result[:]...) +} + +var table = [512]uint64{ + 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, + 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, + 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, + 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, + 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, + 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, + 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, + 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, + 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, + 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, + 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, + 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, + 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, + 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, + 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, + 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 
0xab1c5ed5ab1c5ed5, + 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, + 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, + 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, + 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, + 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, + 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, + 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, + 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, + 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, + 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, + 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, + 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, + 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, + 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, + 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, + 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, + 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, + 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, + 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, + 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, + 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, + 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, + 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, + 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, + 
0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, + 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, + 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, + 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, + 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, + 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, + 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, + 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, + 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, + 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, + 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, + 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, + 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, + 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, + 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, + 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, + 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, + 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, + 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, + 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, + 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, + 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, + 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, + 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, + 0x27b70a8527b70a85, 
0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, + 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, + 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, + 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, + 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, + 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, + 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, + 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, + 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, + 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, + 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, + 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, + 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, + 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, + 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, + 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, + 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, + 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, + 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, + 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, + 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, + 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, + 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, + 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, + 0xd192e819d192e819, 0xd192e819d192e819, 
0xd192e819d192e819, 0xd192e819d192e819, + 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, + 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, + 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, + 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, + 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, + 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, + 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, + 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, + 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, + 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, + 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, + 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, + 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, + 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, + 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, + 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, + 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, + 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, + 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, + 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, + 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, + 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, + 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, + 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 
0x748f82ee748f82ee, + 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, + 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, + 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, + 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, + 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, + 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, + 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, + 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, + 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, + 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, + 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, + 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, + 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, + 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, + 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2} + +// Interface function to assembly ode +func blockAvx512(digests *[512]byte, input [16][]byte, mask []uint64) [16][Size]byte { + + scratch := [512]byte{} + sha256X16Avx512(digests, &scratch, &table, mask, input) + + output := [16][Size]byte{} + for i := 0; i < 16; i++ { + output[i] = getDigest(i, digests[:]) + } + + return output +} + +func getDigest(index int, state []byte) (sum [Size]byte) { + for j := 0; j < 16; j += 2 { + for i := index*4 + j*Size; i < index*4+(j+1)*Size; i += Size { + binary.BigEndian.PutUint32(sum[j*2:], binary.LittleEndian.Uint32(state[i:i+4])) + } + } + return +} + +// Message to send across input channel +type blockInput struct { + uid uint64 + msg []byte + reset bool + final bool + sumCh chan [Size]byte +} + 
+// Avx512Server - Type to implement 16x parallel handling of SHA256 invocations +type Avx512Server struct { + blocksCh chan blockInput // Input channel + totalIn int // Total number of inputs waiting to be processed + lanes [16]Avx512LaneInfo // Array with info per lane (out of 16) + digests map[uint64][Size]byte // Map of uids to (interim) digest results +} + +// Avx512LaneInfo - Info for each lane +type Avx512LaneInfo struct { + uid uint64 // unique identification for this SHA processing + block []byte // input block to be processed + outputCh chan [Size]byte // channel for output result +} + +// NewAvx512Server - Create new object for parallel processing handling +func NewAvx512Server() *Avx512Server { + a512srv := &Avx512Server{} + a512srv.digests = make(map[uint64][Size]byte) + a512srv.blocksCh = make(chan blockInput) + + // Start a single thread for reading from the input channel + go a512srv.Process() + return a512srv +} + +// Process - Sole handler for reading from the input channel +func (a512srv *Avx512Server) Process() { + for { + select { + case block := <-a512srv.blocksCh: + if block.reset { + a512srv.reset(block.uid) + continue + } + index := block.uid & 0xf + // fmt.Println("Adding message:", block.uid, index) + + if a512srv.lanes[index].block != nil { // If slot is already filled, process all inputs + //fmt.Println("Invoking Blocks()") + a512srv.blocks() + } + a512srv.totalIn++ + a512srv.lanes[index] = Avx512LaneInfo{uid: block.uid, block: block.msg} + if block.final { + a512srv.lanes[index].outputCh = block.sumCh + } + if a512srv.totalIn == len(a512srv.lanes) { + // fmt.Println("Invoking Blocks() while FULL: ") + a512srv.blocks() + } + + // TODO: test with larger timeout + case <-time.After(1 * time.Microsecond): + for _, lane := range a512srv.lanes { + if lane.block != nil { // check if there is any input to process + // fmt.Println("Invoking Blocks() on TIMEOUT: ") + a512srv.blocks() + break // we are done + } + } + } + } +} + +// Do a reset for 
this calculation +func (a512srv *Avx512Server) reset(uid uint64) { + + // Check if there is a message still waiting to be processed (and remove if so) + for i, lane := range a512srv.lanes { + if lane.uid == uid { + if lane.block != nil { + a512srv.lanes[i] = Avx512LaneInfo{} // clear message + a512srv.totalIn-- + } + } + } + + // Delete entry from hash map + delete(a512srv.digests, uid) +} + +// Invoke assembly and send results back +func (a512srv *Avx512Server) blocks() { + + inputs := [16][]byte{} + for i := range inputs { + inputs[i] = a512srv.lanes[i].block + } + + mask := expandMask(genMask(inputs)) + outputs := blockAvx512(a512srv.getDigests(), inputs, mask) + + a512srv.totalIn = 0 + for i := 0; i < len(outputs); i++ { + uid, outputCh := a512srv.lanes[i].uid, a512srv.lanes[i].outputCh + a512srv.digests[uid] = outputs[i] + a512srv.lanes[i] = Avx512LaneInfo{} + + if outputCh != nil { + // Send back result + outputCh <- outputs[i] + delete(a512srv.digests, uid) // Delete entry from hashmap + } + } +} + +func (a512srv *Avx512Server) Write(uid uint64, p []byte) (nn int, err error) { + a512srv.blocksCh <- blockInput{uid: uid, msg: p} + return len(p), nil +} + +// Sum - return sha256 sum in bytes for a given sum id. 
+func (a512srv *Avx512Server) Sum(uid uint64, p []byte) [32]byte { + sumCh := make(chan [32]byte) + a512srv.blocksCh <- blockInput{uid: uid, msg: p, final: true, sumCh: sumCh} + return <-sumCh +} + +func (a512srv *Avx512Server) getDigests() *[512]byte { + digests := [512]byte{} + for i, lane := range a512srv.lanes { + a, ok := a512srv.digests[lane.uid] + if ok { + binary.BigEndian.PutUint32(digests[(i+0*16)*4:], binary.LittleEndian.Uint32(a[0:4])) + binary.BigEndian.PutUint32(digests[(i+1*16)*4:], binary.LittleEndian.Uint32(a[4:8])) + binary.BigEndian.PutUint32(digests[(i+2*16)*4:], binary.LittleEndian.Uint32(a[8:12])) + binary.BigEndian.PutUint32(digests[(i+3*16)*4:], binary.LittleEndian.Uint32(a[12:16])) + binary.BigEndian.PutUint32(digests[(i+4*16)*4:], binary.LittleEndian.Uint32(a[16:20])) + binary.BigEndian.PutUint32(digests[(i+5*16)*4:], binary.LittleEndian.Uint32(a[20:24])) + binary.BigEndian.PutUint32(digests[(i+6*16)*4:], binary.LittleEndian.Uint32(a[24:28])) + binary.BigEndian.PutUint32(digests[(i+7*16)*4:], binary.LittleEndian.Uint32(a[28:32])) + } else { + binary.LittleEndian.PutUint32(digests[(i+0*16)*4:], init0) + binary.LittleEndian.PutUint32(digests[(i+1*16)*4:], init1) + binary.LittleEndian.PutUint32(digests[(i+2*16)*4:], init2) + binary.LittleEndian.PutUint32(digests[(i+3*16)*4:], init3) + binary.LittleEndian.PutUint32(digests[(i+4*16)*4:], init4) + binary.LittleEndian.PutUint32(digests[(i+5*16)*4:], init5) + binary.LittleEndian.PutUint32(digests[(i+6*16)*4:], init6) + binary.LittleEndian.PutUint32(digests[(i+7*16)*4:], init7) + } + } + return &digests +} + +// Helper struct for sorting blocks based on length +type lane struct { + len uint + pos uint +} + +type lanes []lane + +func (lns lanes) Len() int { return len(lns) } +func (lns lanes) Swap(i, j int) { lns[i], lns[j] = lns[j], lns[i] } +func (lns lanes) Less(i, j int) bool { return lns[i].len < lns[j].len } + +// Helper struct for +type maskRounds struct { + mask uint64 + rounds uint64 +} + 
+func genMask(input [16][]byte) [16]maskRounds { + + // Sort on blocks length small to large + var sorted [16]lane + for c, inpt := range input { + sorted[c] = lane{uint(len(inpt)), uint(c)} + } + sort.Sort(lanes(sorted[:])) + + // Create mask array including 'rounds' between masks + m, round, index := uint64(0xffff), uint64(0), 0 + var mr [16]maskRounds + for _, s := range sorted { + if s.len > 0 { + if uint64(s.len)>>6 > round { + mr[index] = maskRounds{m, (uint64(s.len) >> 6) - round} + index++ + } + round = uint64(s.len) >> 6 + } + m = m & ^(1 << uint(s.pos)) + } + + return mr +} + +// TODO: remove function +func expandMask(mr [16]maskRounds) []uint64 { + size := uint64(0) + for _, r := range mr { + size += r.rounds + } + result, index := make([]uint64, size), 0 + for _, r := range mr { + for j := uint64(0); j < r.rounds; j++ { + result[index] = r.mask + index++ + } + } + return result +} diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s new file mode 100644 index 0000000000..275bcacbc1 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s @@ -0,0 +1,267 @@ +//+build !noasm,!appengine + +TEXT ·sha256X16Avx512(SB), 7, $0 + MOVQ digests+0(FP), DI + MOVQ scratch+8(FP), R12 + MOVQ mask_len+32(FP), SI + MOVQ mask_base+24(FP), R13 + MOVQ (R13), R14 + LONG $0x92fbc1c4; BYTE $0xce + LEAQ inputs+48(FP), AX + QUAD $0xf162076f487ef162; QUAD $0x7ef162014f6f487e; QUAD $0x487ef16202576f48; QUAD $0x6f487ef162035f6f; QUAD $0x6f6f487ef1620467; QUAD $0x06776f487ef16205; LONG $0x487ef162; WORD $0x7f6f; BYTE $0x07 + MOVQ table+16(FP), DX + WORD $0x3148; BYTE $0xc9 + TESTQ $(1<<0), R14 + JE skipInput0 + MOVQ 0*24(AX), R9 + LONG $0x487cc162; WORD $0x0410; BYTE $0x09 + +skipInput0: + TESTQ $(1<<1), R14 + JE skipInput1 + MOVQ 1*24(AX), R9 + LONG $0x487cc162; WORD $0x0c10; BYTE $0x09 + +skipInput1: + TESTQ $(1<<2), R14 + JE skipInput2 + MOVQ 2*24(AX), R9 + LONG 
$0x487cc162; WORD $0x1410; BYTE $0x09 + +skipInput2: + TESTQ $(1<<3), R14 + JE skipInput3 + MOVQ 3*24(AX), R9 + LONG $0x487cc162; WORD $0x1c10; BYTE $0x09 + +skipInput3: + TESTQ $(1<<4), R14 + JE skipInput4 + MOVQ 4*24(AX), R9 + LONG $0x487cc162; WORD $0x2410; BYTE $0x09 + +skipInput4: + TESTQ $(1<<5), R14 + JE skipInput5 + MOVQ 5*24(AX), R9 + LONG $0x487cc162; WORD $0x2c10; BYTE $0x09 + +skipInput5: + TESTQ $(1<<6), R14 + JE skipInput6 + MOVQ 6*24(AX), R9 + LONG $0x487cc162; WORD $0x3410; BYTE $0x09 + +skipInput6: + TESTQ $(1<<7), R14 + JE skipInput7 + MOVQ 7*24(AX), R9 + LONG $0x487cc162; WORD $0x3c10; BYTE $0x09 + +skipInput7: + TESTQ $(1<<8), R14 + JE skipInput8 + MOVQ 8*24(AX), R9 + LONG $0x487c4162; WORD $0x0410; BYTE $0x09 + +skipInput8: + TESTQ $(1<<9), R14 + JE skipInput9 + MOVQ 9*24(AX), R9 + LONG $0x487c4162; WORD $0x0c10; BYTE $0x09 + +skipInput9: + TESTQ $(1<<10), R14 + JE skipInput10 + MOVQ 10*24(AX), R9 + LONG $0x487c4162; WORD $0x1410; BYTE $0x09 + +skipInput10: + TESTQ $(1<<11), R14 + JE skipInput11 + MOVQ 11*24(AX), R9 + LONG $0x487c4162; WORD $0x1c10; BYTE $0x09 + +skipInput11: + TESTQ $(1<<12), R14 + JE skipInput12 + MOVQ 12*24(AX), R9 + LONG $0x487c4162; WORD $0x2410; BYTE $0x09 + +skipInput12: + TESTQ $(1<<13), R14 + JE skipInput13 + MOVQ 13*24(AX), R9 + LONG $0x487c4162; WORD $0x2c10; BYTE $0x09 + +skipInput13: + TESTQ $(1<<14), R14 + JE skipInput14 + MOVQ 14*24(AX), R9 + LONG $0x487c4162; WORD $0x3410; BYTE $0x09 + +skipInput14: + TESTQ $(1<<15), R14 + JE skipInput15 + MOVQ 15*24(AX), R9 + LONG $0x487c4162; WORD $0x3c10; BYTE $0x09 + +skipInput15: +lloop: + LEAQ PSHUFFLE_BYTE_FLIP_MASK<>(SB), DX + LONG $0x487e7162; WORD $0x1a6f + MOVQ table+16(FP), DX + QUAD $0xd162226f487e7162; QUAD $0x7ed16224047f487e; QUAD $0x7ed16201244c7f48; QUAD $0x7ed1620224547f48; QUAD $0x7ed16203245c7f48; QUAD $0x7ed1620424647f48; QUAD $0x7ed16205246c7f48; QUAD $0x7ed1620624747f48; QUAD $0xc1834807247c7f48; QUAD $0x44c9c6407c316240; QUAD $0x62eec1c6407ca162; QUAD 
$0xa16244d3c6406c31; QUAD $0x34c162eed3c6406c; QUAD $0x407ca162dddac648; QUAD $0xc6407ca16288cac6; QUAD $0xcac648345162ddc2; QUAD $0x44d5c6405ca16288; QUAD $0x62eee5c6405ca162; QUAD $0xa16244d7c6404c31; QUAD $0x6cc162eef7c6404c; QUAD $0x405ca162ddfac640; QUAD $0xc6405ca16288eec6; QUAD $0xd2c6406cc162dde6; QUAD $0x44f1c6403c816288; QUAD $0x62eec1c6403c0162; QUAD $0x016244d3c6402c11; QUAD $0x4c4162eed3c6402c; QUAD $0x403c0162dddac640; QUAD $0xc6403c016288cac6; QUAD $0xf2c6404cc162ddc2; QUAD $0x44d5c6401c016288; QUAD $0x62eee5c6401c0162; QUAD $0x016244d7c6400c11; QUAD $0x2c4162eef7c6400c; QUAD $0x401c0162ddfac640; QUAD $0xc6401c016288eec6; QUAD $0xd2c6402c4162dde6; BYTE $0x88 + LEAQ PSHUFFLE_TRANSPOSE16_MASK1<>(SB), BX + LEAQ PSHUFFLE_TRANSPOSE16_MASK2<>(SB), R8 + QUAD $0x2262336f487e6162; QUAD $0x487e5162f27648b5; QUAD $0xd27648b53262106f; QUAD $0xa262136f487ee162; QUAD $0x487e5162d77640e5; QUAD $0xcf7640e53262086f; QUAD $0xa2621b6f487ee162; QUAD $0x487ec162dd7640f5; QUAD $0xfd7640f5a262386f; QUAD $0xa2620b6f487ee162; QUAD $0x487ec162cc7640fd; QUAD $0xec7640fda262286f; QUAD $0x8262036f487ee162; QUAD $0x487ec162c27640cd; QUAD $0xe27640cd8262206f; QUAD $0x8262336f487ee162; QUAD $0x487e4162f77640a5; QUAD $0xd77640a50262106f; QUAD $0x02621b6f487e6162; QUAD $0x487e4162dd7640b5; QUAD $0xfd7640b50262386f; QUAD $0x02620b6f487e6162; QUAD $0x487e4162cc7640bd; QUAD $0xec7640bd0262286f; QUAD $0x62eec023408d2362; QUAD $0x236244c023408da3; QUAD $0xada362eee42348ad; QUAD $0x40c5036244e42348; QUAD $0x2340c51362eef723; QUAD $0xfd2340d5036244d7; QUAD $0x44fd2340d58362ee; QUAD $0x62eeea2348b50362; QUAD $0x036244ea2348b583; QUAD $0xe51362eed32340e5; QUAD $0x40f5036244cb2340; QUAD $0x2340f58362eed923; QUAD $0xce2340ed236244d9; QUAD $0x44ce2340eda362ee; QUAD $0xc162d16f487ec162; QUAD $0x407dc262f26f487e; QUAD $0xcb004075c262c300; QUAD $0xc262d300406dc262; QUAD $0x405dc262db004065; QUAD $0xeb004055c262e300; QUAD $0xc262f300404dc262; QUAD $0x403d4262fb004045; QUAD $0xcb0040354262c300; QUAD 
$0x4262d300402d4262; QUAD $0x401d4262db004025; QUAD $0xeb0040154262e300; QUAD $0x4262f300400d4262; QUAD $0x48455162fb004005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6201626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916202626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16203; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; 
QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16204626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16205626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x06626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD 
$0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16207626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1620862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6209626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; 
QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1620a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591620b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91620c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD 
$0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591620d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x0e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591620f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; 
QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591621062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x48455162fdfe4005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6211626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916212626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD 
$0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16213; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16214626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16215626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; 
QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x16626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16217626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1621862; QUAD $0x13c572480db16211; QUAD 
$0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6219626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1621a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591621b; QUAD $0x6213c072480d9162; 
QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91621c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591621d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x1e626f487e7162d0; QUAD $0x6211c37248159162; QUAD 
$0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591621f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591622062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x48455162fdfe4005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6221626f487e7162; QUAD $0x916211c672481591; 
QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916222626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16223; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16224626f487e71; QUAD 
$0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16225626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x26626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16227626f487e; 
QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1622862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6229626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD 
$0x4815b1622a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591622b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91622c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; 
QUAD $0x72481591622d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x2e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591622f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD 
$0x6f487e7162c0fe48; QUAD $0xc572481591623062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x01ee8348fdfe4005 + JE lastLoop + ADDQ $8, R13 + MOVQ (R13), R14 + QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; WORD $0x626f; BYTE $0x31 + TESTQ $(1<<0), R14 + JE skipNext0 + MOVQ 0*24(AX), R9 + LONG $0x487cc162; WORD $0x0410; BYTE $0x09 + +skipNext0: + QUAD $0x7162c4fe484d5162; QUAD $0x482df162cb6f487e; QUAD $0x724825f16206c372; QUAD $0xc372481df1620bc3; QUAD $0xcacd25485d736219; QUAD $0x5362c1fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d0fe486dd162c2; QUAD $0xf16202c772484df1; QUAD $0x1df1620dc7724825; QUAD $0x487e716216c77248; QUAD $0xc925487d7362cf6f; QUAD $0x96f4254825d362e8; QUAD $0xd162f1fe484dd162; QUAD $0x487e7162f0fe484d; WORD $0x626f; BYTE $0x32 + TESTQ $(1<<1), R14 + JE skipNext1 + MOVQ 1*24(AX), R9 + LONG $0x487cc162; WORD $0x0c10; BYTE $0x09 + +skipNext1: + QUAD $0x7162c4fe48555162; QUAD $0x482df162ca6f487e; QUAD $0x724825f16206c272; QUAD $0xc272481df1620bc2; QUAD $0xcacc254865736219; QUAD $0x5362c2fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c8fe4875d162c2; QUAD $0xf16202c6724855f1; QUAD $0x1df1620dc6724825; QUAD $0x487e716216c67248; QUAD $0xc82548457362ce6f; QUAD $0x96ec254825d362e8; QUAD $0xd162e9fe4855d162; QUAD $0x487e7162e8fe4855; WORD $0x626f; BYTE $0x33 + TESTQ $(1<<2), R14 + JE skipNext2 + MOVQ 2*24(AX), R9 + LONG $0x487cc162; WORD $0x1410; BYTE $0x09 + 
+skipNext2: + QUAD $0x7162c4fe485d5162; QUAD $0x482df162c96f487e; QUAD $0x724825f16206c172; QUAD $0xc172481df1620bc1; QUAD $0xcacb25486d736219; QUAD $0x5362c3fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c0fe487dd162c2; QUAD $0xf16202c572485df1; QUAD $0x1df1620dc5724825; QUAD $0x487e716216c57248; QUAD $0xcf25484d7362cd6f; QUAD $0x96e4254825d362e8; QUAD $0xd162e1fe485dd162; QUAD $0x487e7162e0fe485d; WORD $0x626f; BYTE $0x34 + TESTQ $(1<<3), R14 + JE skipNext3 + MOVQ 3*24(AX), R9 + LONG $0x487cc162; WORD $0x1c10; BYTE $0x09 + +skipNext3: + QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; WORD $0x626f; BYTE $0x35 + TESTQ $(1<<4), R14 + JE skipNext4 + MOVQ 4*24(AX), R9 + LONG $0x487cc162; WORD $0x2410; BYTE $0x09 + +skipNext4: + QUAD $0x7162c4fe486d5162; QUAD $0x482df162cf6f487e; QUAD $0x724825f16206c772; QUAD $0xc772481df1620bc7; QUAD $0xcac925487d736219; QUAD $0x5362c5fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f0fe484dd162c2; QUAD $0xf16202c372486df1; QUAD $0x1df1620dc3724825; QUAD $0x487e716216c37248; QUAD $0xcd25485d7362cb6f; QUAD $0x96d4254825d362e8; QUAD $0xd162d1fe486dd162; QUAD $0x487e7162d0fe486d; WORD $0x626f; BYTE $0x36 + TESTQ $(1<<5), R14 + JE skipNext5 + MOVQ 5*24(AX), R9 + LONG $0x487cc162; WORD $0x2c10; BYTE $0x09 + +skipNext5: + QUAD $0x7162c4fe48755162; QUAD $0x482df162ce6f487e; QUAD $0x724825f16206c672; QUAD $0xc672481df1620bc6; QUAD $0xcac8254845736219; QUAD $0x5362c6fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e8fe4855d162c2; QUAD $0xf16202c2724875f1; QUAD $0x1df1620dc2724825; QUAD $0x487e716216c27248; 
QUAD $0xcc2548657362ca6f; QUAD $0x96cc254825d362e8; QUAD $0xd162c9fe4875d162; QUAD $0x487e7162c8fe4875; WORD $0x626f; BYTE $0x37 + TESTQ $(1<<6), R14 + JE skipNext6 + MOVQ 6*24(AX), R9 + LONG $0x487cc162; WORD $0x3410; BYTE $0x09 + +skipNext6: + QUAD $0x7162c4fe487d5162; QUAD $0x482df162cd6f487e; QUAD $0x724825f16206c572; QUAD $0xc572481df1620bc5; QUAD $0xcacf25484d736219; QUAD $0x5362c7fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e0fe485dd162c2; QUAD $0xf16202c172487df1; QUAD $0x1df1620dc1724825; QUAD $0x487e716216c17248; QUAD $0xcb25486d7362c96f; QUAD $0x96c4254825d362e8; QUAD $0xd162c1fe487dd162; QUAD $0x487e7162c0fe487d; WORD $0x626f; BYTE $0x38 + TESTQ $(1<<7), R14 + JE skipNext7 + MOVQ 7*24(AX), R9 + LONG $0x487cc162; WORD $0x3c10; BYTE $0x09 + +skipNext7: + QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; WORD $0x626f; BYTE $0x39 + TESTQ $(1<<8), R14 + JE skipNext8 + MOVQ 8*24(AX), R9 + LONG $0x487c4162; WORD $0x0410; BYTE $0x09 + +skipNext8: + QUAD $0x7162c4fe484d5162; QUAD $0x482df162cb6f487e; QUAD $0x724825f16206c372; QUAD $0xc372481df1620bc3; QUAD $0xcacd25485d736219; QUAD $0x5362c1fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d0fe486dd162c2; QUAD $0xf16202c772484df1; QUAD $0x1df1620dc7724825; QUAD $0x487e716216c77248; QUAD $0xc925487d7362cf6f; QUAD $0x96f4254825d362e8; QUAD $0xd162f1fe484dd162; QUAD $0x487e7162f0fe484d; WORD $0x626f; BYTE $0x3a + TESTQ $(1<<9), R14 + JE skipNext9 + MOVQ 9*24(AX), R9 + LONG $0x487c4162; WORD $0x0c10; BYTE $0x09 + +skipNext9: + QUAD $0x7162c4fe48555162; QUAD $0x482df162ca6f487e; QUAD $0x724825f16206c272; 
QUAD $0xc272481df1620bc2; QUAD $0xcacc254865736219; QUAD $0x5362c2fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c8fe4875d162c2; QUAD $0xf16202c6724855f1; QUAD $0x1df1620dc6724825; QUAD $0x487e716216c67248; QUAD $0xc82548457362ce6f; QUAD $0x96ec254825d362e8; QUAD $0xd162e9fe4855d162; QUAD $0x487e7162e8fe4855; WORD $0x626f; BYTE $0x3b + TESTQ $(1<<10), R14 + JE skipNext10 + MOVQ 10*24(AX), R9 + LONG $0x487c4162; WORD $0x1410; BYTE $0x09 + +skipNext10: + QUAD $0x7162c4fe485d5162; QUAD $0x482df162c96f487e; QUAD $0x724825f16206c172; QUAD $0xc172481df1620bc1; QUAD $0xcacb25486d736219; QUAD $0x5362c3fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c0fe487dd162c2; QUAD $0xf16202c572485df1; QUAD $0x1df1620dc5724825; QUAD $0x487e716216c57248; QUAD $0xcf25484d7362cd6f; QUAD $0x96e4254825d362e8; QUAD $0xd162e1fe485dd162; QUAD $0x487e7162e0fe485d; WORD $0x626f; BYTE $0x3c + TESTQ $(1<<11), R14 + JE skipNext11 + MOVQ 11*24(AX), R9 + LONG $0x487c4162; WORD $0x1c10; BYTE $0x09 + +skipNext11: + QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; WORD $0x626f; BYTE $0x3d + TESTQ $(1<<12), R14 + JE skipNext12 + MOVQ 12*24(AX), R9 + LONG $0x487c4162; WORD $0x2410; BYTE $0x09 + +skipNext12: + QUAD $0x7162c4fe486d5162; QUAD $0x482df162cf6f487e; QUAD $0x724825f16206c772; QUAD $0xc772481df1620bc7; QUAD $0xcac925487d736219; QUAD $0x5362c5fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f0fe484dd162c2; QUAD $0xf16202c372486df1; QUAD $0x1df1620dc3724825; QUAD $0x487e716216c37248; QUAD $0xcd25485d7362cb6f; QUAD $0x96d4254825d362e8; QUAD $0xd162d1fe486dd162; 
QUAD $0x487e7162d0fe486d; WORD $0x626f; BYTE $0x3e + TESTQ $(1<<13), R14 + JE skipNext13 + MOVQ 13*24(AX), R9 + LONG $0x487c4162; WORD $0x2c10; BYTE $0x09 + +skipNext13: + QUAD $0x7162c4fe48755162; QUAD $0x482df162ce6f487e; QUAD $0x724825f16206c672; QUAD $0xc672481df1620bc6; QUAD $0xcac8254845736219; QUAD $0x5362c6fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e8fe4855d162c2; QUAD $0xf16202c2724875f1; QUAD $0x1df1620dc2724825; QUAD $0x487e716216c27248; QUAD $0xcc2548657362ca6f; QUAD $0x96cc254825d362e8; QUAD $0xd162c9fe4875d162; QUAD $0x487e7162c8fe4875; WORD $0x626f; BYTE $0x3f + TESTQ $(1<<14), R14 + JE skipNext14 + MOVQ 14*24(AX), R9 + LONG $0x487c4162; WORD $0x3410; BYTE $0x09 + +skipNext14: + QUAD $0x7162c4fe487d5162; QUAD $0x482df162cd6f487e; QUAD $0x724825f16206c572; QUAD $0xc572481df1620bc5; QUAD $0xcacf25484d736219; QUAD $0x5362c7fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e0fe485dd162c2; QUAD $0xf16202c172487df1; QUAD $0x1df1620dc1724825; QUAD $0x487e716216c17248; QUAD $0xcb25486d7362c96f; QUAD $0x96c4254825d362e8; QUAD $0xd162c1fe487dd162; QUAD $0x487e7162c0fe487d; WORD $0x626f; BYTE $0x40 + TESTQ $(1<<15), R14 + JE skipNext15 + MOVQ 15*24(AX), R9 + LONG $0x487c4162; WORD $0x3c10; BYTE $0x09 + +skipNext15: + QUAD $0xd162d86f487e7162; QUAD $0x7dd16224046f487e; QUAD $0x6f487e7162c3fe49; QUAD $0x244c6f487ed162d9; QUAD $0x62cbfe4975d16201; QUAD $0x7ed162da6f487e71; QUAD $0x6dd1620224546f48; QUAD $0x6f487e7162d3fe49; QUAD $0x245c6f487ed162db; QUAD $0x62dbfe4965d16203; QUAD $0x7ed162dc6f487e71; QUAD $0x5dd1620424646f48; QUAD $0x6f487e7162e3fe49; QUAD $0x246c6f487ed162dd; QUAD $0x62ebfe4955d16205; QUAD $0x7ed162de6f487e71; QUAD $0x4dd1620624746f48; QUAD $0x6f487e7162f3fe49; QUAD $0x247c6f487ed162df; QUAD $0xc4fbfe4945d16207; LONG $0xce92fbc1 + JMP lloop + +lastLoop: + QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD 
$0x5362c0fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; QUAD $0xfe484d516231626f; QUAD $0x62cb6f487e7162c4; QUAD $0xf16206c372482df1; QUAD $0x1df1620bc3724825; QUAD $0x485d736219c37248; QUAD $0xfe483d3162cacd25; QUAD $0x96d42548255362c1; QUAD $0x5162c1fe483d5162; QUAD $0x486dd162c2fe483d; QUAD $0xc772484df162d0fe; QUAD $0x0dc7724825f16202; QUAD $0x6216c772481df162; QUAD $0x7d7362cf6f487e71; QUAD $0x4825d362e8c92548; QUAD $0xfe484dd16296f425; QUAD $0x62f0fe484dd162f1; QUAD $0x516232626f487e71; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x62c4fe485d516233; QUAD $0x2df162c96f487e71; QUAD $0x4825f16206c17248; QUAD $0x72481df1620bc172; QUAD $0xcb25486d736219c1; QUAD $0x62c3fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xc0fe487dd162c2fe; QUAD $0x6202c572485df162; QUAD $0xf1620dc5724825f1; QUAD $0x7e716216c572481d; QUAD $0x25484d7362cd6f48; QUAD $0xe4254825d362e8cf; QUAD $0x62e1fe485dd16296; QUAD $0x7e7162e0fe485dd1; QUAD $0x4865516234626f48; QUAD $0xc86f487e7162c4fe; QUAD $0x6206c072482df162; QUAD $0xf1620bc0724825f1; QUAD $0x75736219c072481d; QUAD $0x483d3162caca2548; QUAD $0xd42548255362c4fe; QUAD $0x62c1fe483d516296; QUAD $0x45d162c2fe483d51; QUAD $0x724865f162f8fe48; QUAD $0xc4724825f16202c4; QUAD $0x16c472481df1620d; QUAD $0x7362cc6f487e7162; QUAD $0x25d362e8ce254855; QUAD $0x4865d16296dc2548; QUAD $0xd8fe4865d162d9fe; QUAD $0x6235626f487e7162; 
QUAD $0x7e7162c4fe486d51; QUAD $0x72482df162cf6f48; QUAD $0xc7724825f16206c7; QUAD $0x19c772481df1620b; QUAD $0x62cac925487d7362; QUAD $0x255362c5fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162f0fe484dd162; QUAD $0x25f16202c372486d; QUAD $0x481df1620dc37248; QUAD $0x6f487e716216c372; QUAD $0xe8cd25485d7362cb; QUAD $0x6296d4254825d362; QUAD $0x6dd162d1fe486dd1; QUAD $0x6f487e7162d0fe48; QUAD $0xc4fe487551623662; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x7d516237626f487e; QUAD $0x6f487e7162c4fe48; QUAD $0x06c572482df162cd; QUAD $0x620bc5724825f162; QUAD $0x736219c572481df1; QUAD $0x3d3162cacf25484d; QUAD $0x2548255362c7fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x487df162e0fe485d; QUAD $0x724825f16202c172; QUAD $0xc172481df1620dc1; QUAD $0x62c96f487e716216; QUAD $0xd362e8cb25486d73; QUAD $0x7dd16296c4254825; QUAD $0xfe487dd162c1fe48; QUAD $0x38626f487e7162c0; QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; QUAD $0xfe484d516239626f; QUAD $0x62cb6f487e7162c4; QUAD $0xf16206c372482df1; QUAD $0x1df1620bc3724825; QUAD $0x485d736219c37248; QUAD $0xfe483d1162cacd25; QUAD $0x96d42548255362c1; QUAD $0x5162c1fe483d5162; QUAD $0x486dd162c2fe483d; QUAD $0xc772484df162d0fe; QUAD $0x0dc7724825f16202; QUAD 
$0x6216c772481df162; QUAD $0x7d7362cf6f487e71; QUAD $0x4825d362e8c92548; QUAD $0xfe484dd16296f425; QUAD $0x62f0fe484dd162f1; QUAD $0x51623a626f487e71; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x62c4fe485d51623b; QUAD $0x2df162c96f487e71; QUAD $0x4825f16206c17248; QUAD $0x72481df1620bc172; QUAD $0xcb25486d736219c1; QUAD $0x62c3fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xc0fe487dd162c2fe; QUAD $0x6202c572485df162; QUAD $0xf1620dc5724825f1; QUAD $0x7e716216c572481d; QUAD $0x25484d7362cd6f48; QUAD $0xe4254825d362e8cf; QUAD $0x62e1fe485dd16296; QUAD $0x7e7162e0fe485dd1; QUAD $0x486551623c626f48; QUAD $0xc86f487e7162c4fe; QUAD $0x6206c072482df162; QUAD $0xf1620bc0724825f1; QUAD $0x75736219c072481d; QUAD $0x483d1162caca2548; QUAD $0xd42548255362c4fe; QUAD $0x62c1fe483d516296; QUAD $0x45d162c2fe483d51; QUAD $0x724865f162f8fe48; QUAD $0xc4724825f16202c4; QUAD $0x16c472481df1620d; QUAD $0x7362cc6f487e7162; QUAD $0x25d362e8ce254855; QUAD $0x4865d16296dc2548; QUAD $0xd8fe4865d162d9fe; QUAD $0x623d626f487e7162; QUAD $0x7e7162c4fe486d51; QUAD $0x72482df162cf6f48; QUAD $0xc7724825f16206c7; QUAD $0x19c772481df1620b; QUAD $0x62cac925487d7362; QUAD $0x255362c5fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162f0fe484dd162; QUAD $0x25f16202c372486d; QUAD $0x481df1620dc37248; QUAD $0x6f487e716216c372; QUAD $0xe8cd25485d7362cb; QUAD $0x6296d4254825d362; QUAD $0x6dd162d1fe486dd1; QUAD $0x6f487e7162d0fe48; QUAD $0xc4fe487551623e62; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; 
QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x7d51623f626f487e; QUAD $0x6f487e7162c4fe48; QUAD $0x06c572482df162cd; QUAD $0x620bc5724825f162; QUAD $0x736219c572481df1; QUAD $0x3d1162cacf25484d; QUAD $0x2548255362c7fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x487df162e0fe485d; QUAD $0x724825f16202c172; QUAD $0xc172481df1620dc1; QUAD $0x62c96f487e716216; QUAD $0xd362e8cb25486d73; QUAD $0x7dd16296c4254825; QUAD $0xfe487dd162c1fe48; QUAD $0x40626f487e7162c0; QUAD $0xd162d86f487e7162; QUAD $0x7dd16224046f487e; QUAD $0x6f487e7162c3fe49; QUAD $0x244c6f487ed162d9; QUAD $0x62cbfe4975d16201; QUAD $0x7ed162da6f487e71; QUAD $0x6dd1620224546f48; QUAD $0x6f487e7162d3fe49; QUAD $0x245c6f487ed162db; QUAD $0x62dbfe4965d16203; QUAD $0x7ed162dc6f487e71; QUAD $0x5dd1620424646f48; QUAD $0x6f487e7162e3fe49; QUAD $0x246c6f487ed162dd; QUAD $0x62ebfe4955d16205; QUAD $0x7ed162de6f487e71; QUAD $0x4dd1620624746f48; QUAD $0x6f487e7162f3fe49; QUAD $0x247c6f487ed162df; QUAD $0x62fbfe4945d16207; QUAD $0x7ef162077f487ef1; QUAD $0x487ef162014f7f48; QUAD $0x7f487ef16202577f; QUAD $0x677f487ef162035f; QUAD $0x056f7f487ef16204; QUAD $0x6206777f487ef162; LONG $0x7f487ef1; WORD $0x077f + VZEROUPPER + RET + +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x000(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x008(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x010(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x018(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x020(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x028(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x030(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x038(SB)/8, $0x0c0d0e0f08090a0b +GLOBL PSHUFFLE_BYTE_FLIP_MASK<>(SB), 8, $64 
+DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x000(SB)/8, $0x0000000000000000 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x008(SB)/8, $0x0000000000000001 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x010(SB)/8, $0x0000000000000008 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x018(SB)/8, $0x0000000000000009 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x020(SB)/8, $0x0000000000000004 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x028(SB)/8, $0x0000000000000005 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x030(SB)/8, $0x000000000000000C +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x038(SB)/8, $0x000000000000000D +GLOBL PSHUFFLE_TRANSPOSE16_MASK1<>(SB), 8, $64 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x000(SB)/8, $0x0000000000000002 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x008(SB)/8, $0x0000000000000003 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x010(SB)/8, $0x000000000000000A +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x018(SB)/8, $0x000000000000000B +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x020(SB)/8, $0x0000000000000006 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x028(SB)/8, $0x0000000000000007 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x030(SB)/8, $0x000000000000000E +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x038(SB)/8, $0x000000000000000F +GLOBL PSHUFFLE_TRANSPOSE16_MASK2<>(SB), 8, $64 diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64_test.go b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64_test.go new file mode 100644 index 0000000000..bab089c027 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64_test.go @@ -0,0 +1,411 @@ +//+build !noasm,!appengine + +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sha256 + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "fmt" + "hash" + "reflect" + "sync" + "testing" +) + +func TestGoldenAVX512(t *testing.T) { + + if !avx512 { + t.SkipNow() + return + } + + server := NewAvx512Server() + h512 := NewAvx512(server) + + for _, g := range golden { + h512.Reset() + h512.Write([]byte(g.in)) + digest := h512.Sum([]byte{}) + s := fmt.Sprintf("%x", digest) + if !reflect.DeepEqual(digest, g.out[:]) { + t.Fatalf("Sum256 function: sha256(%s) = %s want %s", g.in, s, hex.EncodeToString(g.out[:])) + } + } +} + +func createInputs(size int) [16][]byte { + input := [16][]byte{} + for i := 0; i < 16; i++ { + input[i] = make([]byte, size) + } + return input +} + +func initDigests() *[512]byte { + digests := [512]byte{} + for i := 0; i < 16; i++ { + binary.LittleEndian.PutUint32(digests[(i+0*16)*4:], init0) + binary.LittleEndian.PutUint32(digests[(i+1*16)*4:], init1) + binary.LittleEndian.PutUint32(digests[(i+2*16)*4:], init2) + binary.LittleEndian.PutUint32(digests[(i+3*16)*4:], init3) + binary.LittleEndian.PutUint32(digests[(i+4*16)*4:], init4) + binary.LittleEndian.PutUint32(digests[(i+5*16)*4:], init5) + binary.LittleEndian.PutUint32(digests[(i+6*16)*4:], init6) + binary.LittleEndian.PutUint32(digests[(i+7*16)*4:], init7) + } + return &digests +} + +func testSha256Avx512(t *testing.T, offset, padding int) [16][]byte { + + if !avx512 { + t.SkipNow() + return [16][]byte{} + } + + l := uint(len(golden[offset].in)) + extraBlock := uint(0) + if padding == 0 { + extraBlock += 9 + } else { + extraBlock += 
64 + } + input := createInputs(int(l + extraBlock)) + for i := 0; i < 16; i++ { + copy(input[i], golden[offset+i].in) + input[i][l] = 0x80 + copy(input[i][l+1:], bytes.Repeat([]byte{0}, padding)) + + // Length in bits. + len := uint64(l) + len <<= 3 + for ii := uint(0); ii < 8; ii++ { + input[i][l+1+uint(padding)+ii] = byte(len >> (56 - 8*ii)) + } + } + mask := make([]uint64, len(input[0])>>6) + for m := range mask { + mask[m] = 0xffff + } + output := blockAvx512(initDigests(), input, mask) + for i := 0; i < 16; i++ { + if bytes.Compare(output[i][:], golden[offset+i].out[:]) != 0 { + t.Fatalf("Sum256 function: sha256(%s) = %s want %s", golden[offset+i].in, hex.EncodeToString(output[i][:]), hex.EncodeToString(golden[offset+i].out[:])) + } + } + return input +} + +func TestAvx512_1Block(t *testing.T) { testSha256Avx512(t, 31, 0) } +func TestAvx512_3Blocks(t *testing.T) { testSha256Avx512(t, 47, 55) } + +func TestAvx512_MixedBlocks(t *testing.T) { + + if !avx512 { + t.SkipNow() + return + } + + inputSingleBlock := testSha256Avx512(t, 31, 0) + inputMultiBlock := testSha256Avx512(t, 47, 55) + + input := [16][]byte{} + + for i := range input { + if i%2 == 0 { + input[i] = inputMultiBlock[i] + } else { + input[i] = inputSingleBlock[i] + } + } + + mask := [3]uint64{0xffff, 0x5555, 0x5555} + output := blockAvx512(initDigests(), input, mask[:]) + var offset int + for i := 0; i < len(output); i++ { + if i%2 == 0 { + offset = 47 + } else { + offset = 31 + } + if bytes.Compare(output[i][:], golden[offset+i].out[:]) != 0 { + t.Fatalf("Sum256 function: sha256(%s) = %s want %s", golden[offset+i].in, hex.EncodeToString(output[i][:]), hex.EncodeToString(golden[offset+i].out[:])) + } + } +} + +func TestAvx512_MixedWithNilBlocks(t *testing.T) { + + if !avx512 { + t.SkipNow() + return + } + + inputSingleBlock := testSha256Avx512(t, 31, 0) + inputMultiBlock := testSha256Avx512(t, 47, 55) + + input := [16][]byte{} + + for i := range input { + if i%3 == 0 { + input[i] = inputMultiBlock[i] 
+ } else if i%3 == 1 { + input[i] = inputSingleBlock[i] + } else { + input[i] = nil + } + } + + mask := [3]uint64{0xb6db, 0x9249, 0x9249} + output := blockAvx512(initDigests(), input, mask[:]) + var offset int + for i := 0; i < len(output); i++ { + if i%3 == 2 { // for nil inputs + initvec := [32]byte{0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, + 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, + 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05, 0x68, 0x8c, + 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19} + if bytes.Compare(output[i][:], initvec[:]) != 0 { + t.Fatalf("Sum256 function: sha256 for nil vector = %s want %s", hex.EncodeToString(output[i][:]), hex.EncodeToString(initvec[:])) + } + continue + } + if i%3 == 0 { + offset = 47 + } else { + offset = 31 + } + if bytes.Compare(output[i][:], golden[offset+i].out[:]) != 0 { + t.Fatalf("Sum256 function: sha256(%s) = %s want %s", golden[offset+i].in, hex.EncodeToString(output[i][:]), hex.EncodeToString(golden[offset+i].out[:])) + } + } +} + +func TestAvx512Server(t *testing.T) { + + if !avx512 { + t.SkipNow() + return + } + + const offset = 31 + 16 + server := NewAvx512Server() + + // First block of 64 bytes + for i := 0; i < 16; i++ { + input := make([]byte, 64) + copy(input, golden[offset+i].in) + server.Write(uint64(Avx512ServerUID+i), input) + } + + // Second block of 64 bytes + for i := 0; i < 16; i++ { + input := make([]byte, 64) + copy(input, golden[offset+i].in[64:]) + server.Write(uint64(Avx512ServerUID+i), input) + } + + wg := sync.WaitGroup{} + wg.Add(16) + + // Third and final block + for i := 0; i < 16; i++ { + input := make([]byte, 64) + input[0] = 0x80 + copy(input[1:], bytes.Repeat([]byte{0}, 63-8)) + + // Length in bits. 
+ len := uint64(128) + len <<= 3 + for ii := uint(0); ii < 8; ii++ { + input[63-8+1+ii] = byte(len >> (56 - 8*ii)) + } + go func(i int, uid uint64, input []byte) { + output := server.Sum(uid, input) + if bytes.Compare(output[:], golden[offset+i].out[:]) != 0 { + t.Fatalf("Sum256 function: sha256(%s) = %s want %s", golden[offset+i].in, hex.EncodeToString(output[:]), hex.EncodeToString(golden[offset+i].out[:])) + } + wg.Done() + }(i, uint64(Avx512ServerUID+i), input) + } + + wg.Wait() +} + +func TestAvx512Digest(t *testing.T) { + + if !avx512 { + t.SkipNow() + return + } + + server := NewAvx512Server() + + const tests = 16 + h512 := [16]hash.Hash{} + for i := 0; i < tests; i++ { + h512[i] = NewAvx512(server) + } + + const offset = 31 + 16 + for i := 0; i < tests; i++ { + input := make([]byte, 64) + copy(input, golden[offset+i].in) + h512[i].Write(input) + } + for i := 0; i < tests; i++ { + input := make([]byte, 64) + copy(input, golden[offset+i].in[64:]) + h512[i].Write(input) + } + for i := 0; i < tests; i++ { + output := h512[i].Sum([]byte{}) + if bytes.Compare(output[:], golden[offset+i].out[:]) != 0 { + t.Fatalf("Sum256 function: sha256(%s) = %s want %s", golden[offset+i].in, hex.EncodeToString(output[:]), hex.EncodeToString(golden[offset+i].out[:])) + } + } +} + +func benchmarkAvx512SingleCore(h512 []hash.Hash, body []byte) { + + for i := 0; i < len(h512); i++ { + h512[i].Write(body) + } + for i := 0; i < len(h512); i++ { + _ = h512[i].Sum([]byte{}) + } +} + +func benchmarkAvx512(b *testing.B, size int) { + + if !avx512 { + b.SkipNow() + return + } + + server := NewAvx512Server() + + const tests = 16 + body := make([]byte, size) + + b.SetBytes(int64(len(body) * tests)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + h512 := make([]hash.Hash, tests) + for i := 0; i < tests; i++ { + h512[i] = NewAvx512(server) + } + + benchmarkAvx512SingleCore(h512, body) + } +} + +func BenchmarkAvx512_05M(b *testing.B) { benchmarkAvx512(b, 512*1024) } +func BenchmarkAvx512_1M(b 
*testing.B) { benchmarkAvx512(b, 1*1024*1024) } +func BenchmarkAvx512_5M(b *testing.B) { benchmarkAvx512(b, 5*1024*1024) } +func BenchmarkAvx512_10M(b *testing.B) { benchmarkAvx512(b, 10*1024*1024) } + +func benchmarkAvx512MultiCore(b *testing.B, size, cores int) { + + if !avx512 { + b.SkipNow() + return + } + + servers := make([]*Avx512Server, cores) + for c := 0; c < cores; c++ { + servers[c] = NewAvx512Server() + } + + const tests = 16 + + body := make([]byte, size) + + h512 := make([]hash.Hash, tests*cores) + for i := 0; i < tests*cores; i++ { + h512[i] = NewAvx512(servers[i>>4]) + } + + b.SetBytes(int64(size * 16 * cores)) + b.ResetTimer() + + var wg sync.WaitGroup + + for i := 0; i < b.N; i++ { + wg.Add(cores) + for c := 0; c < cores; c++ { + go func(c int) { benchmarkAvx512SingleCore(h512[c*tests:(c+1)*tests], body); wg.Done() }(c) + } + wg.Wait() + } +} + +func BenchmarkAvx512_5M_2Cores(b *testing.B) { benchmarkAvx512MultiCore(b, 5*1024*1024, 2) } +func BenchmarkAvx512_5M_4Cores(b *testing.B) { benchmarkAvx512MultiCore(b, 5*1024*1024, 4) } +func BenchmarkAvx512_5M_6Cores(b *testing.B) { benchmarkAvx512MultiCore(b, 5*1024*1024, 6) } + +type maskTest struct { + in [16]int + out [16]maskRounds +} + +var goldenMask = []maskTest{ + {[16]int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [16]maskRounds{}}, + {[16]int{64, 0, 64, 0, 64, 0, 64, 0, 64, 0, 64, 0, 64, 0, 64, 0}, [16]maskRounds{{0x5555, 1}}}, + {[16]int{0, 64, 0, 64, 0, 64, 0, 64, 0, 64, 0, 64, 0, 64, 0, 64}, [16]maskRounds{{0xaaaa, 1}}}, + {[16]int{64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64}, [16]maskRounds{{0xffff, 1}}}, + {[16]int{128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, [16]maskRounds{{0xffff, 2}}}, + {[16]int{64, 128, 64, 128, 64, 128, 64, 128, 64, 128, 64, 128, 64, 128, 64, 128}, [16]maskRounds{{0xffff, 1}, {0xaaaa, 1}}}, + {[16]int{128, 64, 128, 64, 128, 64, 128, 64, 128, 64, 128, 64, 128, 64, 128, 64}, [16]maskRounds{{0xffff, 1}, 
{0x5555, 1}}}, + {[16]int{64, 192, 64, 192, 64, 192, 64, 192, 64, 192, 64, 192, 64, 192, 64, 192}, [16]maskRounds{{0xffff, 1}, {0xaaaa, 2}}}, + // + // >= 64 0110=6 1011=b 1101=d 0110=6 + // >=128 0100=4 0010=2 1001=9 0100=4 + {[16]int{0, 64, 128, 0, 64, 128, 0, 64, 128, 0, 64, 128, 0, 64, 128, 0}, [16]maskRounds{{0x6db6, 1}, {0x4924, 1}}}, + {[16]int{1 * 64, 2 * 64, 3 * 64, 4 * 64, 5 * 64, 6 * 64, 7 * 64, 8 * 64, 9 * 64, 10 * 64, 11 * 64, 12 * 64, 13 * 64, 14 * 64, 15 * 64, 16 * 64}, + [16]maskRounds{{0xffff, 1}, {0xfffe, 1}, {0xfffc, 1}, {0xfff8, 1}, {0xfff0, 1}, {0xffe0, 1}, {0xffc0, 1}, {0xff80, 1}, + {0xff00, 1}, {0xfe00, 1}, {0xfc00, 1}, {0xf800, 1}, {0xf000, 1}, {0xe000, 1}, {0xc000, 1}, {0x8000, 1}}}, + {[16]int{2 * 64, 1 * 64, 3 * 64, 4 * 64, 5 * 64, 6 * 64, 7 * 64, 8 * 64, 9 * 64, 10 * 64, 11 * 64, 12 * 64, 13 * 64, 14 * 64, 15 * 64, 16 * 64}, + [16]maskRounds{{0xffff, 1}, {0xfffd, 1}, {0xfffc, 1}, {0xfff8, 1}, {0xfff0, 1}, {0xffe0, 1}, {0xffc0, 1}, {0xff80, 1}, + {0xff00, 1}, {0xfe00, 1}, {0xfc00, 1}, {0xf800, 1}, {0xf000, 1}, {0xe000, 1}, {0xc000, 1}, {0x8000, 1}}}, + {[16]int{10 * 64, 20 * 64, 30 * 64, 40 * 64, 50 * 64, 60 * 64, 70 * 64, 80 * 64, 90 * 64, 100 * 64, 110 * 64, 120 * 64, 130 * 64, 140 * 64, 150 * 64, 160 * 64}, + [16]maskRounds{{0xffff, 10}, {0xfffe, 10}, {0xfffc, 10}, {0xfff8, 10}, {0xfff0, 10}, {0xffe0, 10}, {0xffc0, 10}, {0xff80, 10}, + {0xff00, 10}, {0xfe00, 10}, {0xfc00, 10}, {0xf800, 10}, {0xf000, 10}, {0xe000, 10}, {0xc000, 10}, {0x8000, 10}}}, + {[16]int{10 * 64, 19 * 64, 27 * 64, 34 * 64, 40 * 64, 45 * 64, 49 * 64, 52 * 64, 54 * 64, 55 * 64, 57 * 64, 60 * 64, 64 * 64, 69 * 64, 75 * 64, 82 * 64}, + [16]maskRounds{{0xffff, 10}, {0xfffe, 9}, {0xfffc, 8}, {0xfff8, 7}, {0xfff0, 6}, {0xffe0, 5}, {0xffc0, 4}, {0xff80, 3}, + {0xff00, 2}, {0xfe00, 1}, {0xfc00, 2}, {0xf800, 3}, {0xf000, 4}, {0xe000, 5}, {0xc000, 6}, {0x8000, 7}}}, +} + +func TestMaskGen(t *testing.T) { + input := [16][]byte{} + for gcase, g := range goldenMask { + for i, l 
:= range g.in { + buf := make([]byte, l) + input[i] = buf[:] + } + + mr := genMask(input) + + if !reflect.DeepEqual(mr, g.out) { + t.Fatalf("case %d: got %04x\n want %04x", gcase, mr, g.out) + } + } +} diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.go new file mode 100644 index 0000000000..c2f71181f3 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.go @@ -0,0 +1,22 @@ +//+build !noasm,!appengine + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sha256 + +//go:noescape +func blockAvx(h []uint32, message []uint8, reserved0, reserved1, reserved2, reserved3 uint64) diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.s new file mode 100644 index 0000000000..9f444d49f5 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.s @@ -0,0 +1,408 @@ +//+build !noasm,!appengine + +// SHA256 implementation for AVX + +// +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// +// This code is based on an Intel White-Paper: +// "Fast SHA-256 Implementations on Intel Architecture Processors" +// +// together with the reference implementation from the following authors: +// James Guilford +// Kirk Yap +// Tim Chen +// +// For Golang it has been converted to Plan 9 assembly with the help of +// github.com/minio/asm2plan9s to assemble Intel instructions to their Plan9 +// equivalents +// + +#include "textflag.h" + +#define ROTATE_XS \ + MOVOU X4, X15 \ + MOVOU X5, X4 \ + MOVOU X6, X5 \ + MOVOU X7, X6 \ + MOVOU X15, X7 + +// compute s0 four at a time and s1 two at a time +// compute W[-16] + W[-7] 4 at a time +#define FOUR_ROUNDS_AND_SCHED(a, b, c, d, e, f, g, h) \ + MOVL e, R13 \ // y0 = e + ROLL $18, R13 \ // y0 = e >> (25-11) + MOVL a, R14 \ // y1 = a + LONG $0x0f41e3c4; WORD $0x04c6 \ // VPALIGNR XMM0,XMM7,XMM6,0x4 /* XTMP0 = W[-7] */ + ROLL $23, R14 \ // y1 = a >> (22-13) + XORL e, R13 \ // y0 = e ^ (e >> (25-11)) + MOVL f, R15 \ // y2 = f + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + XORL a, R14 \ // y1 = a ^ (a >> (22-13) + XORL g, R15 \ // y2 = f^g + LONG $0xc4fef9c5 \ // VPADDD XMM0,XMM0,XMM4 /* XTMP0 = W[-7] + W[-16] */ + XORL e, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6) ) + ANDL e, R15 \ // y2 = (f^g)&e + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + \ + \ // compute s0 + \ + LONG $0x0f51e3c4; WORD $0x04cc \ // VPALIGNR XMM1,XMM5,XMM4,0x4 /* XTMP1 = W[-15] */ + XORL a, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + 
XORL g, R15 \ // y2 = CH = ((f^g)&e)^g + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + ADDL R13, R15 \ // y2 = S1 + CH + ADDL _xfer+48(FP), R15 \ // y2 = k + w + S1 + CH + MOVL a, R13 \ // y0 = a + ADDL R15, h \ // h = h + S1 + CH + k + w + \ // ROTATE_ARGS + MOVL a, R15 \ // y2 = a + LONG $0xd172e9c5; BYTE $0x07 \ // VPSRLD XMM2,XMM1,0x7 /* */ + ORL c, R13 \ // y0 = a|c + ADDL h, d \ // d = d + h + S1 + CH + k + w + ANDL c, R15 \ // y2 = a&c + LONG $0xf172e1c5; BYTE $0x19 \ // VPSLLD XMM3,XMM1,0x19 /* */ + ANDL b, R13 \ // y0 = (a|c)&b + ADDL R14, h \ // h = h + S1 + CH + k + w + S0 + LONG $0xdaebe1c5 \ // VPOR XMM3,XMM3,XMM2 /* XTMP1 = W[-15] MY_ROR 7 */ + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL R13, h \ // h = h + S1 + CH + k + w + S0 + MAJ + \ // ROTATE_ARGS + MOVL d, R13 \ // y0 = e + MOVL h, R14 \ // y1 = a + ROLL $18, R13 \ // y0 = e >> (25-11) + XORL d, R13 \ // y0 = e ^ (e >> (25-11)) + MOVL e, R15 \ // y2 = f + ROLL $23, R14 \ // y1 = a >> (22-13) + LONG $0xd172e9c5; BYTE $0x12 \ // VPSRLD XMM2,XMM1,0x12 /* */ + XORL h, R14 \ // y1 = a ^ (a >> (22-13) + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + XORL f, R15 \ // y2 = f^g + LONG $0xd172b9c5; BYTE $0x03 \ // VPSRLD XMM8,XMM1,0x3 /* XTMP4 = W[-15] >> 3 */ + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + XORL d, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + ANDL d, R15 \ // y2 = (f^g)&e + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + LONG $0xf172f1c5; BYTE $0x0e \ // VPSLLD XMM1,XMM1,0xe /* */ + XORL h, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + XORL f, R15 \ // y2 = CH = ((f^g)&e)^g + LONG $0xd9efe1c5 \ // VPXOR XMM3,XMM3,XMM1 /* */ + ADDL R13, R15 \ // y2 = S1 + CH + ADDL _xfer+52(FP), R15 \ // y2 = k + w + S1 + CH + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + LONG $0xdaefe1c5 \ // VPXOR XMM3,XMM3,XMM2 /* XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR */ + MOVL h, R13 \ // y0 = a + ADDL R15, g \ // h = h + S1 + CH + k + w + MOVL h, 
R15 \ // y2 = a + LONG $0xef61c1c4; BYTE $0xc8 \ // VPXOR XMM1,XMM3,XMM8 /* XTMP1 = s0 */ + ORL b, R13 \ // y0 = a|c + ADDL g, c \ // d = d + h + S1 + CH + k + w + ANDL b, R15 \ // y2 = a&c + \ + \ // compute low s1 + \ + LONG $0xd770f9c5; BYTE $0xfa \ // VPSHUFD XMM2,XMM7,0xfa /* XTMP2 = W[-2] {BBAA} */ + ANDL a, R13 \ // y0 = (a|c)&b + ADDL R14, g \ // h = h + S1 + CH + k + w + S0 + LONG $0xc1fef9c5 \ // VPADDD XMM0,XMM0,XMM1 /* XTMP0 = W[-16] + W[-7] + s0 */ + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL R13, g \ // h = h + S1 + CH + k + w + S0 + MAJ + \ // ROTATE_ARGS + MOVL c, R13 \ // y0 = e + MOVL g, R14 \ // y1 = a + ROLL $18, R13 \ // y0 = e >> (25-11) + XORL c, R13 \ // y0 = e ^ (e >> (25-11)) + ROLL $23, R14 \ // y1 = a >> (22-13) + MOVL d, R15 \ // y2 = f + XORL g, R14 \ // y1 = a ^ (a >> (22-13) + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + LONG $0xd272b9c5; BYTE $0x0a \ // VPSRLD XMM8,XMM2,0xa /* XTMP4 = W[-2] >> 10 {BBAA} */ + XORL e, R15 \ // y2 = f^g + LONG $0xd273e1c5; BYTE $0x13 \ // VPSRLQ XMM3,XMM2,0x13 /* XTMP3 = W[-2] MY_ROR 19 {xBxA} */ + XORL c, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + ANDL c, R15 \ // y2 = (f^g)&e + LONG $0xd273e9c5; BYTE $0x11 \ // VPSRLQ XMM2,XMM2,0x11 /* XTMP2 = W[-2] MY_ROR 17 {xBxA} */ + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + XORL g, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + XORL e, R15 \ // y2 = CH = ((f^g)&e)^g + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + LONG $0xd3efe9c5 \ // VPXOR XMM2,XMM2,XMM3 /* */ + ADDL R13, R15 \ // y2 = S1 + CH + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + ADDL _xfer+56(FP), R15 \ // y2 = k + w + S1 + CH + LONG $0xc2ef39c5 \ // VPXOR XMM8,XMM8,XMM2 /* XTMP4 = s1 {xBxA} */ + MOVL g, R13 \ // y0 = a + ADDL R15, f \ // h = h + S1 + CH + k + w + MOVL g, R15 \ // y2 = a + LONG $0x003942c4; BYTE $0xc2 \ // VPSHUFB XMM8,XMM8,XMM10 /* XTMP4 = s1 {00BA} */ + ORL a, R13 \ // y0 = a|c + ADDL f, b \ // d = d + h + S1 + CH 
+ k + w + ANDL a, R15 \ // y2 = a&c + LONG $0xfe79c1c4; BYTE $0xc0 \ // VPADDD XMM0,XMM0,XMM8 /* XTMP0 = {..., ..., W[1], W[0]} */ + ANDL h, R13 \ // y0 = (a|c)&b + ADDL R14, f \ // h = h + S1 + CH + k + w + S0 + \ + \ // compute high s1 + \ + LONG $0xd070f9c5; BYTE $0x50 \ // VPSHUFD XMM2,XMM0,0x50 /* XTMP2 = W[-2] {DDCC} */ + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL R13, f \ // h = h + S1 + CH + k + w + S0 + MAJ + \ // ROTATE_ARGS + MOVL b, R13 \ // y0 = e + ROLL $18, R13 \ // y0 = e >> (25-11) + MOVL f, R14 \ // y1 = a + ROLL $23, R14 \ // y1 = a >> (22-13) + XORL b, R13 \ // y0 = e ^ (e >> (25-11)) + MOVL c, R15 \ // y2 = f + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + LONG $0xd272a1c5; BYTE $0x0a \ // VPSRLD XMM11,XMM2,0xa /* XTMP5 = W[-2] >> 10 {DDCC} */ + XORL f, R14 \ // y1 = a ^ (a >> (22-13) + XORL d, R15 \ // y2 = f^g + LONG $0xd273e1c5; BYTE $0x13 \ // VPSRLQ XMM3,XMM2,0x13 /* XTMP3 = W[-2] MY_ROR 19 {xDxC} */ + XORL b, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + ANDL b, R15 \ // y2 = (f^g)&e + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + LONG $0xd273e9c5; BYTE $0x11 \ // VPSRLQ XMM2,XMM2,0x11 /* XTMP2 = W[-2] MY_ROR 17 {xDxC} */ + XORL f, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + XORL d, R15 \ // y2 = CH = ((f^g)&e)^g + LONG $0xd3efe9c5 \ // VPXOR XMM2,XMM2,XMM3 /* */ + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + ADDL R13, R15 \ // y2 = S1 + CH + ADDL _xfer+60(FP), R15 \ // y2 = k + w + S1 + CH + LONG $0xdaef21c5 \ // VPXOR XMM11,XMM11,XMM2 /* XTMP5 = s1 {xDxC} */ + MOVL f, R13 \ // y0 = a + ADDL R15, e \ // h = h + S1 + CH + k + w + MOVL f, R15 \ // y2 = a + LONG $0x002142c4; BYTE $0xdc \ // VPSHUFB XMM11,XMM11,XMM12 /* XTMP5 = s1 {DC00} */ + ORL h, R13 \ // y0 = a|c + ADDL e, a \ // d = d + h + S1 + CH + k + w + ANDL h, R15 \ // y2 = a&c + LONG $0xe0fea1c5 \ // VPADDD XMM4,XMM11,XMM0 /* X0 = {W[3], W[2], W[1], W[0]} */ + ANDL g, R13 
\ // y0 = (a|c)&b + ADDL R14, e \ // h = h + S1 + CH + k + w + S0 + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL R13, e \ // h = h + S1 + CH + k + w + S0 + MAJ + \ // ROTATE_ARGS + ROTATE_XS + +#define DO_ROUND(a, b, c, d, e, f, g, h, offset) \ + MOVL e, R13 \ // y0 = e + ROLL $18, R13 \ // y0 = e >> (25-11) + MOVL a, R14 \ // y1 = a + XORL e, R13 \ // y0 = e ^ (e >> (25-11)) + ROLL $23, R14 \ // y1 = a >> (22-13) + MOVL f, R15 \ // y2 = f + XORL a, R14 \ // y1 = a ^ (a >> (22-13) + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + XORL g, R15 \ // y2 = f^g + XORL e, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + ANDL e, R15 \ // y2 = (f^g)&e + XORL a, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + XORL g, R15 \ // y2 = CH = ((f^g)&e)^g + ADDL R13, R15 \ // y2 = S1 + CH + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + ADDL _xfer+offset(FP), R15 \ // y2 = k + w + S1 + CH + MOVL a, R13 \ // y0 = a + ADDL R15, h \ // h = h + S1 + CH + k + w + MOVL a, R15 \ // y2 = a + ORL c, R13 \ // y0 = a|c + ADDL h, d \ // d = d + h + S1 + CH + k + w + ANDL c, R15 \ // y2 = a&c + ANDL b, R13 \ // y0 = (a|c)&b + ADDL R14, h \ // h = h + S1 + CH + k + w + S0 + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL R13, h // h = h + S1 + CH + k + w + S0 + MAJ + +// func blockAvx(h []uint32, message []uint8, reserved0, reserved1, reserved2, reserved3 uint64) +TEXT ·blockAvx(SB), 7, $0-80 + + MOVQ h+0(FP), SI // SI: &h + MOVQ message_base+24(FP), R8 // &message + MOVQ message_len+32(FP), R9 // length of message + CMPQ R9, $0 + JEQ done_hash + ADDQ R8, R9 + MOVQ R9, reserved2+64(FP) // store end of message + + // Register definition + // a --> eax + // b --> ebx + // c --> ecx + // d --> r8d + // e --> edx + // f --> r9d + // g --> r10d + // h --> r11d + // + // y0 --> r13d + // y1 --> r14d + // y2 --> r15d + + MOVL (0*4)(SI), AX // a = H0 + MOVL 
(1*4)(SI), BX // b = H1 + MOVL (2*4)(SI), CX // c = H2 + MOVL (3*4)(SI), R8 // d = H3 + MOVL (4*4)(SI), DX // e = H4 + MOVL (5*4)(SI), R9 // f = H5 + MOVL (6*4)(SI), R10 // g = H6 + MOVL (7*4)(SI), R11 // h = H7 + + MOVOU bflipMask<>(SB), X13 + MOVOU shuf00BA<>(SB), X10 // shuffle xBxA -> 00BA + MOVOU shufDC00<>(SB), X12 // shuffle xDxC -> DC00 + + MOVQ message_base+24(FP), SI // SI: &message + +loop0: + LEAQ constants<>(SB), BP + + // byte swap first 16 dwords + MOVOU 0*16(SI), X4 + LONG $0x0059c2c4; BYTE $0xe5 // VPSHUFB XMM4, XMM4, XMM13 + MOVOU 1*16(SI), X5 + LONG $0x0051c2c4; BYTE $0xed // VPSHUFB XMM5, XMM5, XMM13 + MOVOU 2*16(SI), X6 + LONG $0x0049c2c4; BYTE $0xf5 // VPSHUFB XMM6, XMM6, XMM13 + MOVOU 3*16(SI), X7 + LONG $0x0041c2c4; BYTE $0xfd // VPSHUFB XMM7, XMM7, XMM13 + + MOVQ SI, reserved3+72(FP) + MOVD $0x3, DI + + // schedule 48 input dwords, by doing 3 rounds of 16 each +loop1: + LONG $0x4dfe59c5; BYTE $0x00 // VPADDD XMM9, XMM4, 0[RBP] /* Add 1st constant to first part of message */ + MOVOU X9, reserved0+48(FP) + FOUR_ROUNDS_AND_SCHED(AX, BX, CX, R8, DX, R9, R10, R11) + + LONG $0x4dfe59c5; BYTE $0x10 // VPADDD XMM9, XMM4, 16[RBP] /* Add 2nd constant to message */ + MOVOU X9, reserved0+48(FP) + FOUR_ROUNDS_AND_SCHED(DX, R9, R10, R11, AX, BX, CX, R8) + + LONG $0x4dfe59c5; BYTE $0x20 // VPADDD XMM9, XMM4, 32[RBP] /* Add 3rd constant to message */ + MOVOU X9, reserved0+48(FP) + FOUR_ROUNDS_AND_SCHED(AX, BX, CX, R8, DX, R9, R10, R11) + + LONG $0x4dfe59c5; BYTE $0x30 // VPADDD XMM9, XMM4, 48[RBP] /* Add 4th constant to message */ + MOVOU X9, reserved0+48(FP) + ADDQ $64, BP + FOUR_ROUNDS_AND_SCHED(DX, R9, R10, R11, AX, BX, CX, R8) + + SUBQ $1, DI + JNE loop1 + + MOVD $0x2, DI + +loop2: + LONG $0x4dfe59c5; BYTE $0x00 // VPADDD XMM9, XMM4, 0[RBP] /* Add 1st constant to first part of message */ + MOVOU X9, reserved0+48(FP) + DO_ROUND( AX, BX, CX, R8, DX, R9, R10, R11, 48) + DO_ROUND(R11, AX, BX, CX, R8, DX, R9, R10, 52) + DO_ROUND(R10, R11, AX, BX, CX, R8, 
DX, R9, 56) + DO_ROUND( R9, R10, R11, AX, BX, CX, R8, DX, 60) + + LONG $0x4dfe51c5; BYTE $0x10 // VPADDD XMM9, XMM5, 16[RBP] /* Add 2nd constant to message */ + MOVOU X9, reserved0+48(FP) + ADDQ $32, BP + DO_ROUND( DX, R9, R10, R11, AX, BX, CX, R8, 48) + DO_ROUND( R8, DX, R9, R10, R11, AX, BX, CX, 52) + DO_ROUND( CX, R8, DX, R9, R10, R11, AX, BX, 56) + DO_ROUND( BX, CX, R8, DX, R9, R10, R11, AX, 60) + + MOVOU X6, X4 + MOVOU X7, X5 + + SUBQ $1, DI + JNE loop2 + + MOVQ h+0(FP), SI // SI: &h + ADDL (0*4)(SI), AX // H0 = a + H0 + MOVL AX, (0*4)(SI) + ADDL (1*4)(SI), BX // H1 = b + H1 + MOVL BX, (1*4)(SI) + ADDL (2*4)(SI), CX // H2 = c + H2 + MOVL CX, (2*4)(SI) + ADDL (3*4)(SI), R8 // H3 = d + H3 + MOVL R8, (3*4)(SI) + ADDL (4*4)(SI), DX // H4 = e + H4 + MOVL DX, (4*4)(SI) + ADDL (5*4)(SI), R9 // H5 = f + H5 + MOVL R9, (5*4)(SI) + ADDL (6*4)(SI), R10 // H6 = g + H6 + MOVL R10, (6*4)(SI) + ADDL (7*4)(SI), R11 // H7 = h + H7 + MOVL R11, (7*4)(SI) + + MOVQ reserved3+72(FP), SI + ADDQ $64, SI + CMPQ reserved2+64(FP), SI + JNE loop0 + +done_hash: + RET + +// Constants table +DATA constants<>+0x0(SB)/8, $0x71374491428a2f98 +DATA constants<>+0x8(SB)/8, $0xe9b5dba5b5c0fbcf +DATA constants<>+0x10(SB)/8, $0x59f111f13956c25b +DATA constants<>+0x18(SB)/8, $0xab1c5ed5923f82a4 +DATA constants<>+0x20(SB)/8, $0x12835b01d807aa98 +DATA constants<>+0x28(SB)/8, $0x550c7dc3243185be +DATA constants<>+0x30(SB)/8, $0x80deb1fe72be5d74 +DATA constants<>+0x38(SB)/8, $0xc19bf1749bdc06a7 +DATA constants<>+0x40(SB)/8, $0xefbe4786e49b69c1 +DATA constants<>+0x48(SB)/8, $0x240ca1cc0fc19dc6 +DATA constants<>+0x50(SB)/8, $0x4a7484aa2de92c6f +DATA constants<>+0x58(SB)/8, $0x76f988da5cb0a9dc +DATA constants<>+0x60(SB)/8, $0xa831c66d983e5152 +DATA constants<>+0x68(SB)/8, $0xbf597fc7b00327c8 +DATA constants<>+0x70(SB)/8, $0xd5a79147c6e00bf3 +DATA constants<>+0x78(SB)/8, $0x1429296706ca6351 +DATA constants<>+0x80(SB)/8, $0x2e1b213827b70a85 +DATA constants<>+0x88(SB)/8, $0x53380d134d2c6dfc +DATA 
constants<>+0x90(SB)/8, $0x766a0abb650a7354 +DATA constants<>+0x98(SB)/8, $0x92722c8581c2c92e +DATA constants<>+0xa0(SB)/8, $0xa81a664ba2bfe8a1 +DATA constants<>+0xa8(SB)/8, $0xc76c51a3c24b8b70 +DATA constants<>+0xb0(SB)/8, $0xd6990624d192e819 +DATA constants<>+0xb8(SB)/8, $0x106aa070f40e3585 +DATA constants<>+0xc0(SB)/8, $0x1e376c0819a4c116 +DATA constants<>+0xc8(SB)/8, $0x34b0bcb52748774c +DATA constants<>+0xd0(SB)/8, $0x4ed8aa4a391c0cb3 +DATA constants<>+0xd8(SB)/8, $0x682e6ff35b9cca4f +DATA constants<>+0xe0(SB)/8, $0x78a5636f748f82ee +DATA constants<>+0xe8(SB)/8, $0x8cc7020884c87814 +DATA constants<>+0xf0(SB)/8, $0xa4506ceb90befffa +DATA constants<>+0xf8(SB)/8, $0xc67178f2bef9a3f7 + +DATA bflipMask<>+0x00(SB)/8, $0x0405060700010203 +DATA bflipMask<>+0x08(SB)/8, $0x0c0d0e0f08090a0b + +DATA shuf00BA<>+0x00(SB)/8, $0x0b0a090803020100 +DATA shuf00BA<>+0x08(SB)/8, $0xFFFFFFFFFFFFFFFF + +DATA shufDC00<>+0x00(SB)/8, $0xFFFFFFFFFFFFFFFF +DATA shufDC00<>+0x08(SB)/8, $0x0b0a090803020100 + +GLOBL constants<>(SB), 8, $256 +GLOBL bflipMask<>(SB), (NOPTR+RODATA), $16 +GLOBL shuf00BA<>(SB), (NOPTR+RODATA), $16 +GLOBL shufDC00<>(SB), (NOPTR+RODATA), $16 diff --git a/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go new file mode 100644 index 0000000000..483689ef0f --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go @@ -0,0 +1,6 @@ +//+build !noasm,!appengine + +package sha256 + +//go:noescape +func blockSha(h *[8]uint32, message []uint8) diff --git a/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s new file mode 100644 index 0000000000..909fc0ef85 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s @@ -0,0 +1,266 @@ +//+build !noasm,!appengine + +// SHA intrinsic version of SHA256 + +// Kristofer Peterson, (C) 2018. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "textflag.h" + +DATA K<>+0x00(SB)/4, $0x428a2f98 +DATA K<>+0x04(SB)/4, $0x71374491 +DATA K<>+0x08(SB)/4, $0xb5c0fbcf +DATA K<>+0x0c(SB)/4, $0xe9b5dba5 +DATA K<>+0x10(SB)/4, $0x3956c25b +DATA K<>+0x14(SB)/4, $0x59f111f1 +DATA K<>+0x18(SB)/4, $0x923f82a4 +DATA K<>+0x1c(SB)/4, $0xab1c5ed5 +DATA K<>+0x20(SB)/4, $0xd807aa98 +DATA K<>+0x24(SB)/4, $0x12835b01 +DATA K<>+0x28(SB)/4, $0x243185be +DATA K<>+0x2c(SB)/4, $0x550c7dc3 +DATA K<>+0x30(SB)/4, $0x72be5d74 +DATA K<>+0x34(SB)/4, $0x80deb1fe +DATA K<>+0x38(SB)/4, $0x9bdc06a7 +DATA K<>+0x3c(SB)/4, $0xc19bf174 +DATA K<>+0x40(SB)/4, $0xe49b69c1 +DATA K<>+0x44(SB)/4, $0xefbe4786 +DATA K<>+0x48(SB)/4, $0x0fc19dc6 +DATA K<>+0x4c(SB)/4, $0x240ca1cc +DATA K<>+0x50(SB)/4, $0x2de92c6f +DATA K<>+0x54(SB)/4, $0x4a7484aa +DATA K<>+0x58(SB)/4, $0x5cb0a9dc +DATA K<>+0x5c(SB)/4, $0x76f988da +DATA K<>+0x60(SB)/4, $0x983e5152 +DATA K<>+0x64(SB)/4, $0xa831c66d +DATA K<>+0x68(SB)/4, $0xb00327c8 +DATA K<>+0x6c(SB)/4, $0xbf597fc7 +DATA K<>+0x70(SB)/4, $0xc6e00bf3 +DATA K<>+0x74(SB)/4, $0xd5a79147 +DATA K<>+0x78(SB)/4, $0x06ca6351 +DATA K<>+0x7c(SB)/4, $0x14292967 +DATA K<>+0x80(SB)/4, $0x27b70a85 +DATA K<>+0x84(SB)/4, $0x2e1b2138 +DATA K<>+0x88(SB)/4, $0x4d2c6dfc +DATA K<>+0x8c(SB)/4, $0x53380d13 +DATA K<>+0x90(SB)/4, $0x650a7354 +DATA K<>+0x94(SB)/4, $0x766a0abb +DATA K<>+0x98(SB)/4, $0x81c2c92e +DATA K<>+0x9c(SB)/4, $0x92722c85 +DATA K<>+0xa0(SB)/4, $0xa2bfe8a1 +DATA 
K<>+0xa4(SB)/4, $0xa81a664b +DATA K<>+0xa8(SB)/4, $0xc24b8b70 +DATA K<>+0xac(SB)/4, $0xc76c51a3 +DATA K<>+0xb0(SB)/4, $0xd192e819 +DATA K<>+0xb4(SB)/4, $0xd6990624 +DATA K<>+0xb8(SB)/4, $0xf40e3585 +DATA K<>+0xbc(SB)/4, $0x106aa070 +DATA K<>+0xc0(SB)/4, $0x19a4c116 +DATA K<>+0xc4(SB)/4, $0x1e376c08 +DATA K<>+0xc8(SB)/4, $0x2748774c +DATA K<>+0xcc(SB)/4, $0x34b0bcb5 +DATA K<>+0xd0(SB)/4, $0x391c0cb3 +DATA K<>+0xd4(SB)/4, $0x4ed8aa4a +DATA K<>+0xd8(SB)/4, $0x5b9cca4f +DATA K<>+0xdc(SB)/4, $0x682e6ff3 +DATA K<>+0xe0(SB)/4, $0x748f82ee +DATA K<>+0xe4(SB)/4, $0x78a5636f +DATA K<>+0xe8(SB)/4, $0x84c87814 +DATA K<>+0xec(SB)/4, $0x8cc70208 +DATA K<>+0xf0(SB)/4, $0x90befffa +DATA K<>+0xf4(SB)/4, $0xa4506ceb +DATA K<>+0xf8(SB)/4, $0xbef9a3f7 +DATA K<>+0xfc(SB)/4, $0xc67178f2 +GLOBL K<>(SB), RODATA|NOPTR, $256 + +DATA SHUF_MASK<>+0x00(SB)/8, $0x0405060700010203 +DATA SHUF_MASK<>+0x08(SB)/8, $0x0c0d0e0f08090a0b +GLOBL SHUF_MASK<>(SB), RODATA|NOPTR, $16 + +// Register Usage +// BX base address of constant table (constant) +// DX hash_state (constant) +// SI hash_data.data +// DI hash_data.data + hash_data.length - 64 (constant) +// X0 scratch +// X1 scratch +// X2 working hash state // ABEF +// X3 working hash state // CDGH +// X4 first 16 bytes of block +// X5 second 16 bytes of block +// X6 third 16 bytes of block +// X7 fourth 16 bytes of block +// X12 saved hash state // ABEF +// X13 saved hash state // CDGH +// X15 data shuffle mask (constant) + +TEXT ·blockSha(SB), NOSPLIT, $0-32 + MOVQ h+0(FP), DX + MOVQ message_base+8(FP), SI + MOVQ message_len+16(FP), DI + LEAQ -64(SI)(DI*1), DI + MOVOU (DX), X2 + MOVOU 16(DX), X1 + MOVO X2, X3 + PUNPCKLLQ X1, X2 + PUNPCKHLQ X1, X3 + PSHUFD $0x27, X2, X2 + PSHUFD $0x27, X3, X3 + MOVO SHUF_MASK<>(SB), X15 + LEAQ K<>(SB), BX + + JMP TEST + +LOOP: + MOVO X2, X12 + MOVO X3, X13 + + // load block and shuffle + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOU 32(SI), X6 + MOVOU 48(SI), X7 + PSHUFB X15, X4 + PSHUFB X15, X5 + PSHUFB X15, X6 + PSHUFB 
X15, X7 + +#define ROUND456 \ + PADDL X5, X0 \ + LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 + MOVO X5, X1 \ + LONG $0x0f3a0f66; WORD $0x04cc \ // PALIGNR XMM1, XMM4, 4 + PADDL X1, X6 \ + LONG $0xf5cd380f \ // SHA256MSG2 XMM6, XMM5 + PSHUFD $0x4e, X0, X0 \ + LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 + LONG $0xe5cc380f // SHA256MSG1 XMM4, XMM5 + +#define ROUND567 \ + PADDL X6, X0 \ + LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 + MOVO X6, X1 \ + LONG $0x0f3a0f66; WORD $0x04cd \ // PALIGNR XMM1, XMM5, 4 + PADDL X1, X7 \ + LONG $0xfecd380f \ // SHA256MSG2 XMM7, XMM6 + PSHUFD $0x4e, X0, X0 \ + LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 + LONG $0xeecc380f // SHA256MSG1 XMM5, XMM6 + +#define ROUND674 \ + PADDL X7, X0 \ + LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 + MOVO X7, X1 \ + LONG $0x0f3a0f66; WORD $0x04ce \ // PALIGNR XMM1, XMM6, 4 + PADDL X1, X4 \ + LONG $0xe7cd380f \ // SHA256MSG2 XMM4, XMM7 + PSHUFD $0x4e, X0, X0 \ + LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 + LONG $0xf7cc380f // SHA256MSG1 XMM6, XMM7 + +#define ROUND745 \ + PADDL X4, X0 \ + LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 + MOVO X4, X1 \ + LONG $0x0f3a0f66; WORD $0x04cf \ // PALIGNR XMM1, XMM7, 4 + PADDL X1, X5 \ + LONG $0xeccd380f \ // SHA256MSG2 XMM5, XMM4 + PSHUFD $0x4e, X0, X0 \ + LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 + LONG $0xfccc380f // SHA256MSG1 XMM7, XMM4 + + // rounds 0-3 + MOVO (BX), X0 + PADDL X4, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + + // rounds 4-7 + MOVO 1*16(BX), X0 + PADDL X5, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + LONG $0xe5cc380f // SHA256MSG1 XMM4, XMM5 + + // rounds 8-11 + MOVO 2*16(BX), X0 + PADDL X6, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + LONG $0xeecc380f // SHA256MSG1 XMM5, XMM6 + + MOVO 3*16(BX), X0; ROUND674 // rounds 
12-15 + MOVO 4*16(BX), X0; ROUND745 // rounds 16-19 + MOVO 5*16(BX), X0; ROUND456 // rounds 20-23 + MOVO 6*16(BX), X0; ROUND567 // rounds 24-27 + MOVO 7*16(BX), X0; ROUND674 // rounds 28-31 + MOVO 8*16(BX), X0; ROUND745 // rounds 32-35 + MOVO 9*16(BX), X0; ROUND456 // rounds 36-39 + MOVO 10*16(BX), X0; ROUND567 // rounds 40-43 + MOVO 11*16(BX), X0; ROUND674 // rounds 44-47 + MOVO 12*16(BX), X0; ROUND745 // rounds 48-51 + + // rounds 52-55 + MOVO 13*16(BX), X0 + PADDL X5, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + MOVO X5, X1 + LONG $0x0f3a0f66; WORD $0x04cc // PALIGNR XMM1, XMM4, 4 + PADDL X1, X6 + LONG $0xf5cd380f // SHA256MSG2 XMM6, XMM5 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + + // rounds 56-59 + MOVO 14*16(BX), X0 + PADDL X6, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + MOVO X6, X1 + LONG $0x0f3a0f66; WORD $0x04cd // PALIGNR XMM1, XMM5, 4 + PADDL X1, X7 + LONG $0xfecd380f // SHA256MSG2 XMM7, XMM6 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + + // rounds 60-63 + MOVO 15*16(BX), X0 + PADDL X7, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + + PADDL X12, X2 + PADDL X13, X3 + + ADDQ $64, SI + +TEST: + CMPQ SI, DI + JBE LOOP + + PSHUFD $0x4e, X3, X0 + LONG $0x0e3a0f66; WORD $0xf0c2 // PBLENDW XMM0, XMM2, 0xf0 + PSHUFD $0x4e, X2, X1 + LONG $0x0e3a0f66; WORD $0x0fcb // PBLENDW XMM1, XMM3, 0x0f + PSHUFD $0x1b, X0, X0 + PSHUFD $0x1b, X1, X1 + + MOVOU X0, (DX) + MOVOU X1, 16(DX) + + RET diff --git a/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64_test.go b/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64_test.go new file mode 100644 index 0000000000..c43202eba9 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64_test.go @@ -0,0 +1,77 @@ +//+build !noasm,!appengine + +package sha256 + +import ( + "crypto/sha256" + "encoding/binary" + "testing" +) + +func sha256hash(m []byte) (r [32]byte) { + var h 
[8]uint32 + + h[0] = 0x6a09e667 + h[1] = 0xbb67ae85 + h[2] = 0x3c6ef372 + h[3] = 0xa54ff53a + h[4] = 0x510e527f + h[5] = 0x9b05688c + h[6] = 0x1f83d9ab + h[7] = 0x5be0cd19 + + blockSha(&h, m) + l0 := len(m) + l := l0 & (BlockSize - 1) + m = m[l0-l:] + + var k [64]byte + copy(k[:], m) + + k[l] = 0x80 + + if l >= 56 { + blockSha(&h, k[:]) + binary.LittleEndian.PutUint64(k[0:8], 0) + binary.LittleEndian.PutUint64(k[8:16], 0) + binary.LittleEndian.PutUint64(k[16:24], 0) + binary.LittleEndian.PutUint64(k[24:32], 0) + binary.LittleEndian.PutUint64(k[32:40], 0) + binary.LittleEndian.PutUint64(k[40:48], 0) + binary.LittleEndian.PutUint64(k[48:56], 0) + } + binary.BigEndian.PutUint64(k[56:64], uint64(l0)<<3) + blockSha(&h, k[:]) + + binary.BigEndian.PutUint32(r[0:4], h[0]) + binary.BigEndian.PutUint32(r[4:8], h[1]) + binary.BigEndian.PutUint32(r[8:12], h[2]) + binary.BigEndian.PutUint32(r[12:16], h[3]) + binary.BigEndian.PutUint32(r[16:20], h[4]) + binary.BigEndian.PutUint32(r[20:24], h[5]) + binary.BigEndian.PutUint32(r[24:28], h[6]) + binary.BigEndian.PutUint32(r[28:32], h[7]) + + return +} + +func runTestSha(hashfunc func([]byte) [32]byte) bool { + var m = []byte("This is a message. This is a message. This is a message. This is a message.") + + ar := hashfunc(m) + br := sha256.Sum256(m) + + return ar == br +} + +func TestSha0(t *testing.T) { + if !runTestSha(Sum256) { + t.Errorf("FAILED") + } +} + +func TestSha1(t *testing.T) { + if sha && ssse3 && sse41 && !runTestSha(sha256hash) { + t.Errorf("FAILED") + } +} diff --git a/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.go new file mode 100644 index 0000000000..1ae2320bd5 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.go @@ -0,0 +1,22 @@ +//+build !noasm,!appengine + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sha256 + +//go:noescape +func blockSsse(h []uint32, message []uint8, reserved0, reserved1, reserved2, reserved3 uint64) diff --git a/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.s new file mode 100644 index 0000000000..7afb45c87f --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.s @@ -0,0 +1,429 @@ +//+build !noasm,!appengine + +// SHA256 implementation for SSSE3 + +// +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// +// This code is based on an Intel White-Paper: +// "Fast SHA-256 Implementations on Intel Architecture Processors" +// +// together with the reference implementation from the following authors: +// James Guilford +// Kirk Yap +// Tim Chen +// +// For Golang it has been converted to Plan 9 assembly with the help of +// github.com/minio/asm2plan9s to assemble Intel instructions to their Plan9 +// equivalents +// + +#include "textflag.h" + +#define ROTATE_XS \ + MOVOU X4, X15 \ + MOVOU X5, X4 \ + MOVOU X6, X5 \ + MOVOU X7, X6 \ + MOVOU X15, X7 + +// compute s0 four at a time and s1 two at a time +// compute W[-16] + W[-7] 4 at a time +#define FOUR_ROUNDS_AND_SCHED(a, b, c, d, e, f, g, h) \ + MOVL e, R13 \ // y0 = e + ROLL $18, R13 \ // y0 = e >> (25-11) + MOVL a, R14 \ // y1 = a + MOVOU X7, X0 \ + LONG $0x0f3a0f66; WORD $0x04c6 \ // PALIGNR XMM0,XMM6,0x4 /* XTMP0 = W[-7] */ + ROLL $23, R14 \ // y1 = a >> (22-13) + XORL e, R13 \ // y0 = e ^ (e >> (25-11)) + MOVL f, R15 \ // y2 = f + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + XORL a, R14 \ // y1 = a ^ (a >> (22-13) + XORL g, R15 \ // y2 = f^g + LONG $0xc4fe0f66 \ // PADDD XMM0,XMM4 /* XTMP0 = W[-7] + W[-16] */ + XORL e, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6) ) + ANDL e, R15 \ // y2 = (f^g)&e + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + \ + \ // compute s0 + \ + MOVOU X5, X1 \ + LONG $0x0f3a0f66; WORD $0x04cc \ // PALIGNR XMM1,XMM4,0x4 /* XTMP1 = W[-15] */ + XORL a, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + XORL g, R15 \ // y2 = CH = ((f^g)&e)^g + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + ADDL R13, R15 \ // y2 = S1 + CH + ADDL _xfer+48(FP), R15 \ // y2 = k + w + S1 + CH + MOVL a, R13 \ // y0 = a + ADDL R15, h \ // h = h + S1 + CH + k + w + \ // ROTATE_ARGS + MOVL a, R15 \ // y2 = a + MOVOU X1, X2 \ + LONG $0xd2720f66; BYTE $0x07 \ // PSRLD XMM2,0x7 /* */ + ORL c, R13 \ // y0 = a|c + ADDL h, d \ 
// d = d + h + S1 + CH + k + w + ANDL c, R15 \ // y2 = a&c + MOVOU X1, X3 \ + LONG $0xf3720f66; BYTE $0x19 \ // PSLLD XMM3,0x19 /* */ + ANDL b, R13 \ // y0 = (a|c)&b + ADDL R14, h \ // h = h + S1 + CH + k + w + S0 + LONG $0xdaeb0f66 \ // POR XMM3,XMM2 /* XTMP1 = W[-15] MY_ROR 7 */ + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL R13, h \ // h = h + S1 + CH + k + w + S0 + MAJ + \ // ROTATE_ARGS + MOVL d, R13 \ // y0 = e + MOVL h, R14 \ // y1 = a + ROLL $18, R13 \ // y0 = e >> (25-11) + XORL d, R13 \ // y0 = e ^ (e >> (25-11)) + MOVL e, R15 \ // y2 = f + ROLL $23, R14 \ // y1 = a >> (22-13) + MOVOU X1, X2 \ + LONG $0xd2720f66; BYTE $0x12 \ // PSRLD XMM2,0x12 /* */ + XORL h, R14 \ // y1 = a ^ (a >> (22-13) + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + XORL f, R15 \ // y2 = f^g + MOVOU X1, X8 \ + LONG $0x720f4166; WORD $0x03d0 \ // PSRLD XMM8,0x3 /* XTMP4 = W[-15] >> 3 */ + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + XORL d, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + ANDL d, R15 \ // y2 = (f^g)&e + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + LONG $0xf1720f66; BYTE $0x0e \ // PSLLD XMM1,0xe /* */ + XORL h, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + XORL f, R15 \ // y2 = CH = ((f^g)&e)^g + LONG $0xd9ef0f66 \ // PXOR XMM3,XMM1 /* */ + ADDL R13, R15 \ // y2 = S1 + CH + ADDL _xfer+52(FP), R15 \ // y2 = k + w + S1 + CH + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + LONG $0xdaef0f66 \ // PXOR XMM3,XMM2 /* XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR */ + MOVL h, R13 \ // y0 = a + ADDL R15, g \ // h = h + S1 + CH + k + w + MOVL h, R15 \ // y2 = a + MOVOU X3, X1 \ + LONG $0xef0f4166; BYTE $0xc8 \ // PXOR XMM1,XMM8 /* XTMP1 = s0 */ + ORL b, R13 \ // y0 = a|c + ADDL g, c \ // d = d + h + S1 + CH + k + w + ANDL b, R15 \ // y2 = a&c + \ + \ // compute low s1 + \ + LONG $0xd7700f66; BYTE $0xfa \ // PSHUFD XMM2,XMM7,0xfa /* XTMP2 = W[-2] {BBAA} */ + ANDL a, R13 \ // y0 = (a|c)&b + ADDL R14, g \ // h = h + S1 + CH + k + w 
+ S0 + LONG $0xc1fe0f66 \ // PADDD XMM0,XMM1 /* XTMP0 = W[-16] + W[-7] + s0 */ + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL R13, g \ // h = h + S1 + CH + k + w + S0 + MAJ + \ // ROTATE_ARGS + MOVL c, R13 \ // y0 = e + MOVL g, R14 \ // y1 = a + ROLL $18, R13 \ // y0 = e >> (25-11) + XORL c, R13 \ // y0 = e ^ (e >> (25-11)) + ROLL $23, R14 \ // y1 = a >> (22-13) + MOVL d, R15 \ // y2 = f + XORL g, R14 \ // y1 = a ^ (a >> (22-13) + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + MOVOU X2, X8 \ + LONG $0x720f4166; WORD $0x0ad0 \ // PSRLD XMM8,0xa /* XTMP4 = W[-2] >> 10 {BBAA} */ + XORL e, R15 \ // y2 = f^g + MOVOU X2, X3 \ + LONG $0xd3730f66; BYTE $0x13 \ // PSRLQ XMM3,0x13 /* XTMP3 = W[-2] MY_ROR 19 {xBxA} */ + XORL c, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + ANDL c, R15 \ // y2 = (f^g)&e + LONG $0xd2730f66; BYTE $0x11 \ // PSRLQ XMM2,0x11 /* XTMP2 = W[-2] MY_ROR 17 {xBxA} */ + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + XORL g, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + XORL e, R15 \ // y2 = CH = ((f^g)&e)^g + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + LONG $0xd3ef0f66 \ // PXOR XMM2,XMM3 /* */ + ADDL R13, R15 \ // y2 = S1 + CH + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + ADDL _xfer+56(FP), R15 \ // y2 = k + w + S1 + CH + LONG $0xef0f4466; BYTE $0xc2 \ // PXOR XMM8,XMM2 /* XTMP4 = s1 {xBxA} */ + MOVL g, R13 \ // y0 = a + ADDL R15, f \ // h = h + S1 + CH + k + w + MOVL g, R15 \ // y2 = a + LONG $0x380f4566; WORD $0xc200 \ // PSHUFB XMM8,XMM10 /* XTMP4 = s1 {00BA} */ + ORL a, R13 \ // y0 = a|c + ADDL f, b \ // d = d + h + S1 + CH + k + w + ANDL a, R15 \ // y2 = a&c + LONG $0xfe0f4166; BYTE $0xc0 \ // PADDD XMM0,XMM8 /* XTMP0 = {..., ..., W[1], W[0]} */ + ANDL h, R13 \ // y0 = (a|c)&b + ADDL R14, f \ // h = h + S1 + CH + k + w + S0 + \ + \ // compute high s1 + \ + LONG $0xd0700f66; BYTE $0x50 \ // PSHUFD XMM2,XMM0,0x50 /* XTMP2 = W[-2] {DDCC} */ + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL 
R13, f \ // h = h + S1 + CH + k + w + S0 + MAJ + \ // ROTATE_ARGS + MOVL b, R13 \ // y0 = e + ROLL $18, R13 \ // y0 = e >> (25-11) + MOVL f, R14 \ // y1 = a + ROLL $23, R14 \ // y1 = a >> (22-13) + XORL b, R13 \ // y0 = e ^ (e >> (25-11)) + MOVL c, R15 \ // y2 = f + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + MOVOU X2, X11 \ + LONG $0x720f4166; WORD $0x0ad3 \ // PSRLD XMM11,0xa /* XTMP5 = W[-2] >> 10 {DDCC} */ + XORL f, R14 \ // y1 = a ^ (a >> (22-13) + XORL d, R15 \ // y2 = f^g + MOVOU X2, X3 \ + LONG $0xd3730f66; BYTE $0x13 \ // PSRLQ XMM3,0x13 /* XTMP3 = W[-2] MY_ROR 19 {xDxC} */ + XORL b, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + ANDL b, R15 \ // y2 = (f^g)&e + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + LONG $0xd2730f66; BYTE $0x11 \ // PSRLQ XMM2,0x11 /* XTMP2 = W[-2] MY_ROR 17 {xDxC} */ + XORL f, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + XORL d, R15 \ // y2 = CH = ((f^g)&e)^g + LONG $0xd3ef0f66 \ // PXOR XMM2,XMM3 /* */ + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + ADDL R13, R15 \ // y2 = S1 + CH + ADDL _xfer+60(FP), R15 \ // y2 = k + w + S1 + CH + LONG $0xef0f4466; BYTE $0xda \ // PXOR XMM11,XMM2 /* XTMP5 = s1 {xDxC} */ + MOVL f, R13 \ // y0 = a + ADDL R15, e \ // h = h + S1 + CH + k + w + MOVL f, R15 \ // y2 = a + LONG $0x380f4566; WORD $0xdc00 \ // PSHUFB XMM11,XMM12 /* XTMP5 = s1 {DC00} */ + ORL h, R13 \ // y0 = a|c + ADDL e, a \ // d = d + h + S1 + CH + k + w + ANDL h, R15 \ // y2 = a&c + MOVOU X11, X4 \ + LONG $0xe0fe0f66 \ // PADDD XMM4,XMM0 /* X0 = {W[3], W[2], W[1], W[0]} */ + ANDL g, R13 \ // y0 = (a|c)&b + ADDL R14, e \ // h = h + S1 + CH + k + w + S0 + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL R13, e \ // h = h + S1 + CH + k + w + S0 + MAJ + \ // ROTATE_ARGS + ROTATE_XS + +#define DO_ROUND(a, b, c, d, e, f, g, h, offset) \ + MOVL e, R13 \ // y0 = e + ROLL $18, R13 \ // y0 = e >> (25-11) + MOVL a, R14 \ // y1 = a + XORL e, R13 \ 
// y0 = e ^ (e >> (25-11)) + ROLL $23, R14 \ // y1 = a >> (22-13) + MOVL f, R15 \ // y2 = f + XORL a, R14 \ // y1 = a ^ (a >> (22-13) + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + XORL g, R15 \ // y2 = f^g + XORL e, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + ANDL e, R15 \ // y2 = (f^g)&e + XORL a, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + XORL g, R15 \ // y2 = CH = ((f^g)&e)^g + ADDL R13, R15 \ // y2 = S1 + CH + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + ADDL _xfer+offset(FP), R15 \ // y2 = k + w + S1 + CH + MOVL a, R13 \ // y0 = a + ADDL R15, h \ // h = h + S1 + CH + k + w + MOVL a, R15 \ // y2 = a + ORL c, R13 \ // y0 = a|c + ADDL h, d \ // d = d + h + S1 + CH + k + w + ANDL c, R15 \ // y2 = a&c + ANDL b, R13 \ // y0 = (a|c)&b + ADDL R14, h \ // h = h + S1 + CH + k + w + S0 + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL R13, h // h = h + S1 + CH + k + w + S0 + MAJ + +// func blockSsse(h []uint32, message []uint8, reserved0, reserved1, reserved2, reserved3 uint64) +TEXT ·blockSsse(SB), 7, $0-80 + + MOVQ h+0(FP), SI // SI: &h + MOVQ message_base+24(FP), R8 // &message + MOVQ message_len+32(FP), R9 // length of message + CMPQ R9, $0 + JEQ done_hash + ADDQ R8, R9 + MOVQ R9, reserved2+64(FP) // store end of message + + // Register definition + // a --> eax + // b --> ebx + // c --> ecx + // d --> r8d + // e --> edx + // f --> r9d + // g --> r10d + // h --> r11d + // + // y0 --> r13d + // y1 --> r14d + // y2 --> r15d + + MOVL (0*4)(SI), AX // a = H0 + MOVL (1*4)(SI), BX // b = H1 + MOVL (2*4)(SI), CX // c = H2 + MOVL (3*4)(SI), R8 // d = H3 + MOVL (4*4)(SI), DX // e = H4 + MOVL (5*4)(SI), R9 // f = H5 + MOVL (6*4)(SI), R10 // g = H6 + MOVL (7*4)(SI), R11 // h = H7 + + MOVOU bflipMask<>(SB), X13 + MOVOU shuf00BA<>(SB), X10 // shuffle xBxA -> 00BA + MOVOU shufDC00<>(SB), X12 // shuffle xDxC -> DC00 + + MOVQ 
message_base+24(FP), SI // SI: &message + +loop0: + LEAQ constants<>(SB), BP + + // byte swap first 16 dwords + MOVOU 0*16(SI), X4 + LONG $0x380f4166; WORD $0xe500 // PSHUFB XMM4, XMM13 + MOVOU 1*16(SI), X5 + LONG $0x380f4166; WORD $0xed00 // PSHUFB XMM5, XMM13 + MOVOU 2*16(SI), X6 + LONG $0x380f4166; WORD $0xf500 // PSHUFB XMM6, XMM13 + MOVOU 3*16(SI), X7 + LONG $0x380f4166; WORD $0xfd00 // PSHUFB XMM7, XMM13 + + MOVQ SI, reserved3+72(FP) + MOVD $0x3, DI + + // Align + // nop WORD PTR [rax+rax*1+0x0] + + // schedule 48 input dwords, by doing 3 rounds of 16 each +loop1: + MOVOU X4, X9 + LONG $0xfe0f4466; WORD $0x004d // PADDD XMM9, 0[RBP] /* Add 1st constant to first part of message */ + MOVOU X9, reserved0+48(FP) + FOUR_ROUNDS_AND_SCHED(AX, BX, CX, R8, DX, R9, R10, R11) + + MOVOU X4, X9 + LONG $0xfe0f4466; WORD $0x104d // PADDD XMM9, 16[RBP] /* Add 2nd constant to message */ + MOVOU X9, reserved0+48(FP) + FOUR_ROUNDS_AND_SCHED(DX, R9, R10, R11, AX, BX, CX, R8) + + MOVOU X4, X9 + LONG $0xfe0f4466; WORD $0x204d // PADDD XMM9, 32[RBP] /* Add 3rd constant to message */ + MOVOU X9, reserved0+48(FP) + FOUR_ROUNDS_AND_SCHED(AX, BX, CX, R8, DX, R9, R10, R11) + + MOVOU X4, X9 + LONG $0xfe0f4466; WORD $0x304d // PADDD XMM9, 48[RBP] /* Add 4th constant to message */ + MOVOU X9, reserved0+48(FP) + ADDQ $64, BP + FOUR_ROUNDS_AND_SCHED(DX, R9, R10, R11, AX, BX, CX, R8) + + SUBQ $1, DI + JNE loop1 + + MOVD $0x2, DI + +loop2: + MOVOU X4, X9 + LONG $0xfe0f4466; WORD $0x004d // PADDD XMM9, 0[RBP] /* Add 1st constant to first part of message */ + MOVOU X9, reserved0+48(FP) + DO_ROUND( AX, BX, CX, R8, DX, R9, R10, R11, 48) + DO_ROUND(R11, AX, BX, CX, R8, DX, R9, R10, 52) + DO_ROUND(R10, R11, AX, BX, CX, R8, DX, R9, 56) + DO_ROUND( R9, R10, R11, AX, BX, CX, R8, DX, 60) + + MOVOU X5, X9 + LONG $0xfe0f4466; WORD $0x104d // PADDD XMM9, 16[RBP] /* Add 2nd constant to message */ + MOVOU X9, reserved0+48(FP) + ADDQ $32, BP + DO_ROUND( DX, R9, R10, R11, AX, BX, CX, R8, 48) + DO_ROUND( R8, 
DX, R9, R10, R11, AX, BX, CX, 52) + DO_ROUND( CX, R8, DX, R9, R10, R11, AX, BX, 56) + DO_ROUND( BX, CX, R8, DX, R9, R10, R11, AX, 60) + + MOVOU X6, X4 + MOVOU X7, X5 + + SUBQ $1, DI + JNE loop2 + + MOVQ h+0(FP), SI // SI: &h + ADDL (0*4)(SI), AX // H0 = a + H0 + MOVL AX, (0*4)(SI) + ADDL (1*4)(SI), BX // H1 = b + H1 + MOVL BX, (1*4)(SI) + ADDL (2*4)(SI), CX // H2 = c + H2 + MOVL CX, (2*4)(SI) + ADDL (3*4)(SI), R8 // H3 = d + H3 + MOVL R8, (3*4)(SI) + ADDL (4*4)(SI), DX // H4 = e + H4 + MOVL DX, (4*4)(SI) + ADDL (5*4)(SI), R9 // H5 = f + H5 + MOVL R9, (5*4)(SI) + ADDL (6*4)(SI), R10 // H6 = g + H6 + MOVL R10, (6*4)(SI) + ADDL (7*4)(SI), R11 // H7 = h + H7 + MOVL R11, (7*4)(SI) + + MOVQ reserved3+72(FP), SI + ADDQ $64, SI + CMPQ reserved2+64(FP), SI + JNE loop0 + +done_hash: + RET + +// Constants table +DATA constants<>+0x0(SB)/8, $0x71374491428a2f98 +DATA constants<>+0x8(SB)/8, $0xe9b5dba5b5c0fbcf +DATA constants<>+0x10(SB)/8, $0x59f111f13956c25b +DATA constants<>+0x18(SB)/8, $0xab1c5ed5923f82a4 +DATA constants<>+0x20(SB)/8, $0x12835b01d807aa98 +DATA constants<>+0x28(SB)/8, $0x550c7dc3243185be +DATA constants<>+0x30(SB)/8, $0x80deb1fe72be5d74 +DATA constants<>+0x38(SB)/8, $0xc19bf1749bdc06a7 +DATA constants<>+0x40(SB)/8, $0xefbe4786e49b69c1 +DATA constants<>+0x48(SB)/8, $0x240ca1cc0fc19dc6 +DATA constants<>+0x50(SB)/8, $0x4a7484aa2de92c6f +DATA constants<>+0x58(SB)/8, $0x76f988da5cb0a9dc +DATA constants<>+0x60(SB)/8, $0xa831c66d983e5152 +DATA constants<>+0x68(SB)/8, $0xbf597fc7b00327c8 +DATA constants<>+0x70(SB)/8, $0xd5a79147c6e00bf3 +DATA constants<>+0x78(SB)/8, $0x1429296706ca6351 +DATA constants<>+0x80(SB)/8, $0x2e1b213827b70a85 +DATA constants<>+0x88(SB)/8, $0x53380d134d2c6dfc +DATA constants<>+0x90(SB)/8, $0x766a0abb650a7354 +DATA constants<>+0x98(SB)/8, $0x92722c8581c2c92e +DATA constants<>+0xa0(SB)/8, $0xa81a664ba2bfe8a1 +DATA constants<>+0xa8(SB)/8, $0xc76c51a3c24b8b70 +DATA constants<>+0xb0(SB)/8, $0xd6990624d192e819 +DATA constants<>+0xb8(SB)/8, 
$0x106aa070f40e3585 +DATA constants<>+0xc0(SB)/8, $0x1e376c0819a4c116 +DATA constants<>+0xc8(SB)/8, $0x34b0bcb52748774c +DATA constants<>+0xd0(SB)/8, $0x4ed8aa4a391c0cb3 +DATA constants<>+0xd8(SB)/8, $0x682e6ff35b9cca4f +DATA constants<>+0xe0(SB)/8, $0x78a5636f748f82ee +DATA constants<>+0xe8(SB)/8, $0x8cc7020884c87814 +DATA constants<>+0xf0(SB)/8, $0xa4506ceb90befffa +DATA constants<>+0xf8(SB)/8, $0xc67178f2bef9a3f7 + +DATA bflipMask<>+0x00(SB)/8, $0x0405060700010203 +DATA bflipMask<>+0x08(SB)/8, $0x0c0d0e0f08090a0b + +DATA shuf00BA<>+0x00(SB)/8, $0x0b0a090803020100 +DATA shuf00BA<>+0x08(SB)/8, $0xFFFFFFFFFFFFFFFF + +DATA shufDC00<>+0x00(SB)/8, $0xFFFFFFFFFFFFFFFF +DATA shufDC00<>+0x08(SB)/8, $0x0b0a090803020100 + +GLOBL constants<>(SB), 8, $256 +GLOBL bflipMask<>(SB), (NOPTR+RODATA), $16 +GLOBL shuf00BA<>(SB), (NOPTR+RODATA), $16 +GLOBL shufDC00<>(SB), (NOPTR+RODATA), $16 diff --git a/vendor/github.com/minio/sha256-simd/sha256block_amd64.go b/vendor/github.com/minio/sha256-simd/sha256block_amd64.go new file mode 100644 index 0000000000..1c4d97f0c8 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256block_amd64.go @@ -0,0 +1,53 @@ +//+build !noasm,!appengine + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package sha256 + +func blockArmGo(dig *digest, p []byte) {} + +func blockAvxGo(dig *digest, p []byte) { + + h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]} + + blockAvx(h[:], p[:], 0, 0, 0, 0) + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] +} + +func blockAvx2Go(dig *digest, p []byte) { + + h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]} + + blockAvx2(h[:], p[:]) + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] +} + +func blockSsseGo(dig *digest, p []byte) { + + h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]} + + blockSsse(h[:], p[:], 0, 0, 0, 0) + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] +} + +func blockShaGo(dig *digest, p []byte) { + + blockSha(&dig.h, p) +} diff --git a/vendor/github.com/minio/sha256-simd/sha256block_arm64.go b/vendor/github.com/minio/sha256-simd/sha256block_arm64.go new file mode 100644 index 0000000000..0979c20ae6 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256block_arm64.go @@ -0,0 +1,37 @@ +//+build !noasm,!appengine + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package sha256 + +func blockAvx2Go(dig *digest, p []byte) {} +func blockAvxGo(dig *digest, p []byte) {} +func blockSsseGo(dig *digest, p []byte) {} +func blockShaGo(dig *digest, p []byte) {} + +//go:noescape +func blockArm(h []uint32, message []uint8) + +func blockArmGo(dig *digest, p []byte) { + + h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]} + + blockArm(h[:], p[:]) + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4], + h[5], h[6], h[7] +} diff --git a/vendor/github.com/minio/sha256-simd/sha256block_arm64.s b/vendor/github.com/minio/sha256-simd/sha256block_arm64.s new file mode 100644 index 0000000000..c6ddb37179 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256block_arm64.s @@ -0,0 +1,192 @@ +//+build !noasm,!appengine + +// ARM64 version of SHA256 + +// +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// +// Based on implementation as found in https://github.com/jocover/sha256-armv8 +// +// Use github.com/minio/asm2plan9s on this file to assemble ARM instructions to +// their Plan9 equivalents +// + +TEXT ·blockArm(SB), 7, $0 + MOVD h+0(FP), R0 + MOVD message+24(FP), R1 + MOVD message_len+32(FP), R2 // length of message + SUBS $64, R2 + BMI complete + + // Load constants table pointer + MOVD $·constants(SB), R3 + + // Cache constants table in registers v16 - v31 + WORD $0x4cdf2870 // ld1 {v16.4s-v19.4s}, [x3], #64 + WORD $0x4cdf7800 // ld1 {v0.4s}, [x0], #16 + WORD $0x4cdf2874 // ld1 {v20.4s-v23.4s}, [x3], #64 + + WORD $0x4c407801 // ld1 {v1.4s}, [x0] + WORD $0x4cdf2878 // ld1 {v24.4s-v27.4s}, [x3], #64 + WORD $0xd1004000 // sub x0, x0, #0x10 + WORD $0x4cdf287c // ld1 {v28.4s-v31.4s}, [x3], #64 + +loop: + // Main loop + WORD $0x4cdf2025 // ld1 {v5.16b-v8.16b}, [x1], #64 + WORD $0x4ea01c02 // mov v2.16b, v0.16b + WORD $0x4ea11c23 // mov v3.16b, v1.16b + WORD $0x6e2008a5 // rev32 v5.16b, v5.16b + WORD $0x6e2008c6 // rev32 v6.16b, v6.16b + WORD $0x4eb084a9 // add v9.4s, v5.4s, v16.4s + WORD $0x6e2008e7 // rev32 v7.16b, v7.16b + WORD $0x4eb184ca // add v10.4s, v6.4s, v17.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s + WORD $0x6e200908 // rev32 v8.16b, v8.16b + WORD $0x4eb284e9 // add v9.4s, v7.4s, v18.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s + WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s + WORD $0x4eb3850a // add v10.4s, v8.4s, v19.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e282907 // sha256su0 v7.4s, v8.4s + WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s + WORD $0x4eb484a9 // add 
v9.4s, v5.4s, v20.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s + WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s + WORD $0x4eb584ca // add v10.4s, v6.4s, v21.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s + WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s + WORD $0x4eb684e9 // add v9.4s, v7.4s, v22.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s + WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s + WORD $0x4eb7850a // add v10.4s, v8.4s, v23.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e282907 // sha256su0 v7.4s, v8.4s + WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s + WORD $0x4eb884a9 // add v9.4s, v5.4s, v24.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s + WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s + WORD $0x4eb984ca // add v10.4s, v6.4s, v25.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s + WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s + WORD $0x4eba84e9 // add v9.4s, v7.4s, v26.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s + WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s + WORD $0x4ebb850a // add v10.4s, v8.4s, v27.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD 
$0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e282907 // sha256su0 v7.4s, v8.4s + WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s + WORD $0x4ebc84a9 // add v9.4s, v5.4s, v28.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s + WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s + WORD $0x4ebd84ca // add v10.4s, v6.4s, v29.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s + WORD $0x4ebe84e9 // add v9.4s, v7.4s, v30.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x4ebf850a // add v10.4s, v8.4s, v31.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x4ea38421 // add v1.4s, v1.4s, v3.4s + WORD $0x4ea28400 // add v0.4s, v0.4s, v2.4s + + SUBS $64, R2 + BPL loop + + // Store result + WORD $0x4c00a800 // st1 {v0.4s, v1.4s}, [x0] + +complete: + RET + +// Constants table +DATA ·constants+0x0(SB)/8, $0x71374491428a2f98 +DATA ·constants+0x8(SB)/8, $0xe9b5dba5b5c0fbcf +DATA ·constants+0x10(SB)/8, $0x59f111f13956c25b +DATA ·constants+0x18(SB)/8, $0xab1c5ed5923f82a4 +DATA ·constants+0x20(SB)/8, $0x12835b01d807aa98 +DATA ·constants+0x28(SB)/8, $0x550c7dc3243185be +DATA ·constants+0x30(SB)/8, $0x80deb1fe72be5d74 +DATA ·constants+0x38(SB)/8, $0xc19bf1749bdc06a7 +DATA ·constants+0x40(SB)/8, $0xefbe4786e49b69c1 +DATA ·constants+0x48(SB)/8, $0x240ca1cc0fc19dc6 +DATA ·constants+0x50(SB)/8, $0x4a7484aa2de92c6f +DATA ·constants+0x58(SB)/8, $0x76f988da5cb0a9dc +DATA 
·constants+0x60(SB)/8, $0xa831c66d983e5152 +DATA ·constants+0x68(SB)/8, $0xbf597fc7b00327c8 +DATA ·constants+0x70(SB)/8, $0xd5a79147c6e00bf3 +DATA ·constants+0x78(SB)/8, $0x1429296706ca6351 +DATA ·constants+0x80(SB)/8, $0x2e1b213827b70a85 +DATA ·constants+0x88(SB)/8, $0x53380d134d2c6dfc +DATA ·constants+0x90(SB)/8, $0x766a0abb650a7354 +DATA ·constants+0x98(SB)/8, $0x92722c8581c2c92e +DATA ·constants+0xa0(SB)/8, $0xa81a664ba2bfe8a1 +DATA ·constants+0xa8(SB)/8, $0xc76c51a3c24b8b70 +DATA ·constants+0xb0(SB)/8, $0xd6990624d192e819 +DATA ·constants+0xb8(SB)/8, $0x106aa070f40e3585 +DATA ·constants+0xc0(SB)/8, $0x1e376c0819a4c116 +DATA ·constants+0xc8(SB)/8, $0x34b0bcb52748774c +DATA ·constants+0xd0(SB)/8, $0x4ed8aa4a391c0cb3 +DATA ·constants+0xd8(SB)/8, $0x682e6ff35b9cca4f +DATA ·constants+0xe0(SB)/8, $0x78a5636f748f82ee +DATA ·constants+0xe8(SB)/8, $0x8cc7020884c87814 +DATA ·constants+0xf0(SB)/8, $0xa4506ceb90befffa +DATA ·constants+0xf8(SB)/8, $0xc67178f2bef9a3f7 + +GLOBL ·constants(SB), 8, $256 + diff --git a/vendor/github.com/minio/sha256-simd/sha256block_other.go b/vendor/github.com/minio/sha256-simd/sha256block_other.go new file mode 100644 index 0000000000..0187c950a8 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256block_other.go @@ -0,0 +1,25 @@ +//+build appengine noasm !amd64,!arm64 + +/* + * Minio Cloud Storage, (C) 2019 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package sha256 + +func blockAvx2Go(dig *digest, p []byte) {} +func blockAvxGo(dig *digest, p []byte) {} +func blockSsseGo(dig *digest, p []byte) {} +func blockShaGo(dig *digest, p []byte) {} +func blockArmGo(dig *digest, p []byte) {} diff --git a/vendor/github.com/minio/sha256-simd/test-architectures.sh b/vendor/github.com/minio/sha256-simd/test-architectures.sh new file mode 100755 index 0000000000..50150eaabe --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/test-architectures.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +set -e + +go tool dist list | while IFS=/ read os arch; do + echo "Checking $os/$arch..." + echo " normal" + GOARCH=$arch GOOS=$os go build -o /dev/null ./... + echo " noasm" + GOARCH=$arch GOOS=$os go build -tags noasm -o /dev/null ./... + echo " appengine" + GOARCH=$arch GOOS=$os go build -tags appengine -o /dev/null ./... + echo " noasm,appengine" + GOARCH=$arch GOOS=$os go build -tags 'appengine noasm' -o /dev/null ./... +done diff --git a/vendor/github.com/mr-tron/base58/base58/DEPRECATED.md b/vendor/github.com/mr-tron/base58/base58/DEPRECATED.md new file mode 100644 index 0000000000..0cc7ec7229 --- /dev/null +++ b/vendor/github.com/mr-tron/base58/base58/DEPRECATED.md @@ -0,0 +1,4 @@ +Files from this directory was copied to level up directory +========================================================== + +Now all development will be on top level \ No newline at end of file diff --git a/vendor/github.com/mr-tron/base58/base58/alphabet.go b/vendor/github.com/mr-tron/base58/base58/alphabet.go new file mode 100644 index 0000000000..a0f887835a --- /dev/null +++ b/vendor/github.com/mr-tron/base58/base58/alphabet.go @@ -0,0 +1,31 @@ +package base58 + +// Alphabet is a a b58 alphabet. +type Alphabet struct { + decode [128]int8 + encode [58]byte +} + +// NewAlphabet creates a new alphabet from the passed string. +// +// It panics if the passed string is not 58 bytes long or isn't valid ASCII. 
+func NewAlphabet(s string) *Alphabet { + if len(s) != 58 { + panic("base58 alphabets must be 58 bytes long") + } + ret := new(Alphabet) + copy(ret.encode[:], s) + for i := range ret.decode { + ret.decode[i] = -1 + } + for i, b := range ret.encode { + ret.decode[b] = int8(i) + } + return ret +} + +// BTCAlphabet is the bitcoin base58 alphabet. +var BTCAlphabet = NewAlphabet("123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz") + +// FlickrAlphabet is the flickr base58 alphabet. +var FlickrAlphabet = NewAlphabet("123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ") diff --git a/vendor/github.com/mr-tron/base58/base58/base58.go b/vendor/github.com/mr-tron/base58/base58/base58.go new file mode 100644 index 0000000000..0bbdfc0b95 --- /dev/null +++ b/vendor/github.com/mr-tron/base58/base58/base58.go @@ -0,0 +1,261 @@ +package base58 + +import ( + "fmt" + "math/big" +) + +var ( + bn0 = big.NewInt(0) + bn58 = big.NewInt(58) +) + +// Encode encodes the passed bytes into a base58 encoded string. +func Encode(bin []byte) string { + return FastBase58Encoding(bin) +} + +// EncodeAlphabet encodes the passed bytes into a base58 encoded string with the +// passed alphabet. +func EncodeAlphabet(bin []byte, alphabet *Alphabet) string { + return FastBase58EncodingAlphabet(bin, alphabet) +} + +// FastBase58Encoding encodes the passed bytes into a base58 encoded string. +func FastBase58Encoding(bin []byte) string { + return FastBase58EncodingAlphabet(bin, BTCAlphabet) +} + +// FastBase58EncodingAlphabet encodes the passed bytes into a base58 encoded +// string with the passed alphabet. 
+func FastBase58EncodingAlphabet(bin []byte, alphabet *Alphabet) string { + zero := alphabet.encode[0] + + binsz := len(bin) + var i, j, zcount, high int + var carry uint32 + + for zcount < binsz && bin[zcount] == 0 { + zcount++ + } + + size := ((binsz-zcount)*138/100 + 1) + + // allocate one big buffer up front + buf := make([]byte, size*2+zcount) + + // use the second half for the temporary buffer + tmp := buf[size+zcount:] + + high = size - 1 + for i = zcount; i < binsz; i++ { + j = size - 1 + for carry = uint32(bin[i]); j > high || carry != 0; j-- { + carry = carry + 256*uint32(tmp[j]) + tmp[j] = byte(carry % 58) + carry /= 58 + } + high = j + } + + for j = 0; j < size && tmp[j] == 0; j++ { + } + + // Use the first half for the result + b58 := buf[:size-j+zcount] + + if zcount != 0 { + for i = 0; i < zcount; i++ { + b58[i] = zero + } + } + + for i = zcount; j < size; i++ { + b58[i] = alphabet.encode[tmp[j]] + j++ + } + + return string(b58) +} + +// TrivialBase58Encoding encodes the passed bytes into a base58 encoded string +// (inefficiently). +func TrivialBase58Encoding(a []byte) string { + return TrivialBase58EncodingAlphabet(a, BTCAlphabet) +} + +// TrivialBase58EncodingAlphabet encodes the passed bytes into a base58 encoded +// string (inefficiently) with the passed alphabet. +func TrivialBase58EncodingAlphabet(a []byte, alphabet *Alphabet) string { + zero := alphabet.encode[0] + idx := len(a)*138/100 + 1 + buf := make([]byte, idx) + bn := new(big.Int).SetBytes(a) + var mo *big.Int + for bn.Cmp(bn0) != 0 { + bn, mo = bn.DivMod(bn, bn58, new(big.Int)) + idx-- + buf[idx] = alphabet.encode[mo.Int64()] + } + for i := range a { + if a[i] != 0 { + break + } + idx-- + buf[idx] = zero + } + return string(buf[idx:]) +} + +// Decode decodes the base58 encoded bytes. +func Decode(str string) ([]byte, error) { + return FastBase58Decoding(str) +} + +// DecodeAlphabet decodes the base58 encoded bytes using the given b58 alphabet. 
+func DecodeAlphabet(str string, alphabet *Alphabet) ([]byte, error) { + return FastBase58DecodingAlphabet(str, alphabet) +} + +// FastBase58Decoding decodes the base58 encoded bytes. +func FastBase58Decoding(str string) ([]byte, error) { + return FastBase58DecodingAlphabet(str, BTCAlphabet) +} + +// FastBase58DecodingAlphabet decodes the base58 encoded bytes using the given +// b58 alphabet. +func FastBase58DecodingAlphabet(str string, alphabet *Alphabet) ([]byte, error) { + if len(str) == 0 { + return nil, fmt.Errorf("zero length string") + } + + var ( + t uint64 + zmask, c uint32 + zcount int + + b58u = []rune(str) + b58sz = len(b58u) + + outisz = (b58sz + 3) / 4 // check to see if we need to change this buffer size to optimize + binu = make([]byte, (b58sz+3)*3) + bytesleft = b58sz % 4 + + zero = rune(alphabet.encode[0]) + ) + + if bytesleft > 0 { + zmask = (0xffffffff << uint32(bytesleft*8)) + } else { + bytesleft = 4 + } + + var outi = make([]uint32, outisz) + + for i := 0; i < b58sz && b58u[i] == zero; i++ { + zcount++ + } + + for _, r := range b58u { + if r > 127 { + return nil, fmt.Errorf("High-bit set on invalid digit") + } + if alphabet.decode[r] == -1 { + return nil, fmt.Errorf("Invalid base58 digit (%q)", r) + } + + c = uint32(alphabet.decode[r]) + + for j := (outisz - 1); j >= 0; j-- { + t = uint64(outi[j])*58 + uint64(c) + c = uint32(t>>32) & 0x3f + outi[j] = uint32(t & 0xffffffff) + } + + if c > 0 { + return nil, fmt.Errorf("Output number too big (carry to the next int32)") + } + + if outi[0]&zmask != 0 { + return nil, fmt.Errorf("Output number too big (last int32 filled too far)") + } + } + + // the nested for-loop below is the same as the original code: + // switch (bytesleft) { + // case 3: + // *(binu++) = (outi[0] & 0xff0000) >> 16; + // //-fallthrough + // case 2: + // *(binu++) = (outi[0] & 0xff00) >> 8; + // //-fallthrough + // case 1: + // *(binu++) = (outi[0] & 0xff); + // ++j; + // //-fallthrough + // default: + // break; + // } + // + // 
for (; j < outisz; ++j) + // { + // *(binu++) = (outi[j] >> 0x18) & 0xff; + // *(binu++) = (outi[j] >> 0x10) & 0xff; + // *(binu++) = (outi[j] >> 8) & 0xff; + // *(binu++) = (outi[j] >> 0) & 0xff; + // } + var j, cnt int + for j, cnt = 0, 0; j < outisz; j++ { + for mask := byte(bytesleft-1) * 8; mask <= 0x18; mask, cnt = mask-8, cnt+1 { + binu[cnt] = byte(outi[j] >> mask) + } + if j == 0 { + bytesleft = 4 // because it could be less than 4 the first time through + } + } + + for n, v := range binu { + if v > 0 { + start := n - zcount + if start < 0 { + start = 0 + } + return binu[start:cnt], nil + } + } + return binu[:cnt], nil +} + +// TrivialBase58Decoding decodes the base58 encoded bytes (inefficiently). +func TrivialBase58Decoding(str string) ([]byte, error) { + return TrivialBase58DecodingAlphabet(str, BTCAlphabet) +} + +// TrivialBase58DecodingAlphabet decodes the base58 encoded bytes +// (inefficiently) using the given b58 alphabet. +func TrivialBase58DecodingAlphabet(str string, alphabet *Alphabet) ([]byte, error) { + zero := alphabet.encode[0] + + var zcount int + for i := 0; i < len(str) && str[i] == zero; i++ { + zcount++ + } + leading := make([]byte, zcount) + + var padChar rune = -1 + src := []byte(str) + j := 0 + for ; j < len(src) && src[j] == byte(padChar); j++ { + } + + n := new(big.Int) + for i := range src[j:] { + c := alphabet.decode[src[i]] + if c == -1 { + return nil, fmt.Errorf("illegal base58 data at input index: %d", i) + } + n.Mul(n, bn58) + n.Add(n, big.NewInt(int64(c))) + } + return append(leading, n.Bytes()...), nil +} diff --git a/vendor/github.com/mr-tron/base58/base58/base58_2_test.go b/vendor/github.com/mr-tron/base58/base58/base58_2_test.go new file mode 100644 index 0000000000..ac7f7b00d5 --- /dev/null +++ b/vendor/github.com/mr-tron/base58/base58/base58_2_test.go @@ -0,0 +1,26 @@ +package base58 + +import "testing" + +func TestBase58_test2(t *testing.T) { + + testAddr := []string{ + "1QCaxc8hutpdZ62iKZsn1TCG3nh7uPZojq", + 
"1DhRmSGnhPjUaVPAj48zgPV9e2oRhAQFUb", + "17LN2oPYRYsXS9TdYdXCCDvF2FegshLDU2", + "14h2bDLZSuvRFhUL45VjPHJcW667mmRAAn", + } + + for ii, vv := range testAddr { + // num := Base58Decode([]byte(vv)) + // chk := Base58Encode(num) + num, err := FastBase58Decoding(vv) + if err != nil { + t.Errorf("Test %d, expected success, got error %s\n", ii, err) + } + chk := FastBase58Encoding(num) + if vv != string(chk) { + t.Errorf("Test %d, expected=%s got=%s Address did base58 encode/decode correctly.", ii, vv, chk) + } + } +} diff --git a/vendor/github.com/mr-tron/base58/base58/base58_test.go b/vendor/github.com/mr-tron/base58/base58/base58_test.go new file mode 100644 index 0000000000..b87c6f0d5b --- /dev/null +++ b/vendor/github.com/mr-tron/base58/base58/base58_test.go @@ -0,0 +1,117 @@ +package base58 + +import ( + "crypto/rand" + "encoding/hex" + "testing" +) + +type testValues struct { + dec []byte + enc string +} + +var n = 5000000 +var testPairs = make([]testValues, 0, n) + +func initTestPairs() { + if len(testPairs) > 0 { + return + } + // pre-make the test pairs, so it doesn't take up benchmark time... + for i := 0; i < n; i++ { + data := make([]byte, 32) + rand.Read(data) + testPairs = append(testPairs, testValues{dec: data, enc: FastBase58Encoding(data)}) + } +} + +func randAlphabet() *Alphabet { + // Permutes [0, 127] and returns the first 58 elements. + // Like (math/rand).Perm but using crypto/rand. 
+ var randomness [128]byte + rand.Read(randomness[:]) + + var bts [128]byte + for i, r := range randomness { + j := int(r) % (i + 1) + bts[i] = bts[j] + bts[j] = byte(i) + } + return NewAlphabet(string(bts[:58])) +} + +func TestFastEqTrivialEncodingAndDecoding(t *testing.T) { + for k := 0; k < 10; k++ { + testEncDecLoop(t, randAlphabet()) + } + testEncDecLoop(t, BTCAlphabet) + testEncDecLoop(t, FlickrAlphabet) +} + +func testEncDecLoop(t *testing.T, alph *Alphabet) { + for j := 1; j < 256; j++ { + var b = make([]byte, j) + for i := 0; i < 100; i++ { + rand.Read(b) + fe := FastBase58EncodingAlphabet(b, alph) + te := TrivialBase58EncodingAlphabet(b, alph) + + if fe != te { + t.Errorf("encoding err: %#v", hex.EncodeToString(b)) + } + + fd, ferr := FastBase58DecodingAlphabet(fe, alph) + if ferr != nil { + t.Errorf("fast error: %v", ferr) + } + td, terr := TrivialBase58DecodingAlphabet(te, alph) + if terr != nil { + t.Errorf("trivial error: %v", terr) + } + + if hex.EncodeToString(b) != hex.EncodeToString(td) { + t.Errorf("decoding err: %s != %s", hex.EncodeToString(b), hex.EncodeToString(td)) + } + if hex.EncodeToString(b) != hex.EncodeToString(fd) { + t.Errorf("decoding err: %s != %s", hex.EncodeToString(b), hex.EncodeToString(fd)) + } + } + } +} + +func BenchmarkTrivialBase58Encoding(b *testing.B) { + initTestPairs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + TrivialBase58Encoding([]byte(testPairs[i].dec)) + } +} + +func BenchmarkFastBase58Encoding(b *testing.B) { + initTestPairs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + FastBase58Encoding(testPairs[i].dec) + } +} + +func BenchmarkTrivialBase58Decoding(b *testing.B) { + initTestPairs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + TrivialBase58Decoding(testPairs[i].enc) + } +} + +func BenchmarkFastBase58Decoding(b *testing.B) { + initTestPairs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + FastBase58Decoding(testPairs[i].enc) + } +} diff --git 
a/vendor/github.com/multiformats/go-base32/LICENSE b/vendor/github.com/multiformats/go-base32/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/github.com/multiformats/go-base32/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/multiformats/go-base32/base32.go b/vendor/github.com/multiformats/go-base32/base32.go new file mode 100644 index 0000000000..768a235099 --- /dev/null +++ b/vendor/github.com/multiformats/go-base32/base32.go @@ -0,0 +1,505 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package base32 implements base32 encoding as specified by RFC 4648. +package base32 + +import ( + "io" + "strconv" +) + +/* + * Encodings + */ + +// An Encoding is a radix 32 encoding/decoding scheme, defined by a +// 32-character alphabet. The most common is the "base32" encoding +// introduced for SASL GSSAPI and standardized in RFC 4648. +// The alternate "base32hex" encoding is used in DNSSEC. +type Encoding struct { + encode string + decodeMap [256]byte + padChar rune +} + +// Alphabet returns the Base32 alphabet used +func (enc *Encoding) Alphabet() string { + return enc.encode +} + +const ( + StdPadding rune = '=' + NoPadding rune = -1 +) + +const encodeStd = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567" +const encodeHex = "0123456789ABCDEFGHIJKLMNOPQRSTUV" + +// NewEncoding returns a new Encoding defined by the given alphabet, +// which must be a 32-byte string. +func NewEncoding(encoder string) *Encoding { + e := new(Encoding) + e.padChar = StdPadding + e.encode = encoder + for i := 0; i < len(e.decodeMap); i++ { + e.decodeMap[i] = 0xFF + } + for i := 0; i < len(encoder); i++ { + e.decodeMap[encoder[i]] = byte(i) + } + return e +} + +// NewEncoding returns a new case insensitive Encoding defined by the +// given alphabet, which must be a 32-byte string. 
+func NewEncodingCI(encoder string) *Encoding { + e := new(Encoding) + e.padChar = StdPadding + e.encode = encoder + for i := 0; i < len(e.decodeMap); i++ { + e.decodeMap[i] = 0xFF + } + for i := 0; i < len(encoder); i++ { + e.decodeMap[asciiToLower(encoder[i])] = byte(i) + e.decodeMap[asciiToUpper(encoder[i])] = byte(i) + } + return e +} + +func asciiToLower(c byte) byte { + if c >= 'A' && c <= 'Z' { + return c + 32 + } + return c +} + +func asciiToUpper(c byte) byte { + if c >= 'a' && c <= 'z' { + return c - 32 + } + return c +} + +// WithPadding creates a new encoding identical to enc except +// with a specified padding character, or NoPadding to disable padding. +func (enc Encoding) WithPadding(padding rune) *Encoding { + enc.padChar = padding + return &enc +} + +// StdEncoding is the standard base32 encoding, as defined in +// RFC 4648. +var StdEncoding = NewEncodingCI(encodeStd) + +// HexEncoding is the ``Extended Hex Alphabet'' defined in RFC 4648. +// It is typically used in DNS. +var HexEncoding = NewEncodingCI(encodeHex) + +var RawStdEncoding = NewEncodingCI(encodeStd).WithPadding(NoPadding) +var RawHexEncoding = NewEncodingCI(encodeHex).WithPadding(NoPadding) + +/* + * Encoder + */ + +// Encode encodes src using the encoding enc, writing +// EncodedLen(len(src)) bytes to dst. +// +// The encoding pads the output to a multiple of 8 bytes, +// so Encode is not appropriate for use on individual blocks +// of a large data stream. Use NewEncoder() instead. 
+func (enc *Encoding) Encode(dst, src []byte) { + if len(src) == 0 { + return + } + + for len(src) > 0 { + var carry byte + + // Unpack 8x 5-bit source blocks into a 5 byte + // destination quantum + switch len(src) { + default: + dst[7] = enc.encode[src[4]&0x1F] + carry = src[4] >> 5 + fallthrough + case 4: + dst[6] = enc.encode[carry|(src[3]<<3)&0x1F] + dst[5] = enc.encode[(src[3]>>2)&0x1F] + carry = src[3] >> 7 + fallthrough + case 3: + dst[4] = enc.encode[carry|(src[2]<<1)&0x1F] + carry = (src[2] >> 4) & 0x1F + fallthrough + case 2: + dst[3] = enc.encode[carry|(src[1]<<4)&0x1F] + dst[2] = enc.encode[(src[1]>>1)&0x1F] + carry = (src[1] >> 6) & 0x1F + fallthrough + case 1: + dst[1] = enc.encode[carry|(src[0]<<2)&0x1F] + dst[0] = enc.encode[src[0]>>3] + } + + // Pad the final quantum + if len(src) < 5 { + if enc.padChar != NoPadding { + dst[7] = byte(enc.padChar) + if len(src) < 4 { + dst[6] = byte(enc.padChar) + dst[5] = byte(enc.padChar) + if len(src) < 3 { + dst[4] = byte(enc.padChar) + if len(src) < 2 { + dst[3] = byte(enc.padChar) + dst[2] = byte(enc.padChar) + } + } + } + } + break + } + src = src[5:] + dst = dst[8:] + } +} + +// EncodeToString returns the base32 encoding of src. +func (enc *Encoding) EncodeToString(src []byte) string { + buf := make([]byte, enc.EncodedLen(len(src))) + enc.Encode(buf, src) + return string(buf) +} + +type encoder struct { + err error + enc *Encoding + w io.Writer + buf [5]byte // buffered data waiting to be encoded + nbuf int // number of bytes in buf + out [1024]byte // output buffer +} + +func (e *encoder) Write(p []byte) (n int, err error) { + if e.err != nil { + return 0, e.err + } + + // Leading fringe. 
+ if e.nbuf > 0 { + var i int + for i = 0; i < len(p) && e.nbuf < 5; i++ { + e.buf[e.nbuf] = p[i] + e.nbuf++ + } + n += i + p = p[i:] + if e.nbuf < 5 { + return + } + e.enc.Encode(e.out[0:], e.buf[0:]) + if _, e.err = e.w.Write(e.out[0:8]); e.err != nil { + return n, e.err + } + e.nbuf = 0 + } + + // Large interior chunks. + for len(p) >= 5 { + nn := len(e.out) / 8 * 5 + if nn > len(p) { + nn = len(p) + nn -= nn % 5 + } + e.enc.Encode(e.out[0:], p[0:nn]) + if _, e.err = e.w.Write(e.out[0 : nn/5*8]); e.err != nil { + return n, e.err + } + n += nn + p = p[nn:] + } + + // Trailing fringe. + for i := 0; i < len(p); i++ { + e.buf[i] = p[i] + } + e.nbuf = len(p) + n += len(p) + return +} + +// Close flushes any pending output from the encoder. +// It is an error to call Write after calling Close. +func (e *encoder) Close() error { + // If there's anything left in the buffer, flush it out + if e.err == nil && e.nbuf > 0 { + e.enc.Encode(e.out[0:], e.buf[0:e.nbuf]) + e.nbuf = 0 + _, e.err = e.w.Write(e.out[0:8]) + } + return e.err +} + +// NewEncoder returns a new base32 stream encoder. Data written to +// the returned writer will be encoded using enc and then written to w. +// Base32 encodings operate in 5-byte blocks; when finished +// writing, the caller must Close the returned encoder to flush any +// partially written blocks. +func NewEncoder(enc *Encoding, w io.Writer) io.WriteCloser { + return &encoder{enc: enc, w: w} +} + +// EncodedLen returns the length in bytes of the base32 encoding +// of an input buffer of length n. 
+func (enc *Encoding) EncodedLen(n int) int { + if enc.padChar == NoPadding { + return (n*8 + 4) / 5 // minimum # chars at 5 bits per char + } + return (n + 4) / 5 * 8 +} + +/* + * Decoder + */ + +type CorruptInputError int64 + +func (e CorruptInputError) Error() string { + return "illegal base32 data at input byte " + strconv.FormatInt(int64(e), 10) +} + +// decode is like Decode but returns an additional 'end' value, which +// indicates if end-of-message padding was encountered and thus any +// additional data is an error. This method assumes that src has been +// stripped of all supported whitespace ('\r' and '\n'). +func (enc *Encoding) decode(dst, src []byte) (n int, end bool, err error) { + olen := len(src) + for len(src) > 0 && !end { + // Decode quantum using the base32 alphabet + var dbuf [8]byte + dlen := 8 + + for j := 0; j < 8; { + if len(src) == 0 { + if enc.padChar != NoPadding { + return n, false, CorruptInputError(olen - len(src) - j) + } + dlen = j + break + } + in := src[0] + src = src[1:] + if in == byte(enc.padChar) && j >= 2 && len(src) < 8 { + if enc.padChar == NoPadding { + return n, false, CorruptInputError(olen) + } + + // We've reached the end and there's padding + if len(src)+j < 8-1 { + // not enough padding + return n, false, CorruptInputError(olen) + } + for k := 0; k < 8-1-j; k++ { + if len(src) > k && src[k] != byte(enc.padChar) { + // incorrect padding + return n, false, CorruptInputError(olen - len(src) + k - 1) + } + } + dlen, end = j, true + // 7, 5 and 2 are not valid padding lengths, and so 1, 3 and 6 are not + // valid dlen values. See RFC 4648 Section 6 "Base 32 Encoding" listing + // the five valid padding lengths, and Section 9 "Illustrations and + // Examples" for an illustration for how the 1st, 3rd and 6th base32 + // src bytes do not yield enough information to decode a dst byte. 
+ if dlen == 1 || dlen == 3 || dlen == 6 { + return n, false, CorruptInputError(olen - len(src) - 1) + } + break + } + dbuf[j] = enc.decodeMap[in] + if dbuf[j] == 0xFF { + return n, false, CorruptInputError(olen - len(src) - 1) + } + j++ + } + + // Pack 8x 5-bit source blocks into 5 byte destination + // quantum + switch dlen { + case 8: + dst[4] = dbuf[6]<<5 | dbuf[7] + fallthrough + case 7: + dst[3] = dbuf[4]<<7 | dbuf[5]<<2 | dbuf[6]>>3 + fallthrough + case 5: + dst[2] = dbuf[3]<<4 | dbuf[4]>>1 + fallthrough + case 4: + dst[1] = dbuf[1]<<6 | dbuf[2]<<1 | dbuf[3]>>4 + fallthrough + case 2: + dst[0] = dbuf[0]<<3 | dbuf[1]>>2 + } + + if len(dst) > 5 { + dst = dst[5:] + } + + switch dlen { + case 2: + n += 1 + case 4: + n += 2 + case 5: + n += 3 + case 7: + n += 4 + case 8: + n += 5 + } + } + return n, end, nil +} + +// Decode decodes src using the encoding enc. It writes at most +// DecodedLen(len(src)) bytes to dst and returns the number of bytes +// written. If src contains invalid base32 data, it will return the +// number of bytes successfully written and CorruptInputError. +// New line characters (\r and \n) are ignored. +func (enc *Encoding) Decode(dst, s []byte) (n int, err error) { + // FIXME: if dst is the same as s use decodeInPlace + stripped := make([]byte, 0, len(s)) + for _, c := range s { + if c != '\r' && c != '\n' { + stripped = append(stripped, c) + } + } + n, _, err = enc.decode(dst, stripped) + return +} + +func (enc *Encoding) decodeInPlace(strb []byte) (n int, err error) { + off := 0 + for _, b := range strb { + if b == '\n' || b == '\r' { + continue + } + strb[off] = b + off++ + } + n, _, err = enc.decode(strb, strb[:off]) + return +} + +// DecodeString returns the bytes represented by the base32 string s. 
+func (enc *Encoding) DecodeString(s string) ([]byte, error) { + strb := []byte(s) + n, err := enc.decodeInPlace(strb) + if err != nil { + return nil, err + } + return strb[:n], nil +} + +type decoder struct { + err error + enc *Encoding + r io.Reader + end bool // saw end of message + buf [1024]byte // leftover input + nbuf int + out []byte // leftover decoded output + outbuf [1024 / 8 * 5]byte +} + +func (d *decoder) Read(p []byte) (n int, err error) { + if d.err != nil { + return 0, d.err + } + + // Use leftover decoded output from last read. + if len(d.out) > 0 { + n = copy(p, d.out) + d.out = d.out[n:] + return n, nil + } + + // Read a chunk. + nn := len(p) / 5 * 8 + if nn < 8 { + nn = 8 + } + if nn > len(d.buf) { + nn = len(d.buf) + } + nn, d.err = io.ReadAtLeast(d.r, d.buf[d.nbuf:nn], 8-d.nbuf) + d.nbuf += nn + if d.nbuf < 8 { + return 0, d.err + } + + // Decode chunk into p, or d.out and then p if p is too small. + nr := d.nbuf / 8 * 8 + nw := d.nbuf / 8 * 5 + if nw > len(p) { + nw, d.end, d.err = d.enc.decode(d.outbuf[0:], d.buf[0:nr]) + d.out = d.outbuf[0:nw] + n = copy(p, d.out) + d.out = d.out[n:] + } else { + n, d.end, d.err = d.enc.decode(p, d.buf[0:nr]) + } + d.nbuf -= nr + for i := 0; i < d.nbuf; i++ { + d.buf[i] = d.buf[i+nr] + } + + if d.err == nil { + d.err = err + } + return n, d.err +} + +type newlineFilteringReader struct { + wrapped io.Reader +} + +func (r *newlineFilteringReader) Read(p []byte) (int, error) { + n, err := r.wrapped.Read(p) + for n > 0 { + offset := 0 + for i, b := range p[0:n] { + if b != '\r' && b != '\n' { + if i != offset { + p[offset] = b + } + offset++ + } + } + if offset > 0 { + return offset, err + } + // Previous buffer entirely whitespace, read again + n, err = r.wrapped.Read(p) + } + return n, err +} + +// NewDecoder constructs a new base32 stream decoder. 
+func NewDecoder(enc *Encoding, r io.Reader) io.Reader { + return &decoder{enc: enc, r: &newlineFilteringReader{r}} +} + +// DecodedLen returns the maximum length in bytes of the decoded data +// corresponding to n bytes of base32-encoded data. +func (enc *Encoding) DecodedLen(n int) int { + if enc.padChar == NoPadding { + return (n*5 + 7) / 8 + } + + return n / 8 * 5 +} diff --git a/vendor/github.com/multiformats/go-base32/go.mod b/vendor/github.com/multiformats/go-base32/go.mod new file mode 100644 index 0000000000..fcc446feaf --- /dev/null +++ b/vendor/github.com/multiformats/go-base32/go.mod @@ -0,0 +1 @@ +module github.com/multiformats/go-base32 diff --git a/vendor/github.com/multiformats/go-base32/package.json b/vendor/github.com/multiformats/go-base32/package.json new file mode 100644 index 0000000000..04a9970d73 --- /dev/null +++ b/vendor/github.com/multiformats/go-base32/package.json @@ -0,0 +1,15 @@ +{ + "author": "Golang", + "bugs": { + "url": "https://github.com/multiformats/go-base32" + }, + "gx": { + "dvcsimport": "github.com/multiformats/go-base32" + }, + "gxVersion": "0.7.0", + "language": "go", + "license": "BSD-3", + "name": "base32", + "version": "0.0.3" +} + diff --git a/vendor/github.com/multiformats/go-multiaddr-dns/.gitignore b/vendor/github.com/multiformats/go-multiaddr-dns/.gitignore new file mode 100644 index 0000000000..4621ab7383 --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr-dns/.gitignore @@ -0,0 +1 @@ +/madns/madns diff --git a/vendor/github.com/multiformats/go-multiaddr-dns/.travis.yml b/vendor/github.com/multiformats/go-multiaddr-dns/.travis.yml new file mode 100644 index 0000000000..336deb460c --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr-dns/.travis.yml @@ -0,0 +1,30 @@ +os: + - linux + +language: go + +go: + - 1.12.x + +env: + global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s 
https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/pkg/mod + - /home/travis/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/multiformats/go-multiaddr-dns/LICENSE b/vendor/github.com/multiformats/go-multiaddr-dns/LICENSE new file mode 100644 index 0000000000..c7386b3c94 --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr-dns/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/multiformats/go-multiaddr-dns/README.md b/vendor/github.com/multiformats/go-multiaddr-dns/README.md new file mode 100644 index 0000000000..3958c3cf20 --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr-dns/README.md @@ -0,0 +1,57 @@ +# go-multiaddr-dns + +> Resolve /dns4, /dns6, and /dnsaddr multiaddrs. 
+ +```sh +> madns /dnsaddr/ipfs.io/ipfs/QmSoLju6m7xTh3DuokvT3886QRYqxAzb1kShaanJgW36yx +/ip4/104.236.151.122/tcp/4001/ipfs/QmSoLju6m7xTh3DuokvT3886QRYqxAzb1kShaanJgW36yx +/ip6/2604:a880:1:20::1d9:6001/tcp/4001/ipfs/QmSoLju6m7xTh3DuokvT3886QRYqxAzb1kShaanJgW36yx +/ip6/fc3d:9a4e:3c96:2fd2:1afa:18fe:8dd2:b602/tcp/4001/ipfs/QmSoLju6m7xTh3DuokvT3886QRYqxAzb1kShaanJgW36yx +/dns4/jupiter.i.ipfs.io/tcp/4001/ipfs/QmSoLju6m7xTh3DuokvT3886QRYqxAzb1kShaanJgW36yx +/dns6/jupiter.i.ipfs.io/tcp/4001/ipfs/QmSoLju6m7xTh3DuokvT3886QRYqxAzb1kShaanJgW36yx +``` + + +In more detail: + +```sh +> madns /dns6/example.net +/ip6/2001:db8::a3 +/ip6/2001:db8::a4 +... + +> madns /dns4/example.net/tcp/443/wss +/ip4/192.0.2.1/tcp/443/wss +/ip4/192.0.2.2/tcp/443/wss + +# No-op if it's not a dns-ish address. + +> madns /ip4/127.0.0.1/tcp/8080 +/ip4/127.0.0.1/tcp/8080 + +# /dnsaddr resolves by looking up TXT records. + +> dig +short TXT _dnsaddr.example.net +"dnsaddr=/ip6/2001:db8::a3/tcp/443/wss/ipfs/Qmfoo" +"dnsaddr=/ip6/2001:db8::a4/tcp/443/wss/ipfs/Qmbar" +"dnsaddr=/ip4/192.0.2.1/tcp/443/wss/ipfs/Qmfoo" +"dnsaddr=/ip4/192.0.2.2/tcp/443/wss/ipfs/Qmbar" +... + +# /dnsaddr returns addrs which encapsulate whatever /dnsaddr encapsulates too. + +> madns example.net/ipfs/Qmfoo +info: changing query to /dnsaddr/example.net/ipfs/Qmfoo +/ip6/2001:db8::a3/tcp/443/wss/ipfs/Qmfoo +/ip4/192.0.2.1/tcp/443/wss/ipfs/Qmfoo + +# TODO -p filters by protocol stacks. 
+ +> madns -p /ip6/tcp/wss /dnsaddr/example.net +/ip6/2001:db8::a3/tcp/443/wss/ipfs/Qmfoo +/ip6/2001:db8::a4/tcp/443/wss/ipfs/Qmbar + +# TODO -c filters by CIDR +> madns -c /ip4/104.236.76.0/ipcidr/24 /dnsaddr/example.net +/ip4/192.0.2.2/tcp/443/wss/ipfs/Qmbar +``` diff --git a/vendor/github.com/multiformats/go-multiaddr-dns/dns.go b/vendor/github.com/multiformats/go-multiaddr-dns/dns.go new file mode 100644 index 0000000000..4a5a93460c --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr-dns/dns.go @@ -0,0 +1,29 @@ +package madns + +import ( + ma "github.com/multiformats/go-multiaddr" +) + +// Extracted from source of truth for multicodec codes: https://github.com/multiformats/multicodec +const ( + // Deprecated: use ma.P_DNS + P_DNS = ma.P_DNS + // Deprecated: use ma.P_DNS4 + P_DNS4 = ma.P_DNS4 + // Deprecated: use ma.P_DNS6 + P_DNS6 = ma.P_DNS6 + // Deprecated: use ma.P_DNSADDR + P_DNSADDR = ma.P_DNSADDR +) + +// Deprecated: use ma.ProtocolWithCode(P_DNS) +var DnsProtocol = ma.ProtocolWithCode(P_DNS) + +// Deprecated: use ma.ProtocolWithCode(P_DNS4) +var Dns4Protocol = ma.ProtocolWithCode(P_DNS4) + +// Deprecated: use ma.ProtocolWithCode(P_DNS6) +var Dns6Protocol = ma.ProtocolWithCode(P_DNS6) + +// Deprecated: use ma.ProtocolWithCode(P_DNSADDR) +var DnsaddrProtocol = ma.ProtocolWithCode(P_DNSADDR) diff --git a/vendor/github.com/multiformats/go-multiaddr-dns/go.mod b/vendor/github.com/multiformats/go-multiaddr-dns/go.mod new file mode 100644 index 0000000000..0824647482 --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr-dns/go.mod @@ -0,0 +1,5 @@ +module github.com/multiformats/go-multiaddr-dns + +require github.com/multiformats/go-multiaddr v0.1.1 + +go 1.12 diff --git a/vendor/github.com/multiformats/go-multiaddr-dns/go.sum b/vendor/github.com/multiformats/go-multiaddr-dns/go.sum new file mode 100644 index 0000000000..0061b94a7d --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr-dns/go.sum @@ -0,0 +1,20 @@ 
+github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-multiaddr v0.1.1 h1:rVAztJYMhCQ7vEFr8FvxW3mS+HF2eY/oPbOMeS0ZDnE= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multihash v0.0.8 h1:wrYcW5yxSi3dU07n5jnuS5PrNwyHy0zRHGVoUugWvXg= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/multiformats/go-multiaddr-dns/resolve.go b/vendor/github.com/multiformats/go-multiaddr-dns/resolve.go new file mode 100644 index 0000000000..64d8f707db --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr-dns/resolve.go @@ -0,0 +1,269 @@ +package madns + +import ( + "context" + "net" + "strings" + + ma "github.com/multiformats/go-multiaddr" +) + +var ResolvableProtocols = []ma.Protocol{DnsaddrProtocol, Dns4Protocol, Dns6Protocol, DnsProtocol} +var DefaultResolver = &Resolver{Backend: net.DefaultResolver} + +const dnsaddrTXTPrefix = "dnsaddr=" + +type backend interface { + LookupIPAddr(context.Context, string) ([]net.IPAddr, error) + LookupTXT(context.Context, string) ([]string, error) +} + +type Resolver struct { + Backend backend +} + +type MockBackend struct { + IP map[string][]net.IPAddr + TXT map[string][]string +} + +func (r *MockBackend) LookupIPAddr(ctx context.Context, name string) ([]net.IPAddr, error) { + results, ok := r.IP[name] + if ok { + return results, nil + } else { + return []net.IPAddr{}, nil + } +} + +func (r *MockBackend) LookupTXT(ctx context.Context, name string) ([]string, error) { + results, ok := r.TXT[name] + if ok { + return results, nil + } else { + return []string{}, nil + } +} + +func Matches(maddr ma.Multiaddr) (matches bool) { + ma.ForEach(maddr, func(c ma.Component) bool { + switch c.Protocol().Code { + case DnsProtocol.Code, Dns4Protocol.Code, Dns6Protocol.Code, DnsaddrProtocol.Code: + matches = true + } + return !matches + }) + return matches +} + +func Resolve(ctx context.Context, maddr ma.Multiaddr) ([]ma.Multiaddr, error) { + return DefaultResolver.Resolve(ctx, maddr) +} + +func (r *Resolver) Resolve(ctx context.Context, maddr ma.Multiaddr) ([]ma.Multiaddr, error) { + var results []ma.Multiaddr + for i := 0; maddr != nil; i++ { + var keep ma.Multiaddr + + // Find the next dns component. 
+ keep, maddr = ma.SplitFunc(maddr, func(c ma.Component) bool { + switch c.Protocol().Code { + case DnsProtocol.Code, Dns4Protocol.Code, Dns6Protocol.Code, DnsaddrProtocol.Code: + return true + default: + return false + } + }) + + // Keep everything before the dns component. + if keep != nil { + if len(results) == 0 { + results = []ma.Multiaddr{keep} + } else { + for i, r := range results { + results[i] = r.Encapsulate(keep) + } + } + } + + // If the rest is empty, we've hit the end (there _was_ no dns component). + if maddr == nil { + break + } + + // split off the dns component. + var resolve *ma.Component + resolve, maddr = ma.SplitFirst(maddr) + + proto := resolve.Protocol() + value := resolve.Value() + + // resolve the dns component + var resolved []ma.Multiaddr + switch proto.Code { + case Dns4Protocol.Code, Dns6Protocol.Code, DnsProtocol.Code: + // The dns, dns4, and dns6 resolver simply resolves each + // dns* component into an ipv4/ipv6 address. + + v4only := proto.Code == Dns4Protocol.Code + v6only := proto.Code == Dns6Protocol.Code + + // XXX: Unfortunately, go does a pretty terrible job of + // differentiating between IPv6 and IPv4. A v4-in-v6 + // AAAA record will _look_ like an A record to us and + // there's nothing we can do about that. + records, err := r.Backend.LookupIPAddr(ctx, value) + if err != nil { + return nil, err + } + + // Convert each DNS record into a multiaddr. If the + // protocol is dns4, throw away any IPv6 addresses. If + // the protocol is dns6, throw away any IPv4 addresses. 
+ + for _, r := range records { + var ( + rmaddr ma.Multiaddr + err error + ) + ip4 := r.IP.To4() + if ip4 == nil { + if v4only { + continue + } + rmaddr, err = ma.NewMultiaddr("/ip6/" + r.IP.String()) + } else { + if v6only { + continue + } + rmaddr, err = ma.NewMultiaddr("/ip4/" + ip4.String()) + } + if err != nil { + return nil, err + } + resolved = append(resolved, rmaddr) + } + case DnsaddrProtocol.Code: + // The dnsaddr resolver is a bit more complicated. We: + // + // 1. Lookup the dnsaddr txt record on _dnsaddr.DOMAIN.TLD + // 2. Take everything _after_ the `/dnsaddr/DOMAIN.TLD` + // part of the multiaddr. + // 3. Find the dnsaddr records (if any) with suffixes + // matching the result of step 2. + + // First, lookup the TXT record + records, err := r.Backend.LookupTXT(ctx, "_dnsaddr."+value) + if err != nil { + return nil, err + } + + // Then, calculate the length of the suffix we're + // looking for. + length := 0 + if maddr != nil { + length = addrLen(maddr) + } + + for _, r := range records { + // Ignore non dnsaddr TXT records. + if !strings.HasPrefix(r, dnsaddrTXTPrefix) { + continue + } + + // Extract and decode the multiaddr. + rmaddr, err := ma.NewMultiaddr(r[len(dnsaddrTXTPrefix):]) + if err != nil { + // discard multiaddrs we don't understand. + // XXX: Is this right? It's the best we + // can do for now, really. + continue + } + + // If we have a suffix to match on. + if maddr != nil { + // Make sure the new address is at least + // as long as the suffix we're looking + // for. + rmlen := addrLen(rmaddr) + if rmlen < length { + // not long enough. + continue + } + + // Matches everything after the /dnsaddr/... 
with the end of the + // dnsaddr record: + // + // v----------rmlen-----------------v + // /ip4/1.2.3.4/tcp/1234/p2p/QmFoobar + // /p2p/QmFoobar + // ^--(rmlen - length)--^---length--^ + if !maddr.Equal(offset(rmaddr, rmlen-length)) { + continue + } + } + + resolved = append(resolved, rmaddr) + } + + // consumes the rest of the multiaddr as part of the "match" process. + maddr = nil + default: + panic("unreachable") + } + + if len(resolved) == 0 { + return nil, nil + } else if len(results) == 0 { + results = resolved + } else { + // We take the cross product here as we don't have any + // better way to represent "ORs" in multiaddrs. For + // example, `/dns/foo.com/p2p-circuit/dns/bar.com` could + // resolve to: + // + // * /ip4/1.1.1.1/p2p-circuit/ip4/2.1.1.1 + // * /ip4/1.1.1.1/p2p-circuit/ip4/2.1.1.2 + // * /ip4/1.1.1.2/p2p-circuit/ip4/2.1.1.1 + // * /ip4/1.1.1.2/p2p-circuit/ip4/2.1.1.2 + results = cross(results, resolved) + } + } + + return results, nil +} + +// counts the number of components in the multiaddr +func addrLen(maddr ma.Multiaddr) int { + length := 0 + ma.ForEach(maddr, func(_ ma.Component) bool { + length++ + return true + }) + return length +} + +// trims `offset` components from the beginning of the multiaddr. +func offset(maddr ma.Multiaddr, offset int) ma.Multiaddr { + _, after := ma.SplitFunc(maddr, func(c ma.Component) bool { + if offset == 0 { + return true + } + offset-- + return false + }) + return after +} + +// takes the cross product of two sets of multiaddrs +// +// assumes `a` is non-empty. 
+func cross(a, b []ma.Multiaddr) []ma.Multiaddr { + res := make([]ma.Multiaddr, 0, len(a)*len(b)) + for _, x := range a { + for _, y := range b { + res = append(res, x.Encapsulate(y)) + } + } + return res +} diff --git a/vendor/github.com/multiformats/go-multiaddr/.gitignore b/vendor/github.com/multiformats/go-multiaddr/.gitignore new file mode 100644 index 0000000000..699d271b02 --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/.gitignore @@ -0,0 +1,3 @@ +.vscode/ +multiaddr/multiaddr +tmp/ diff --git a/vendor/github.com/multiformats/go-multiaddr/.travis.yml b/vendor/github.com/multiformats/go-multiaddr/.travis.yml new file mode 100644 index 0000000000..95e4daaa06 --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/.travis.yml @@ -0,0 +1,31 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + global: + - GOTFLAGS="-race" + - GO111MODULE=on + matrix: + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + - make conformance + +cache: + directories: + - $GOPATH/pkg/mod + - /home/travis/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/multiformats/go-multiaddr/LICENSE b/vendor/github.com/multiformats/go-multiaddr/LICENSE new file mode 100644 index 0000000000..c7386b3c94 --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above 
copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/multiformats/go-multiaddr/Makefile b/vendor/github.com/multiformats/go-multiaddr/Makefile new file mode 100644 index 0000000000..fa5197afaf --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/Makefile @@ -0,0 +1,12 @@ +conformance: tmp/multiaddr + go build -o tmp/multiaddr/test/go-multiaddr ./multiaddr + cd tmp/multiaddr/test && MULTIADDR_BIN="./go-multiaddr" go test -v + +tmp/multiaddr: + mkdir -p tmp/ + git clone https://github.com/multiformats/multiaddr tmp/multiaddr/ + +clean: + rm -rf tmp/ + +.PHONY: conformance clean diff --git a/vendor/github.com/multiformats/go-multiaddr/README.md b/vendor/github.com/multiformats/go-multiaddr/README.md new file mode 100644 index 0000000000..df2766aa18 --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/README.md @@ -0,0 +1,117 @@ +# go-multiaddr + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-multiformats-blue.svg?style=flat-square)](https://github.com/multiformats/multiformats) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](https://webchat.freenode.net/?channels=%23ipfs) +[![](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) 
+[![GoDoc](https://godoc.org/github.com/multiformats/go-multiaddr?status.svg)](https://godoc.org/github.com/multiformats/go-multiaddr) +[![Travis CI](https://img.shields.io/travis/multiformats/go-multiaddr.svg?style=flat-square&branch=master)](https://travis-ci.org/multiformats/go-multiaddr) +[![codecov.io](https://img.shields.io/codecov/c/github/multiformats/go-multiaddr.svg?style=flat-square&branch=master)](https://codecov.io/github/multiformats/go-multiaddr?branch=master) + +> [multiaddr](https://github.com/multiformats/multiaddr) implementation in go + +Multiaddr is a standard way to represent addresses that: + +- Support any standard network protocols. +- Self-describe (include protocols). +- Have a binary packed format. +- Have a nice string representation. +- Encapsulate well. + +## Table of Contents + +- [Install](#install) +- [Usage](#usage) + - [Example](#example) + - [Simple](#simple) + - [Protocols](#protocols) + - [En/decapsulate](#endecapsulate) + - [Tunneling](#tunneling) +- [Maintainers](#maintainers) +- [Contribute](#contribute) +- [License](#license) + +## Install + +```sh +go get github.com/multiformats/go-multiaddr +``` + +## Usage + +### Example + +#### Simple + +```go +import ma "github.com/multiformats/go-multiaddr" + +// construct from a string (err signals parse failure) +m1, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/1234") + +// construct from bytes (err signals parse failure) +m2, err := ma.NewMultiaddrBytes(m1.Bytes()) + +// true +strings.Equal(m1.String(), "/ip4/127.0.0.1/udp/1234") +strings.Equal(m1.String(), m2.String()) +bytes.Equal(m1.Bytes(), m2.Bytes()) +m1.Equal(m2) +m2.Equal(m1) +``` + +#### Protocols + +```go +// get the multiaddr protocol description objects +m1.Protocols() +// []Protocol{ +// Protocol{ Code: 4, Name: 'ip4', Size: 32}, +// Protocol{ Code: 17, Name: 'udp', Size: 16}, +// } +``` + +#### En/decapsulate + +```go +import ma "github.com/multiformats/go-multiaddr" + +m, err := 
ma.NewMultiaddr("/ip4/127.0.0.1/udp/1234") +// + +sctpMA, err := ma.NewMultiaddr("/sctp/5678") + +m.Encapsulate(sctpMA) +// + +udpMA, err := ma.NewMultiaddr("/udp/1234") + +m.Decapsulate(udpMA) // up to + inc last occurrence of subaddr +// +``` + +#### Tunneling + +Multiaddr allows expressing tunnels very nicely. + +```js +printer, _ := ma.NewMultiaddr("/ip4/192.168.0.13/tcp/80") +proxy, _ := ma.NewMultiaddr("/ip4/10.20.30.40/tcp/443") +printerOverProxy := proxy.Encapsulate(printer) +// /ip4/10.20.30.40/tcp/443/ip4/192.168.0.13/tcp/80 + +proxyAgain := printerOverProxy.Decapsulate(printer) +// /ip4/10.20.30.40/tcp/443 +``` + +## Contribute + +Contributions welcome. Please check out [the issues](https://github.com/multiformats/go-multiaddr/issues). + +Check out our [contributing document](https://github.com/multiformats/multiformats/blob/master/contributing.md) for more information on how we work, and about contributing in general. Please be aware that all interactions related to multiformats are subject to the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). + +Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. 
+ +## License + +[MIT](LICENSE) © 2014 Juan Batiz-Benet diff --git a/vendor/github.com/multiformats/go-multiaddr/codec.go b/vendor/github.com/multiformats/go-multiaddr/codec.go new file mode 100644 index 0000000000..e6b74479cc --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/codec.go @@ -0,0 +1,204 @@ +package multiaddr + +import ( + "bytes" + "fmt" + "strings" + + "github.com/multiformats/go-varint" +) + +func stringToBytes(s string) ([]byte, error) { + // consume trailing slashes + s = strings.TrimRight(s, "/") + + var b bytes.Buffer + sp := strings.Split(s, "/") + + if sp[0] != "" { + return nil, fmt.Errorf("failed to parse multiaddr %q: must begin with /", s) + } + + // consume first empty elem + sp = sp[1:] + + if len(sp) == 0 { + return nil, fmt.Errorf("failed to parse multiaddr %q: empty multiaddr", s) + } + + for len(sp) > 0 { + name := sp[0] + p := ProtocolWithName(name) + if p.Code == 0 { + return nil, fmt.Errorf("failed to parse multiaddr %q: unknown protocol %s", s, sp[0]) + } + _, _ = b.Write(p.VCode) + sp = sp[1:] + + if p.Size == 0 { // no length. + continue + } + + if len(sp) < 1 { + return nil, fmt.Errorf("failed to parse multiaddr %q: unexpected end of multiaddr", s) + } + + if p.Path { + // it's a path protocol (terminal). + // consume the rest of the address as the next component. + sp = []string{"/" + strings.Join(sp, "/")} + } + + a, err := p.Transcoder.StringToBytes(sp[0]) + if err != nil { + return nil, fmt.Errorf("failed to parse multiaddr %q: invalid value %q for protocol %s: %s", s, sp[0], p.Name, err) + } + if p.Size < 0 { // varint size. 
+ _, _ = b.Write(varint.ToUvarint(uint64(len(a)))) + } + b.Write(a) + sp = sp[1:] + } + + return b.Bytes(), nil +} + +func validateBytes(b []byte) (err error) { + if len(b) == 0 { + return fmt.Errorf("empty multiaddr") + } + for len(b) > 0 { + code, n, err := ReadVarintCode(b) + if err != nil { + return err + } + + b = b[n:] + p := ProtocolWithCode(code) + if p.Code == 0 { + return fmt.Errorf("no protocol with code %d", code) + } + + if p.Size == 0 { + continue + } + + n, size, err := sizeForAddr(p, b) + if err != nil { + return err + } + + b = b[n:] + + if len(b) < size || size < 0 { + return fmt.Errorf("invalid value for size %d", len(b)) + } + + err = p.Transcoder.ValidateBytes(b[:size]) + if err != nil { + return err + } + + b = b[size:] + } + + return nil +} + +func readComponent(b []byte) (int, Component, error) { + var offset int + code, n, err := ReadVarintCode(b) + if err != nil { + return 0, Component{}, err + } + offset += n + + p := ProtocolWithCode(code) + if p.Code == 0 { + return 0, Component{}, fmt.Errorf("no protocol with code %d", code) + } + + if p.Size == 0 { + return offset, Component{ + bytes: b[:offset], + offset: offset, + protocol: p, + }, nil + } + + n, size, err := sizeForAddr(p, b[offset:]) + if err != nil { + return 0, Component{}, err + } + + offset += n + + if len(b[offset:]) < size || size < 0 { + return 0, Component{}, fmt.Errorf("invalid value for size %d", len(b[offset:])) + } + + return offset + size, Component{ + bytes: b[:offset+size], + protocol: p, + offset: offset, + }, nil +} + +func bytesToString(b []byte) (ret string, err error) { + if len(b) == 0 { + return "", fmt.Errorf("empty multiaddr") + } + var buf strings.Builder + + for len(b) > 0 { + n, c, err := readComponent(b) + if err != nil { + return "", err + } + b = b[n:] + c.writeTo(&buf) + } + + return buf.String(), nil +} + +func sizeForAddr(p Protocol, b []byte) (skip, size int, err error) { + switch { + case p.Size > 0: + return 0, (p.Size / 8), nil + case p.Size == 
0: + return 0, 0, nil + default: + size, n, err := ReadVarintCode(b) + if err != nil { + return 0, 0, err + } + return n, size, nil + } +} + +func bytesSplit(b []byte) ([][]byte, error) { + var ret [][]byte + for len(b) > 0 { + code, n, err := ReadVarintCode(b) + if err != nil { + return nil, err + } + + p := ProtocolWithCode(code) + if p.Code == 0 { + return nil, fmt.Errorf("no protocol with code %d", b[0]) + } + + n2, size, err := sizeForAddr(p, b[n:]) + if err != nil { + return nil, err + } + + length := n + n2 + size + ret = append(ret, b[:length]) + b = b[length:] + } + + return ret, nil +} diff --git a/vendor/github.com/multiformats/go-multiaddr/codecov.yml b/vendor/github.com/multiformats/go-multiaddr/codecov.yml new file mode 100644 index 0000000000..ca8100ab11 --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/codecov.yml @@ -0,0 +1,2 @@ +ignore: + - "multiaddr" diff --git a/vendor/github.com/multiformats/go-multiaddr/component.go b/vendor/github.com/multiformats/go-multiaddr/component.go new file mode 100644 index 0000000000..490b8ac90e --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/component.go @@ -0,0 +1,183 @@ +package multiaddr + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "strings" + + "github.com/multiformats/go-varint" +) + +// Component is a single multiaddr Component. 
+type Component struct { + bytes []byte + protocol Protocol + offset int +} + +func (c *Component) Bytes() []byte { + return c.bytes +} + +func (c *Component) MarshalBinary() ([]byte, error) { + return c.Bytes(), nil +} + +func (c *Component) UnmarshalBinary(data []byte) error { + _, comp, err := readComponent(data) + if err != nil { + return err + } + *c = comp + return nil +} + +func (c *Component) MarshalText() ([]byte, error) { + return []byte(c.String()), nil +} + +func (c *Component) UnmarshalText(data []byte) error { + bytes, err := stringToBytes(string(data)) + if err != nil { + return err + } + _, comp, err := readComponent(bytes) + if err != nil { + return err + } + *c = comp + return nil +} + +func (c *Component) MarshalJSON() ([]byte, error) { + txt, err := c.MarshalText() + if err != nil { + return nil, err + } + + return json.Marshal(string(txt)) +} + +func (m *Component) UnmarshalJSON(data []byte) error { + var v string + if err := json.Unmarshal(data, &v); err != nil { + return err + } + + return m.UnmarshalText([]byte(v)) +} + +func (c *Component) Equal(o Multiaddr) bool { + return bytes.Equal(c.bytes, o.Bytes()) +} + +func (c *Component) Protocols() []Protocol { + return []Protocol{c.protocol} +} + +func (c *Component) Decapsulate(o Multiaddr) Multiaddr { + if c.Equal(o) { + return nil + } + return c +} + +func (c *Component) Encapsulate(o Multiaddr) Multiaddr { + m := &multiaddr{bytes: c.bytes} + return m.Encapsulate(o) +} + +func (c *Component) ValueForProtocol(code int) (string, error) { + if c.protocol.Code != code { + return "", ErrProtocolNotFound + } + return c.Value(), nil +} + +func (c *Component) Protocol() Protocol { + return c.protocol +} + +func (c *Component) RawValue() []byte { + return c.bytes[c.offset:] +} + +func (c *Component) Value() string { + if c.protocol.Transcoder == nil { + return "" + } + value, err := c.protocol.Transcoder.BytesToString(c.bytes[c.offset:]) + if err != nil { + // This Component must have been checked. 
+ panic(err) + } + return value +} + +func (c *Component) String() string { + var b strings.Builder + c.writeTo(&b) + return b.String() +} + +// writeTo is an efficient, private function for string-formatting a multiaddr. +// Trust me, we tend to allocate a lot when doing this. +func (c *Component) writeTo(b *strings.Builder) { + b.WriteByte('/') + b.WriteString(c.protocol.Name) + value := c.Value() + if len(value) == 0 { + return + } + if !(c.protocol.Path && value[0] == '/') { + b.WriteByte('/') + } + b.WriteString(value) +} + +// NewComponent constructs a new multiaddr component +func NewComponent(protocol, value string) (*Component, error) { + p := ProtocolWithName(protocol) + if p.Code == 0 { + return nil, fmt.Errorf("unsupported protocol: %s", protocol) + } + if p.Transcoder != nil { + bts, err := p.Transcoder.StringToBytes(value) + if err != nil { + return nil, err + } + return newComponent(p, bts), nil + } else if value != "" { + return nil, fmt.Errorf("protocol %s doesn't take a value", p.Name) + } + return newComponent(p, nil), nil + // TODO: handle path /? 
+} + +func newComponent(protocol Protocol, bvalue []byte) *Component { + size := len(bvalue) + size += len(protocol.VCode) + if protocol.Size < 0 { + size += varint.UvarintSize(uint64(len(bvalue))) + } + maddr := make([]byte, size) + var offset int + offset += copy(maddr[offset:], protocol.VCode) + if protocol.Size < 0 { + offset += binary.PutUvarint(maddr[offset:], uint64(len(bvalue))) + } + copy(maddr[offset:], bvalue) + + // For debugging + if len(maddr) != offset+len(bvalue) { + panic("incorrect length") + } + + return &Component{ + bytes: maddr, + protocol: protocol, + offset: offset, + } +} diff --git a/vendor/github.com/multiformats/go-multiaddr/doc.go b/vendor/github.com/multiformats/go-multiaddr/doc.go new file mode 100644 index 0000000000..d8c37b2651 --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/doc.go @@ -0,0 +1,36 @@ +/* +Package multiaddr provides an implementation of the Multiaddr network +address format. Multiaddr emphasizes explicitness, self-description, and +portability. It allows applications to treat addresses as opaque tokens, +and to avoid making assumptions about the address representation (e.g. length). 
+Learn more at https://github.com/multiformats/multiaddr + +Basic Use: + + import ( + "bytes" + "strings" + ma "github.com/multiformats/go-multiaddr" + ) + + // construct from a string (err signals parse failure) + m1, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/1234") + + // construct from bytes (err signals parse failure) + m2, err := ma.NewMultiaddrBytes(m1.Bytes()) + + // true + strings.Equal(m1.String(), "/ip4/127.0.0.1/udp/1234") + strings.Equal(m1.String(), m2.String()) + bytes.Equal(m1.Bytes(), m2.Bytes()) + m1.Equal(m2) + m2.Equal(m1) + + // tunneling (en/decap) + printer, _ := ma.NewMultiaddr("/ip4/192.168.0.13/tcp/80") + proxy, _ := ma.NewMultiaddr("/ip4/10.20.30.40/tcp/443") + printerOverProxy := proxy.Encapsulate(printer) + proxyAgain := printerOverProxy.Decapsulate(printer) + +*/ +package multiaddr diff --git a/vendor/github.com/multiformats/go-multiaddr/filter.go b/vendor/github.com/multiformats/go-multiaddr/filter.go new file mode 100644 index 0000000000..6751202a86 --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/filter.go @@ -0,0 +1,179 @@ +package multiaddr + +import ( + "net" + "sync" +) + +// Action is an enum modelling all possible filter actions. +type Action int32 + +const ( + ActionNone Action = iota // zero value. + ActionAccept + ActionDeny +) + +type filterEntry struct { + f net.IPNet + action Action +} + +// Filters is a structure representing a collection of accept/deny +// net.IPNet filters, together with the DefaultAction flag, which +// represents the default filter policy. +// +// Note that the last policy added to the Filters is authoritative. +type Filters struct { + DefaultAction Action + + mu sync.RWMutex + filters []*filterEntry +} + +// NewFilters constructs and returns a new set of net.IPNet filters. +// By default, the new filter accepts all addresses. 
+func NewFilters() *Filters { + return &Filters{ + DefaultAction: ActionAccept, + filters: make([]*filterEntry, 0), + } +} + +func (fs *Filters) find(ipnet net.IPNet) (int, *filterEntry) { + s := ipnet.String() + for idx, ft := range fs.filters { + if ft.f.String() == s { + return idx, ft + } + } + return -1, nil +} + +// AddDialFilter adds a deny rule to this Filters set. Hosts +// matching the given net.IPNet filter will be denied, unless +// another rule is added which states that they should be accepted. +// +// No effort is made to prevent duplication of filters, or to simplify +// the filters list. +// +// Deprecated: Use AddFilter(). +func (fs *Filters) AddDialFilter(f *net.IPNet) { + fs.AddFilter(*f, ActionDeny) +} + +// AddFilter adds a rule to the Filters set, enforcing the desired action for +// the provided IPNet mask. +func (fs *Filters) AddFilter(ipnet net.IPNet, action Action) { + fs.mu.Lock() + defer fs.mu.Unlock() + + if _, f := fs.find(ipnet); f != nil { + f.action = action + } else { + fs.filters = append(fs.filters, &filterEntry{ipnet, action}) + } +} + +// RemoveLiteral removes the first filter associated with the supplied IPNet, +// returning whether something was removed or not. It makes no distinction +// between whether the rule is an accept or a deny. +// +// Deprecated: use RemoveLiteral() instead. +func (fs *Filters) Remove(ipnet *net.IPNet) (removed bool) { + return fs.RemoveLiteral(*ipnet) +} + +// RemoveLiteral removes the first filter associated with the supplied IPNet, +// returning whether something was removed or not. It makes no distinction +// between whether the rule is an accept or a deny. +func (fs *Filters) RemoveLiteral(ipnet net.IPNet) (removed bool) { + fs.mu.Lock() + defer fs.mu.Unlock() + + if idx, _ := fs.find(ipnet); idx != -1 { + fs.filters = append(fs.filters[:idx], fs.filters[idx+1:]...) 
+ return true + } + return false +} + +// AddrBlocked parses a ma.Multiaddr and, if a valid netip is found, it applies the +// Filter set rules, returning true if the given address should be denied, and false if +// the given address is accepted. +// +// If a parsing error occurs, or no filter matches, the Filters' +// default is returned. +// +// TODO: currently, the last filter to match wins always, but it shouldn't be that way. +// Instead, the highest-specific last filter should win; that way more specific filters +// override more general ones. +func (fs *Filters) AddrBlocked(a Multiaddr) (deny bool) { + var ( + netip net.IP + found bool + ) + + ForEach(a, func(c Component) bool { + switch c.Protocol().Code { + case P_IP6ZONE: + return true + case P_IP6, P_IP4: + found = true + netip = net.IP(c.RawValue()) + return false + default: + return false + } + }) + + if !found { + return fs.DefaultAction == ActionDeny + } + + fs.mu.RLock() + defer fs.mu.RUnlock() + + action := fs.DefaultAction + for _, ft := range fs.filters { + if ft.f.Contains(netip) { + action = ft.action + } + } + + return action == ActionDeny +} + +// Filters returns the list of DENY net.IPNet masks. For backwards compatibility. +// +// A copy of the filters is made prior to returning, so the inner state is not exposed. +// +// Deprecated: Use FiltersForAction(). +func (fs *Filters) Filters() (result []*net.IPNet) { + ffa := fs.FiltersForAction(ActionDeny) + for _, res := range ffa { + res := res // allocate a new copy + result = append(result, &res) + } + return result +} + +func (fs *Filters) ActionForFilter(ipnet net.IPNet) (action Action, ok bool) { + if _, f := fs.find(ipnet); f != nil { + return f.action, true + } + return ActionNone, false +} + +// FiltersForAction returns the filters associated with the indicated action. 
+func (fs *Filters) FiltersForAction(action Action) (result []net.IPNet) { + fs.mu.RLock() + defer fs.mu.RUnlock() + + for _, ff := range fs.filters { + if ff.action == action { + result = append(result, ff.f) + } + } + return result +} diff --git a/vendor/github.com/multiformats/go-multiaddr/go.mod b/vendor/github.com/multiformats/go-multiaddr/go.mod new file mode 100644 index 0000000000..f9ee6fb7cd --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/go.mod @@ -0,0 +1,11 @@ +module github.com/multiformats/go-multiaddr + +go 1.13 + +require ( + github.com/ipfs/go-cid v0.0.7 + github.com/libp2p/go-maddr-filter v0.1.0 + github.com/multiformats/go-multiaddr-net v0.2.0 + github.com/multiformats/go-multihash v0.0.14 + github.com/multiformats/go-varint v0.0.6 +) diff --git a/vendor/github.com/multiformats/go-multiaddr/go.sum b/vendor/github.com/multiformats/go-multiaddr/go.sum new file mode 100644 index 0000000000..1818ca1d8f --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/go.sum @@ -0,0 +1,38 @@ +github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY= +github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/libp2p/go-maddr-filter v0.1.0 h1:4ACqZKw8AqiuJfwFGq1CYDFugfXTOos+qQ3DETkhtCE= +github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.3 
h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= +github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= +github.com/multiformats/go-multiaddr-net v0.2.0 h1:MSXRGN0mFymt6B1yo/6BPnIRpLPEnKgQNvVfCX5VDJk= +github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= +github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.14 h1:QoBceQYQQtNUuf6s7wHxnE2c8bhbMqhfGzNI032se/I= +github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= +github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/multiformats/go-multiaddr/interface.go b/vendor/github.com/multiformats/go-multiaddr/interface.go new file mode 100644 index 0000000000..82cc764010 --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/interface.go @@ -0,0 +1,62 @@ +package multiaddr + +import ( + "encoding" + "encoding/json" +) + +/* +Multiaddr is a cross-protocol, cross-platform format for representing +internet addresses. It emphasizes explicitness and self-description. +Learn more here: https://github.com/multiformats/multiaddr + +Multiaddrs have both a binary and string representation. + + import ma "github.com/multiformats/go-multiaddr" + + addr, err := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/80") + // err non-nil when parsing failed. + +*/ +type Multiaddr interface { + json.Marshaler + json.Unmarshaler + encoding.TextMarshaler + encoding.TextUnmarshaler + encoding.BinaryMarshaler + encoding.BinaryUnmarshaler + + // Equal returns whether two Multiaddrs are exactly equal + Equal(Multiaddr) bool + + // Bytes returns the []byte representation of this Multiaddr + // + // This function may expose immutable, internal state. Do not modify. 
+ Bytes() []byte + + // String returns the string representation of this Multiaddr + // (may panic if internal state is corrupted) + String() string + + // Protocols returns the list of Protocols this Multiaddr includes + // will panic if protocol code incorrect (and bytes accessed incorrectly) + Protocols() []Protocol + + // Encapsulate wraps this Multiaddr around another. For example: + // + // /ip4/1.2.3.4 encapsulate /tcp/80 = /ip4/1.2.3.4/tcp/80 + // + Encapsulate(Multiaddr) Multiaddr + + // Decapsultate removes a Multiaddr wrapping. For example: + // + // /ip4/1.2.3.4/tcp/80 decapsulate /ip4/1.2.3.4 = /tcp/80 + // + Decapsulate(Multiaddr) Multiaddr + + // ValueForProtocol returns the value (if any) following the specified protocol + // + // Note: protocols can appear multiple times in a single multiaddr. + // Consider using `ForEach` to walk over the addr manually. + ValueForProtocol(code int) (string, error) +} diff --git a/vendor/github.com/multiformats/go-multiaddr/multiaddr.go b/vendor/github.com/multiformats/go-multiaddr/multiaddr.go new file mode 100644 index 0000000000..58fe8cecbc --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/multiaddr.go @@ -0,0 +1,186 @@ +package multiaddr + +import ( + "bytes" + "encoding/json" + "fmt" + "log" + "strings" +) + +// multiaddr is the data structure representing a Multiaddr +type multiaddr struct { + bytes []byte +} + +// NewMultiaddr parses and validates an input string, returning a *Multiaddr +func NewMultiaddr(s string) (a Multiaddr, err error) { + defer func() { + if e := recover(); e != nil { + log.Printf("Panic in NewMultiaddr on input %q: %s", s, e) + err = fmt.Errorf("%v", e) + } + }() + b, err := stringToBytes(s) + if err != nil { + return nil, err + } + return &multiaddr{bytes: b}, nil +} + +// NewMultiaddrBytes initializes a Multiaddr from a byte representation. +// It validates it as an input string. 
+func NewMultiaddrBytes(b []byte) (a Multiaddr, err error) { + defer func() { + if e := recover(); e != nil { + log.Printf("Panic in NewMultiaddrBytes on input %q: %s", b, e) + err = fmt.Errorf("%v", e) + } + }() + + if err := validateBytes(b); err != nil { + return nil, err + } + + return &multiaddr{bytes: b}, nil +} + +// Equal tests whether two multiaddrs are equal +func (m *multiaddr) Equal(m2 Multiaddr) bool { + return bytes.Equal(m.bytes, m2.Bytes()) +} + +// Bytes returns the []byte representation of this Multiaddr +// +// Do not modify the returned buffer, it may be shared. +func (m *multiaddr) Bytes() []byte { + return m.bytes +} + +// String returns the string representation of a Multiaddr +func (m *multiaddr) String() string { + s, err := bytesToString(m.bytes) + if err != nil { + panic(fmt.Errorf("multiaddr failed to convert back to string. corrupted? %s", err)) + } + return s +} + +func (m *multiaddr) MarshalBinary() ([]byte, error) { + return m.Bytes(), nil +} + +func (m *multiaddr) UnmarshalBinary(data []byte) error { + new, err := NewMultiaddrBytes(data) + if err != nil { + return err + } + *m = *(new.(*multiaddr)) + return nil +} + +func (m *multiaddr) MarshalText() ([]byte, error) { + return []byte(m.String()), nil +} + +func (m *multiaddr) UnmarshalText(data []byte) error { + new, err := NewMultiaddr(string(data)) + if err != nil { + return err + } + *m = *(new.(*multiaddr)) + return nil +} + +func (m *multiaddr) MarshalJSON() ([]byte, error) { + return json.Marshal(m.String()) +} + +func (m *multiaddr) UnmarshalJSON(data []byte) error { + var v string + if err := json.Unmarshal(data, &v); err != nil { + return err + } + new, err := NewMultiaddr(v) + *m = *(new.(*multiaddr)) + return err +} + +// Protocols returns the list of protocols this Multiaddr has. +// will panic in case we access bytes incorrectly. 
+func (m *multiaddr) Protocols() []Protocol { + ps := make([]Protocol, 0, 8) + b := m.bytes + for len(b) > 0 { + code, n, err := ReadVarintCode(b) + if err != nil { + panic(err) + } + + p := ProtocolWithCode(code) + if p.Code == 0 { + // this is a panic (and not returning err) because this should've been + // caught on constructing the Multiaddr + panic(fmt.Errorf("no protocol with code %d", b[0])) + } + ps = append(ps, p) + b = b[n:] + + n, size, err := sizeForAddr(p, b) + if err != nil { + panic(err) + } + + b = b[n+size:] + } + return ps +} + +// Encapsulate wraps a given Multiaddr, returning the resulting joined Multiaddr +func (m *multiaddr) Encapsulate(o Multiaddr) Multiaddr { + mb := m.bytes + ob := o.Bytes() + + b := make([]byte, len(mb)+len(ob)) + copy(b, mb) + copy(b[len(mb):], ob) + return &multiaddr{bytes: b} +} + +// Decapsulate unwraps Multiaddr up until the given Multiaddr is found. +func (m *multiaddr) Decapsulate(o Multiaddr) Multiaddr { + s1 := m.String() + s2 := o.String() + i := strings.LastIndex(s1, s2) + if i < 0 { + // if multiaddr not contained, returns a copy. 
+ cpy := make([]byte, len(m.bytes)) + copy(cpy, m.bytes) + return &multiaddr{bytes: cpy} + } + + if i == 0 { + return nil + } + + ma, err := NewMultiaddr(s1[:i]) + if err != nil { + panic("Multiaddr.Decapsulate incorrect byte boundaries.") + } + return ma +} + +var ErrProtocolNotFound = fmt.Errorf("protocol not found in multiaddr") + +func (m *multiaddr) ValueForProtocol(code int) (value string, err error) { + err = ErrProtocolNotFound + ForEach(m, func(c Component) bool { + if c.Protocol().Code == code { + value = c.Value() + err = nil + return false + } + return true + }) + return +} diff --git a/vendor/github.com/multiformats/go-multiaddr/package.json b/vendor/github.com/multiformats/go-multiaddr/package.json new file mode 100644 index 0000000000..c493b27e54 --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/package.json @@ -0,0 +1,23 @@ +{ + "author": "multiformats", + "bugs": { + "url": "https://github.com/multiformats/go-multiaddr/issues" + }, + "gx": { + "dvcsimport": "github.com/multiformats/go-multiaddr" + }, + "gxDependencies": [ + { + "hash": "QmerPMzPk1mJVowm8KgmoknWa4yCYvvugMPsgWmDNUvDLW", + "name": "go-multihash", + "version": "1.0.9" + } + ], + "gxVersion": "0.9.0", + "language": "go", + "license": "MIT", + "name": "go-multiaddr", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "1.4.1" +} + diff --git a/vendor/github.com/multiformats/go-multiaddr/protocol.go b/vendor/github.com/multiformats/go-multiaddr/protocol.go new file mode 100644 index 0000000000..4be1b4e224 --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/protocol.go @@ -0,0 +1,119 @@ +package multiaddr + +import ( + "fmt" + "strings" +) + +// These are special sizes +const ( + LengthPrefixedVarSize = -1 +) + +// Protocol is a Multiaddr protocol description structure. +type Protocol struct { + // Name is the string representation of the protocol code. E.g., ip4, + // ip6, tcp, udp, etc. 
+ Name string + + // Code is the protocol's multicodec (a normal, non-varint number). + Code int + + // VCode is a precomputed varint encoded version of Code. + VCode []byte + + // Size is the size of the argument to this protocol. + // + // * Size == 0 means this protocol takes no argument. + // * Size > 0 means this protocol takes a constant sized argument. + // * Size < 0 means this protocol takes a variable length, varint + // prefixed argument. + Size int // a size of -1 indicates a length-prefixed variable size + + // Path indicates a path protocol (e.g., unix). When parsing multiaddr + // strings, path protocols consume the remainder of the address instead + // of stopping at the next forward slash. + // + // Size must be LengthPrefixedVarSize. + Path bool + + // Transcoder converts between the byte representation and the string + // representation of this protocol's argument (if any). + // + // This should only be non-nil if Size != 0 + Transcoder Transcoder +} + +var protocolsByName = map[string]Protocol{} +var protocolsByCode = map[int]Protocol{} + +// Protocols is the list of multiaddr protocols supported by this module. +var Protocols = []Protocol{} + +// SwapToP2pMultiaddrs is a function to make the transition from /ipfs/... +// multiaddrs to /p2p/... multiaddrs easier +// The first stage of the rollout is to ship this package to all users so +// that all users of multiaddr can parse both /ipfs/ and /p2p/ multiaddrs +// as the same code (P_P2P). During this stage of the rollout, all addresses +// with P_P2P will continue printing as /ipfs/, so that older clients without +// the new parsing code won't break. +// Once the network has adopted the new parsing code broadly enough, users of +// multiaddr can add a call to this method to an init function in their codebase. +// This will cause any P_P2P multiaddr to print out as /p2p/ instead of /ipfs/. +// Note that the binary serialization of this multiaddr does not change at any +// point. 
This means that this code is not a breaking network change at any point +// +// DEPRECATED: this is now the default +func SwapToP2pMultiaddrs() { +} + +func AddProtocol(p Protocol) error { + if _, ok := protocolsByName[p.Name]; ok { + return fmt.Errorf("protocol by the name %q already exists", p.Name) + } + + if _, ok := protocolsByCode[p.Code]; ok { + return fmt.Errorf("protocol code %d already taken by %q", p.Code, p.Code) + } + + if p.Size != 0 && p.Transcoder == nil { + return fmt.Errorf("protocols with arguments must define transcoders") + } + if p.Path && p.Size >= 0 { + return fmt.Errorf("path protocols must have variable-length sizes") + } + + Protocols = append(Protocols, p) + protocolsByName[p.Name] = p + protocolsByCode[p.Code] = p + return nil +} + +// ProtocolWithName returns the Protocol description with given string name. +func ProtocolWithName(s string) Protocol { + return protocolsByName[s] +} + +// ProtocolWithCode returns the Protocol description with given protocol code. +func ProtocolWithCode(c int) Protocol { + return protocolsByCode[c] +} + +// ProtocolsWithString returns a slice of protocols matching given string. +func ProtocolsWithString(s string) ([]Protocol, error) { + s = strings.Trim(s, "/") + sp := strings.Split(s, "/") + if len(sp) == 0 { + return nil, nil + } + + t := make([]Protocol, len(sp)) + for i, name := range sp { + p := ProtocolWithName(name) + if p.Code == 0 { + return nil, fmt.Errorf("no protocol with name: %s", name) + } + t[i] = p + } + return t, nil +} diff --git a/vendor/github.com/multiformats/go-multiaddr/protocols.go b/vendor/github.com/multiformats/go-multiaddr/protocols.go new file mode 100644 index 0000000000..d6df859504 --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/protocols.go @@ -0,0 +1,249 @@ +package multiaddr + +// You **MUST** register your multicodecs with +// https://github.com/multiformats/multicodec before adding them here. 
+const ( + P_IP4 = 0x0004 + P_TCP = 0x0006 + P_DNS = 0x0035 // 4 or 6 + P_DNS4 = 0x0036 + P_DNS6 = 0x0037 + P_DNSADDR = 0x0038 + P_UDP = 0x0111 + P_DCCP = 0x0021 + P_IP6 = 0x0029 + P_IP6ZONE = 0x002A + P_QUIC = 0x01CC + P_SCTP = 0x0084 + P_CIRCUIT = 0x0122 + P_UDT = 0x012D + P_UTP = 0x012E + P_UNIX = 0x0190 + P_P2P = 0x01A5 + P_IPFS = 0x01A5 // alias for backwards compatability + P_HTTP = 0x01E0 + P_HTTPS = 0x01BB + P_ONION = 0x01BC // also for backwards compatibility + P_ONION3 = 0x01BD + P_GARLIC64 = 0x01BE + P_GARLIC32 = 0x01BF + P_P2P_WEBRTC_DIRECT = 0x0114 + P_WS = 0x01DD + P_WSS = 0x01DE +) + +var ( + protoIP4 = Protocol{ + Name: "ip4", + Code: P_IP4, + VCode: CodeToVarint(P_IP4), + Size: 32, + Path: false, + Transcoder: TranscoderIP4, + } + protoTCP = Protocol{ + Name: "tcp", + Code: P_TCP, + VCode: CodeToVarint(P_TCP), + Size: 16, + Path: false, + Transcoder: TranscoderPort, + } + protoDNS = Protocol{ + Code: P_DNS, + Size: LengthPrefixedVarSize, + Name: "dns", + VCode: CodeToVarint(P_DNS), + Transcoder: TranscoderDns, + } + protoDNS4 = Protocol{ + Code: P_DNS4, + Size: LengthPrefixedVarSize, + Name: "dns4", + VCode: CodeToVarint(P_DNS4), + Transcoder: TranscoderDns, + } + protoDNS6 = Protocol{ + Code: P_DNS6, + Size: LengthPrefixedVarSize, + Name: "dns6", + VCode: CodeToVarint(P_DNS6), + Transcoder: TranscoderDns, + } + protoDNSADDR = Protocol{ + Code: P_DNSADDR, + Size: LengthPrefixedVarSize, + Name: "dnsaddr", + VCode: CodeToVarint(P_DNSADDR), + Transcoder: TranscoderDns, + } + protoUDP = Protocol{ + Name: "udp", + Code: P_UDP, + VCode: CodeToVarint(P_UDP), + Size: 16, + Path: false, + Transcoder: TranscoderPort, + } + protoDCCP = Protocol{ + Name: "dccp", + Code: P_DCCP, + VCode: CodeToVarint(P_DCCP), + Size: 16, + Path: false, + Transcoder: TranscoderPort, + } + protoIP6 = Protocol{ + Name: "ip6", + Code: P_IP6, + VCode: CodeToVarint(P_IP6), + Size: 128, + Transcoder: TranscoderIP6, + } + // these require varint + protoIP6ZONE = Protocol{ + Name: 
"ip6zone", + Code: P_IP6ZONE, + VCode: CodeToVarint(P_IP6ZONE), + Size: LengthPrefixedVarSize, + Path: false, + Transcoder: TranscoderIP6Zone, + } + protoSCTP = Protocol{ + Name: "sctp", + Code: P_SCTP, + VCode: CodeToVarint(P_SCTP), + Size: 16, + Transcoder: TranscoderPort, + } + + protoCIRCUIT = Protocol{ + Code: P_CIRCUIT, + Size: 0, + Name: "p2p-circuit", + VCode: CodeToVarint(P_CIRCUIT), + } + + protoONION2 = Protocol{ + Name: "onion", + Code: P_ONION, + VCode: CodeToVarint(P_ONION), + Size: 96, + Transcoder: TranscoderOnion, + } + protoONION3 = Protocol{ + Name: "onion3", + Code: P_ONION3, + VCode: CodeToVarint(P_ONION3), + Size: 296, + Transcoder: TranscoderOnion3, + } + protoGARLIC64 = Protocol{ + Name: "garlic64", + Code: P_GARLIC64, + VCode: CodeToVarint(P_GARLIC64), + Size: LengthPrefixedVarSize, + Transcoder: TranscoderGarlic64, + } + protoGARLIC32 = Protocol{ + Name: "garlic32", + Code: P_GARLIC32, + VCode: CodeToVarint(P_GARLIC32), + Size: LengthPrefixedVarSize, + Transcoder: TranscoderGarlic32, + } + protoUTP = Protocol{ + Name: "utp", + Code: P_UTP, + VCode: CodeToVarint(P_UTP), + } + protoUDT = Protocol{ + Name: "udt", + Code: P_UDT, + VCode: CodeToVarint(P_UDT), + } + protoQUIC = Protocol{ + Name: "quic", + Code: P_QUIC, + VCode: CodeToVarint(P_QUIC), + } + protoHTTP = Protocol{ + Name: "http", + Code: P_HTTP, + VCode: CodeToVarint(P_HTTP), + } + protoHTTPS = Protocol{ + Name: "https", + Code: P_HTTPS, + VCode: CodeToVarint(P_HTTPS), + } + protoP2P = Protocol{ + Name: "p2p", + Code: P_P2P, + VCode: CodeToVarint(P_P2P), + Size: LengthPrefixedVarSize, + Transcoder: TranscoderP2P, + } + protoUNIX = Protocol{ + Name: "unix", + Code: P_UNIX, + VCode: CodeToVarint(P_UNIX), + Size: LengthPrefixedVarSize, + Path: true, + Transcoder: TranscoderUnix, + } + protoP2P_WEBRTC_DIRECT = Protocol{ + Name: "p2p-webrtc-direct", + Code: P_P2P_WEBRTC_DIRECT, + VCode: CodeToVarint(P_P2P_WEBRTC_DIRECT), + } + protoWS = Protocol{ + Name: "ws", + Code: P_WS, + VCode: 
CodeToVarint(P_WS), + } + protoWSS = Protocol{ + Name: "wss", + Code: P_WSS, + VCode: CodeToVarint(P_WSS), + } +) + +func init() { + for _, p := range []Protocol{ + protoIP4, + protoTCP, + protoDNS, + protoDNS4, + protoDNS6, + protoDNSADDR, + protoUDP, + protoDCCP, + protoIP6, + protoIP6ZONE, + protoSCTP, + protoCIRCUIT, + protoONION2, + protoONION3, + protoGARLIC64, + protoGARLIC32, + protoUTP, + protoUDT, + protoQUIC, + protoHTTP, + protoHTTPS, + protoP2P, + protoUNIX, + protoP2P_WEBRTC_DIRECT, + protoWS, + protoWSS, + } { + if err := AddProtocol(p); err != nil { + panic(err) + } + } + + // explicitly set both of these + protocolsByName["p2p"] = protoP2P + protocolsByName["ipfs"] = protoP2P +} diff --git a/vendor/github.com/multiformats/go-multiaddr/transcoders.go b/vendor/github.com/multiformats/go-multiaddr/transcoders.go new file mode 100644 index 0000000000..f4a59c30f3 --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/transcoders.go @@ -0,0 +1,359 @@ +package multiaddr + +import ( + "bytes" + "encoding/base32" + "encoding/base64" + "encoding/binary" + "fmt" + "net" + "strconv" + "strings" + + "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" +) + +type Transcoder interface { + // Validates and encodes to bytes a multiaddr that's in the string representation. + StringToBytes(string) ([]byte, error) + // Validates and decodes to a string a multiaddr that's in the bytes representation. + BytesToString([]byte) (string, error) + // Validates bytes when parsing a multiaddr that's already in the bytes representation. 
+ ValidateBytes([]byte) error +} + +func NewTranscoderFromFunctions( + s2b func(string) ([]byte, error), + b2s func([]byte) (string, error), + val func([]byte) error, +) Transcoder { + return twrp{s2b, b2s, val} +} + +type twrp struct { + strtobyte func(string) ([]byte, error) + bytetostr func([]byte) (string, error) + validbyte func([]byte) error +} + +func (t twrp) StringToBytes(s string) ([]byte, error) { + return t.strtobyte(s) +} +func (t twrp) BytesToString(b []byte) (string, error) { + return t.bytetostr(b) +} + +func (t twrp) ValidateBytes(b []byte) error { + if t.validbyte == nil { + return nil + } + return t.validbyte(b) +} + +var TranscoderIP4 = NewTranscoderFromFunctions(ip4StB, ip4BtS, nil) +var TranscoderIP6 = NewTranscoderFromFunctions(ip6StB, ip6BtS, nil) +var TranscoderIP6Zone = NewTranscoderFromFunctions(ip6zoneStB, ip6zoneBtS, ip6zoneVal) + +func ip4StB(s string) ([]byte, error) { + i := net.ParseIP(s).To4() + if i == nil { + return nil, fmt.Errorf("failed to parse ip4 addr: %s", s) + } + return i, nil +} + +func ip6zoneStB(s string) ([]byte, error) { + if len(s) == 0 { + return nil, fmt.Errorf("empty ip6zone") + } + if strings.Contains(s, "/") { + return nil, fmt.Errorf("IPv6 zone ID contains '/': %s", s) + } + return []byte(s), nil +} + +func ip6zoneBtS(b []byte) (string, error) { + if len(b) == 0 { + return "", fmt.Errorf("invalid length (should be > 0)") + } + return string(b), nil +} + +func ip6zoneVal(b []byte) error { + if len(b) == 0 { + return fmt.Errorf("invalid length (should be > 0)") + } + // Not supported as this would break multiaddrs. 
+ if bytes.IndexByte(b, '/') >= 0 { + return fmt.Errorf("IPv6 zone ID contains '/': %s", string(b)) + } + return nil +} + +func ip6StB(s string) ([]byte, error) { + i := net.ParseIP(s).To16() + if i == nil { + return nil, fmt.Errorf("failed to parse ip6 addr: %s", s) + } + return i, nil +} + +func ip6BtS(b []byte) (string, error) { + ip := net.IP(b) + if ip4 := ip.To4(); ip4 != nil { + // Go fails to prepend the `::ffff:` part. + return "::ffff:" + ip4.String(), nil + } + return ip.String(), nil +} + +func ip4BtS(b []byte) (string, error) { + return net.IP(b).String(), nil +} + +var TranscoderPort = NewTranscoderFromFunctions(portStB, portBtS, nil) + +func portStB(s string) ([]byte, error) { + i, err := strconv.Atoi(s) + if err != nil { + return nil, fmt.Errorf("failed to parse port addr: %s", err) + } + if i >= 65536 { + return nil, fmt.Errorf("failed to parse port addr: %s", "greater than 65536") + } + b := make([]byte, 2) + binary.BigEndian.PutUint16(b, uint16(i)) + return b, nil +} + +func portBtS(b []byte) (string, error) { + i := binary.BigEndian.Uint16(b) + return strconv.Itoa(int(i)), nil +} + +var TranscoderOnion = NewTranscoderFromFunctions(onionStB, onionBtS, nil) + +func onionStB(s string) ([]byte, error) { + addr := strings.Split(s, ":") + if len(addr) != 2 { + return nil, fmt.Errorf("failed to parse onion addr: %s does not contain a port number.", s) + } + + // onion address without the ".onion" substring + if len(addr[0]) != 16 { + return nil, fmt.Errorf("failed to parse onion addr: %s not a Tor onion address.", s) + } + onionHostBytes, err := base32.StdEncoding.DecodeString(strings.ToUpper(addr[0])) + if err != nil { + return nil, fmt.Errorf("failed to decode base32 onion addr: %s %s", s, err) + } + + // onion port number + i, err := strconv.Atoi(addr[1]) + if err != nil { + return nil, fmt.Errorf("failed to parse onion addr: %s", err) + } + if i >= 65536 { + return nil, fmt.Errorf("failed to parse onion addr: %s", "port greater than 65536") + } + 
if i < 1 { + return nil, fmt.Errorf("failed to parse onion addr: %s", "port less than 1") + } + + onionPortBytes := make([]byte, 2) + binary.BigEndian.PutUint16(onionPortBytes, uint16(i)) + bytes := []byte{} + bytes = append(bytes, onionHostBytes...) + bytes = append(bytes, onionPortBytes...) + return bytes, nil +} + +func onionBtS(b []byte) (string, error) { + addr := strings.ToLower(base32.StdEncoding.EncodeToString(b[0:10])) + port := binary.BigEndian.Uint16(b[10:12]) + return addr + ":" + strconv.Itoa(int(port)), nil +} + +var TranscoderOnion3 = NewTranscoderFromFunctions(onion3StB, onion3BtS, nil) + +func onion3StB(s string) ([]byte, error) { + addr := strings.Split(s, ":") + if len(addr) != 2 { + return nil, fmt.Errorf("failed to parse onion addr: %s does not contain a port number.", s) + } + + // onion address without the ".onion" substring + if len(addr[0]) != 56 { + return nil, fmt.Errorf("failed to parse onion addr: %s not a Tor onionv3 address. len == %d", s, len(addr[0])) + } + onionHostBytes, err := base32.StdEncoding.DecodeString(strings.ToUpper(addr[0])) + if err != nil { + return nil, fmt.Errorf("failed to decode base32 onion addr: %s %s", s, err) + } + + // onion port number + i, err := strconv.Atoi(addr[1]) + if err != nil { + return nil, fmt.Errorf("failed to parse onion addr: %s", err) + } + if i >= 65536 { + return nil, fmt.Errorf("failed to parse onion addr: %s", "port greater than 65536") + } + if i < 1 { + return nil, fmt.Errorf("failed to parse onion addr: %s", "port less than 1") + } + + onionPortBytes := make([]byte, 2) + binary.BigEndian.PutUint16(onionPortBytes, uint16(i)) + bytes := []byte{} + bytes = append(bytes, onionHostBytes[0:35]...) + bytes = append(bytes, onionPortBytes...) 
+ return bytes, nil +} + +func onion3BtS(b []byte) (string, error) { + addr := strings.ToLower(base32.StdEncoding.EncodeToString(b[0:35])) + port := binary.BigEndian.Uint16(b[35:37]) + str := addr + ":" + strconv.Itoa(int(port)) + return str, nil +} + +var TranscoderGarlic64 = NewTranscoderFromFunctions(garlic64StB, garlic64BtS, garlic64Validate) + +// i2p uses an alternate character set for base64 addresses. This returns an appropriate encoder. +var garlicBase64Encoding = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-~") + +func garlic64StB(s string) ([]byte, error) { + // i2p base64 address will be between 516 and 616 characters long, depending on + // certificate type + if len(s) < 516 || len(s) > 616 { + return nil, fmt.Errorf("failed to parse garlic addr: %s not an i2p base64 address. len: %d\n", s, len(s)) + } + garlicHostBytes, err := garlicBase64Encoding.DecodeString(s) + if err != nil { + return nil, fmt.Errorf("failed to decode base64 i2p addr: %s %s", s, err) + } + + return garlicHostBytes, nil +} + +func garlic64BtS(b []byte) (string, error) { + if err := garlic64Validate(b); err != nil { + return "", err + } + addr := garlicBase64Encoding.EncodeToString(b) + return addr, nil +} + +func garlic64Validate(b []byte) error { + // A garlic64 address will always be greater than 386 bytes long when encoded. + if len(b) < 386 { + return fmt.Errorf("failed to validate garlic addr: %s not an i2p base64 address. len: %d\n", b, len(b)) + } + return nil +} + +var TranscoderGarlic32 = NewTranscoderFromFunctions(garlic32StB, garlic32BtS, garlic32Validate) + +var garlicBase32Encoding = base32.NewEncoding("abcdefghijklmnopqrstuvwxyz234567") + +func garlic32StB(s string) ([]byte, error) { + // an i2p base32 address with a length of greater than 55 characters is + // using an Encrypted Leaseset v2. 
all other base32 addresses will always be + // exactly 52 characters + if len(s) < 55 && len(s) != 52 { + return nil, fmt.Errorf("failed to parse garlic addr: %s not a i2p base32 address. len: %d", s, len(s)) + } + for len(s)%8 != 0 { + s += "=" + } + garlicHostBytes, err := garlicBase32Encoding.DecodeString(s) + if err != nil { + return nil, fmt.Errorf("failed to decode base32 garlic addr: %s, err: %v len: %v", s, err, len(s)) + } + return garlicHostBytes, nil +} + +func garlic32BtS(b []byte) (string, error) { + if err := garlic32Validate(b); err != nil { + return "", err + } + return strings.TrimRight(garlicBase32Encoding.EncodeToString(b), "="), nil +} + +func garlic32Validate(b []byte) error { + // an i2p base64 for an Encrypted Leaseset v2 will be at least 35 bytes + // long other than that, they will be exactly 32 bytes + if len(b) < 35 && len(b) != 32 { + return fmt.Errorf("failed to validate garlic addr: %s not an i2p base32 address. len: %d\n", b, len(b)) + } + return nil +} + +var TranscoderP2P = NewTranscoderFromFunctions(p2pStB, p2pBtS, p2pVal) + +// The encoded peer ID can either be a CID of a key or a raw multihash (identity +// or sha256-256). 
+func p2pStB(s string) ([]byte, error) { + // check if the address is a base58 encoded sha256 or identity multihash + if strings.HasPrefix(s, "Qm") || strings.HasPrefix(s, "1") { + m, err := mh.FromB58String(s) + if err != nil { + return nil, fmt.Errorf("failed to parse p2p addr: %s %s", s, err) + } + return m, nil + } + + // check if the address is a CID + c, err := cid.Decode(s) + if err != nil { + return nil, fmt.Errorf("failed to parse p2p addr: %s %s", s, err) + } + + if ty := c.Type(); ty == cid.Libp2pKey { + return c.Hash(), nil + } else { + return nil, fmt.Errorf("failed to parse p2p addr: %s has the invalid codec %d", s, ty) + } +} + +func p2pVal(b []byte) error { + _, err := mh.Cast(b) + return err +} + +func p2pBtS(b []byte) (string, error) { + m, err := mh.Cast(b) + if err != nil { + return "", err + } + return m.B58String(), nil +} + +var TranscoderUnix = NewTranscoderFromFunctions(unixStB, unixBtS, nil) + +func unixStB(s string) ([]byte, error) { + return []byte(s), nil +} + +func unixBtS(b []byte) (string, error) { + return string(b), nil +} + +var TranscoderDns = NewTranscoderFromFunctions(dnsStB, dnsBtS, dnsVal) + +func dnsVal(b []byte) error { + if bytes.IndexByte(b, '/') >= 0 { + return fmt.Errorf("domain name %q contains a slash", string(b)) + } + return nil +} + +func dnsStB(s string) ([]byte, error) { + return []byte(s), nil +} + +func dnsBtS(b []byte) (string, error) { + return string(b), nil +} diff --git a/vendor/github.com/multiformats/go-multiaddr/util.go b/vendor/github.com/multiformats/go-multiaddr/util.go new file mode 100644 index 0000000000..cf4469affa --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/util.go @@ -0,0 +1,180 @@ +package multiaddr + +import "fmt" + +// Split returns the sub-address portions of a multiaddr. 
+func Split(m Multiaddr) []Multiaddr { + if _, ok := m.(*Component); ok { + return []Multiaddr{m} + } + var addrs []Multiaddr + ForEach(m, func(c Component) bool { + addrs = append(addrs, &c) + return true + }) + return addrs +} + +// Join returns a combination of addresses. +func Join(ms ...Multiaddr) Multiaddr { + switch len(ms) { + case 0: + // empty multiaddr, unfortunately, we have callers that rely on + // this contract. + return &multiaddr{} + case 1: + return ms[0] + } + + length := 0 + bs := make([][]byte, len(ms)) + for i, m := range ms { + bs[i] = m.Bytes() + length += len(bs[i]) + } + + bidx := 0 + b := make([]byte, length) + for _, mb := range bs { + bidx += copy(b[bidx:], mb) + } + return &multiaddr{bytes: b} +} + +// Cast re-casts a byte slice as a multiaddr. will panic if it fails to parse. +func Cast(b []byte) Multiaddr { + m, err := NewMultiaddrBytes(b) + if err != nil { + panic(fmt.Errorf("multiaddr failed to parse: %s", err)) + } + return m +} + +// StringCast like Cast, but parses a string. Will also panic if it fails to parse. +func StringCast(s string) Multiaddr { + m, err := NewMultiaddr(s) + if err != nil { + panic(fmt.Errorf("multiaddr failed to parse: %s", err)) + } + return m +} + +// SplitFirst returns the first component and the rest of the multiaddr. +func SplitFirst(m Multiaddr) (*Component, Multiaddr) { + // Shortcut if we already have a component + if c, ok := m.(*Component); ok { + return c, nil + } + + b := m.Bytes() + if len(b) == 0 { + return nil, nil + } + n, c, err := readComponent(b) + if err != nil { + panic(err) + } + if len(b) == n { + return &c, nil + } + return &c, &multiaddr{b[n:]} +} + +// SplitLast returns the rest of the multiaddr and the last component. 
+func SplitLast(m Multiaddr) (Multiaddr, *Component) { + // Shortcut if we already have a component + if c, ok := m.(*Component); ok { + return nil, c + } + + b := m.Bytes() + if len(b) == 0 { + return nil, nil + } + + var ( + c Component + err error + offset int + ) + for { + var n int + n, c, err = readComponent(b[offset:]) + if err != nil { + panic(err) + } + if len(b) == n+offset { + // Reached end + if offset == 0 { + // Only one component + return nil, &c + } + return &multiaddr{b[:offset]}, &c + } + offset += n + } +} + +// SplitFunc splits the multiaddr when the callback first returns true. The +// component on which the callback first returns will be included in the +// *second* multiaddr. +func SplitFunc(m Multiaddr, cb func(Component) bool) (Multiaddr, Multiaddr) { + // Shortcut if we already have a component + if c, ok := m.(*Component); ok { + if cb(*c) { + return nil, m + } + return m, nil + } + b := m.Bytes() + if len(b) == 0 { + return nil, nil + } + var ( + c Component + err error + offset int + ) + for offset < len(b) { + var n int + n, c, err = readComponent(b[offset:]) + if err != nil { + panic(err) + } + if cb(c) { + break + } + offset += n + } + switch offset { + case 0: + return nil, m + case len(b): + return m, nil + default: + return &multiaddr{b[:offset]}, &multiaddr{b[offset:]} + } +} + +// ForEach walks over the multiaddr, component by component. +// +// This function iterates over components *by value* to avoid allocating. 
+func ForEach(m Multiaddr, cb func(c Component) bool) { + // Shortcut if we already have a component + if c, ok := m.(*Component); ok { + cb(*c) + return + } + + b := m.Bytes() + for len(b) > 0 { + n, c, err := readComponent(b) + if err != nil { + panic(err) + } + if !cb(c) { + return + } + b = b[n:] + } +} diff --git a/vendor/github.com/multiformats/go-multiaddr/varint.go b/vendor/github.com/multiformats/go-multiaddr/varint.go new file mode 100644 index 0000000000..d1ea7fc4fc --- /dev/null +++ b/vendor/github.com/multiformats/go-multiaddr/varint.go @@ -0,0 +1,27 @@ +package multiaddr + +import ( + "math" + + "github.com/multiformats/go-varint" +) + +// CodeToVarint converts an integer to a varint-encoded []byte +func CodeToVarint(num int) []byte { + if num < 0 || num > math.MaxInt32 { + panic("invalid code") + } + return varint.ToUvarint(uint64(num)) +} + +func ReadVarintCode(b []byte) (int, int, error) { + code, n, err := varint.FromUvarint(b) + if err != nil { + return 0, 0, err + } + if code > math.MaxInt32 { + // we only allow 32bit codes. 
+ return 0, 0, varint.ErrOverflow + } + return int(code), n, err +} diff --git a/vendor/github.com/multiformats/go-multibase/.codecov.yml b/vendor/github.com/multiformats/go-multibase/.codecov.yml new file mode 100644 index 0000000000..db2472009c --- /dev/null +++ b/vendor/github.com/multiformats/go-multibase/.codecov.yml @@ -0,0 +1 @@ +comment: off diff --git a/vendor/github.com/multiformats/go-multibase/.gitignore b/vendor/github.com/multiformats/go-multibase/.gitignore new file mode 100644 index 0000000000..175b291646 --- /dev/null +++ b/vendor/github.com/multiformats/go-multibase/.gitignore @@ -0,0 +1,3 @@ +*.swp + +multibase-conv/multibase-conv diff --git a/vendor/github.com/multiformats/go-multibase/.gitmodules b/vendor/github.com/multiformats/go-multibase/.gitmodules new file mode 100644 index 0000000000..74c037fa66 --- /dev/null +++ b/vendor/github.com/multiformats/go-multibase/.gitmodules @@ -0,0 +1,3 @@ +[submodule "spec"] + path = spec + url = https://github.com/multiformats/multibase.git diff --git a/vendor/github.com/multiformats/go-multibase/.gxignore b/vendor/github.com/multiformats/go-multibase/.gxignore new file mode 100644 index 0000000000..c1d28ba5bb --- /dev/null +++ b/vendor/github.com/multiformats/go-multibase/.gxignore @@ -0,0 +1,2 @@ +/spec/ +*_test.go diff --git a/vendor/github.com/multiformats/go-multibase/.travis.yml b/vendor/github.com/multiformats/go-multibase/.travis.yml new file mode 100644 index 0000000000..18f4287d76 --- /dev/null +++ b/vendor/github.com/multiformats/go-multibase/.travis.yml @@ -0,0 +1,32 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gx + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/src/gx + - $GOPATH/pkg/mod + - /home/travis/.cache/go-build + +notifications: + 
email: false diff --git a/vendor/github.com/multiformats/go-multibase/LICENSE b/vendor/github.com/multiformats/go-multibase/LICENSE new file mode 100644 index 0000000000..f64ffb042d --- /dev/null +++ b/vendor/github.com/multiformats/go-multibase/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Protocol Labs Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/multiformats/go-multibase/Makefile b/vendor/github.com/multiformats/go-multibase/Makefile new file mode 100644 index 0000000000..411b4a8880 --- /dev/null +++ b/vendor/github.com/multiformats/go-multibase/Makefile @@ -0,0 +1,13 @@ +test: deps + go test -race -v ./... + +export IPFS_API ?= v04x.ipfs.io + +gx: + go get -u github.com/whyrusleeping/gx + go get -u github.com/whyrusleeping/gx-go + +deps: gx + gx --verbose install --global + gx-go rewrite + go get -t ./... 
diff --git a/vendor/github.com/multiformats/go-multibase/README.md b/vendor/github.com/multiformats/go-multibase/README.md new file mode 100644 index 0000000000..3c745445a8 --- /dev/null +++ b/vendor/github.com/multiformats/go-multibase/README.md @@ -0,0 +1,49 @@ +# go-multibase + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-multiformats-blue.svg?style=flat-square)](https://github.com/multiformats/multiformats) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](https://webchat.freenode.net/?channels=%23ipfs) +[![](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) +[![Travis CI](https://img.shields.io/travis/multiformats/go-multibase.svg?style=flat-square&branch=master)](https://travis-ci.org/multiformats/go-multibase) +[![codecov.io](https://img.shields.io/codecov/c/github/multiformats/go-multibase.svg?style=flat-square&branch=master)](https://codecov.io/github/multiformats/go-multibase?branch=master) + +> Implementation of [multibase](https://github.com/multiformats/multibase) -self identifying base encodings- in Go. + + +## Install + +`go-multibase` is a standard Go module which can be installed with: + +```sh +go get github.com/multiformats/go-multibase +``` + +Note that `go-multibase` is packaged with Gx, so it is recommended to use Gx to install and use it (see Usage section). + +## Usage + +This module is packaged with [Gx](https://github.com/whyrusleeping/gx). 
In order to use it in your own project it is recommended that you: + +```sh +go get -u github.com/whyrusleeping/gx +go get -u github.com/whyrusleeping/gx-go +cd +gx init +gx import github.com/multiformats/go-multibase +gx install --global +gx-go --rewrite +``` + +Please check [Gx](https://github.com/whyrusleeping/gx) and [Gx-go](https://github.com/whyrusleeping/gx-go) documentation for more information. + +## Contribute + +Contributions welcome. Please check out [the issues](https://github.com/multiformats/go-multibase/issues). + +Check out our [contributing document](https://github.com/multiformats/multiformats/blob/master/contributing.md) for more information on how we work, and about contributing in general. Please be aware that all interactions related to multiformats are subject to the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). + +Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + +## License + +[MIT](LICENSE) © 2016 Protocol Labs Inc. 
diff --git a/vendor/github.com/multiformats/go-multibase/base16.go b/vendor/github.com/multiformats/go-multibase/base16.go new file mode 100644 index 0000000000..6b8794191a --- /dev/null +++ b/vendor/github.com/multiformats/go-multibase/base16.go @@ -0,0 +1,21 @@ +package multibase + +func hexEncodeToStringUpper(src []byte) string { + dst := make([]byte, len(src)*2) + hexEncodeUpper(dst, src) + return string(dst) +} + +var hexTableUppers = [16]byte{ + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'A', 'B', 'C', 'D', 'E', 'F', +} + +func hexEncodeUpper(dst, src []byte) int { + for i, v := range src { + dst[i*2] = hexTableUppers[v>>4] + dst[i*2+1] = hexTableUppers[v&0x0f] + } + + return len(src) * 2 +} diff --git a/vendor/github.com/multiformats/go-multibase/base2.go b/vendor/github.com/multiformats/go-multibase/base2.go new file mode 100644 index 0000000000..6e3f0cfff2 --- /dev/null +++ b/vendor/github.com/multiformats/go-multibase/base2.go @@ -0,0 +1,52 @@ +package multibase + +import ( + "fmt" + "strconv" + "strings" +) + +// binaryEncodeToString takes an array of bytes and returns +// multibase binary representation +func binaryEncodeToString(src []byte) string { + dst := make([]byte, len(src)*8) + encodeBinary(dst, src) + return string(dst) +} + +// encodeBinary takes the src and dst bytes and converts each +// byte to their binary rep using power reduction method +func encodeBinary(dst []byte, src []byte) { + for i, b := range src { + for j := 0; j < 8; j++ { + if b&(1<>3) + + for i, dstIndex := 0, 0; i < len(s); i = i + 8 { + value, err := strconv.ParseInt(s[i:i+8], 2, 0) + if err != nil { + return nil, fmt.Errorf("error while conversion: %s", err) + } + + data[dstIndex] = byte(value) + dstIndex++ + } + + return data, nil +} diff --git a/vendor/github.com/multiformats/go-multibase/base32.go b/vendor/github.com/multiformats/go-multibase/base32.go new file mode 100644 index 0000000000..a6fe8eb064 --- /dev/null +++ 
b/vendor/github.com/multiformats/go-multibase/base32.go @@ -0,0 +1,17 @@ +package multibase + +import ( + b32 "github.com/multiformats/go-base32" +) + +var base32StdLowerPad = b32.NewEncodingCI("abcdefghijklmnopqrstuvwxyz234567") +var base32StdLowerNoPad = base32StdLowerPad.WithPadding(b32.NoPadding) + +var base32StdUpperPad = b32.NewEncodingCI("ABCDEFGHIJKLMNOPQRSTUVWXYZ234567") +var base32StdUpperNoPad = base32StdUpperPad.WithPadding(b32.NoPadding) + +var base32HexLowerPad = b32.NewEncodingCI("0123456789abcdefghijklmnopqrstuv") +var base32HexLowerNoPad = base32HexLowerPad.WithPadding(b32.NoPadding) + +var base32HexUpperPad = b32.NewEncodingCI("0123456789ABCDEFGHIJKLMNOPQRSTUV") +var base32HexUpperNoPad = base32HexUpperPad.WithPadding(b32.NoPadding) diff --git a/vendor/github.com/multiformats/go-multibase/encoder.go b/vendor/github.com/multiformats/go-multibase/encoder.go new file mode 100644 index 0000000000..42e753f5ca --- /dev/null +++ b/vendor/github.com/multiformats/go-multibase/encoder.go @@ -0,0 +1,63 @@ +package multibase + +import ( + "fmt" +) + +// Encoder is a multibase encoding that is verified to be supported and +// supports an Encode method that does not return an error +type Encoder struct { + enc Encoding +} + +// NewEncoder create a new Encoder from an Encoding +func NewEncoder(base Encoding) (Encoder, error) { + _, ok := EncodingToStr[base] + if !ok { + return Encoder{-1}, fmt.Errorf("Unsupported multibase encoding: %d", base) + } + return Encoder{base}, nil +} + +// MustNewEncoder is like NewEncoder but will panic if the encoding is +// invalid. 
+func MustNewEncoder(base Encoding) Encoder { + _, ok := EncodingToStr[base] + if !ok { + panic("Unsupported multibase encoding") + } + return Encoder{base} +} + +// EncoderByName creates an encoder from a string, the string can +// either be the multibase name or single character multibase prefix +func EncoderByName(str string) (Encoder, error) { + var base Encoding + ok := true + if len(str) == 0 { + return Encoder{-1}, fmt.Errorf("Empty multibase encoding") + } else if len(str) == 1 { + base = Encoding(str[0]) + _, ok = EncodingToStr[base] + } else { + base, ok = Encodings[str] + } + if !ok { + return Encoder{-1}, fmt.Errorf("Unsupported multibase encoding: %s", str) + } + return Encoder{base}, nil +} + +func (p Encoder) Encoding() Encoding { + return p.enc +} + +// Encode encodes the multibase using the given Encoder. +func (p Encoder) Encode(data []byte) string { + str, err := Encode(p.enc, data) + if err != nil { + // should not happen + panic(err) + } + return str +} diff --git a/vendor/github.com/multiformats/go-multibase/go.mod b/vendor/github.com/multiformats/go-multibase/go.mod new file mode 100644 index 0000000000..28d6eb12ca --- /dev/null +++ b/vendor/github.com/multiformats/go-multibase/go.mod @@ -0,0 +1,6 @@ +module github.com/multiformats/go-multibase + +require ( + github.com/mr-tron/base58 v1.1.0 + github.com/multiformats/go-base32 v0.0.3 +) diff --git a/vendor/github.com/multiformats/go-multibase/go.sum b/vendor/github.com/multiformats/go-multibase/go.sum new file mode 100644 index 0000000000..510e3dcdc2 --- /dev/null +++ b/vendor/github.com/multiformats/go-multibase/go.sum @@ -0,0 +1,4 @@ +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= diff 
--git a/vendor/github.com/multiformats/go-multibase/multibase.go b/vendor/github.com/multiformats/go-multibase/multibase.go new file mode 100644 index 0000000000..a60403d549 --- /dev/null +++ b/vendor/github.com/multiformats/go-multibase/multibase.go @@ -0,0 +1,186 @@ +package multibase + +import ( + "encoding/base64" + "encoding/hex" + "fmt" + + b58 "github.com/mr-tron/base58/base58" + b32 "github.com/multiformats/go-base32" +) + +// Encoding identifies the type of base-encoding that a multibase is carrying. +type Encoding int + +// These are the encodings specified in the standard, not are all +// supported yet +const ( + Identity = 0x00 + Base2 = '0' + Base8 = '7' + Base10 = '9' + Base16 = 'f' + Base16Upper = 'F' + Base32 = 'b' + Base32Upper = 'B' + Base32pad = 'c' + Base32padUpper = 'C' + Base32hex = 'v' + Base32hexUpper = 'V' + Base32hexPad = 't' + Base32hexPadUpper = 'T' + Base58Flickr = 'Z' + Base58BTC = 'z' + Base64 = 'm' + Base64url = 'u' + Base64pad = 'M' + Base64urlPad = 'U' +) + +// Encodings is a map of the supported encoding, unsupported encoding +// specified in standard are left out +var Encodings = map[string]Encoding{ + "identity": 0x00, + "base2": '0', + "base16": 'f', + "base16upper": 'F', + "base32": 'b', + "base32upper": 'B', + "base32pad": 'c', + "base32padupper": 'C', + "base32hex": 'v', + "base32hexupper": 'V', + "base32hexpad": 't', + "base32hexpadupper": 'T', + "base58flickr": 'Z', + "base58btc": 'z', + "base64": 'm', + "base64url": 'u', + "base64pad": 'M', + "base64urlpad": 'U', +} + +var EncodingToStr = map[Encoding]string{ + 0x00: "identity", + '0': "base2", + 'f': "base16", + 'F': "base16upper", + 'b': "base32", + 'B': "base32upper", + 'c': "base32pad", + 'C': "base32padupper", + 'v': "base32hex", + 'V': "base32hexupper", + 't': "base32hexpad", + 'T': "base32hexpadupper", + 'Z': "base58flickr", + 'z': "base58btc", + 'm': "base64", + 'u': "base64url", + 'M': "base64pad", + 'U': "base64urlpad", +} + +// ErrUnsupportedEncoding is 
returned when the selected encoding is not known or +// implemented. +var ErrUnsupportedEncoding = fmt.Errorf("selected encoding not supported") + +// Encode encodes a given byte slice with the selected encoding and returns a +// multibase string (). It will return +// an error if the selected base is not known. +func Encode(base Encoding, data []byte) (string, error) { + switch base { + case Identity: + // 0x00 inside a string is OK in golang and causes no problems with the length calculation. + return string(Identity) + string(data), nil + case Base2: + return string(Base2) + binaryEncodeToString(data), nil + case Base16: + return string(Base16) + hex.EncodeToString(data), nil + case Base16Upper: + return string(Base16Upper) + hexEncodeToStringUpper(data), nil + case Base32: + return string(Base32) + base32StdLowerNoPad.EncodeToString(data), nil + case Base32Upper: + return string(Base32Upper) + base32StdUpperNoPad.EncodeToString(data), nil + case Base32hex: + return string(Base32hex) + base32HexLowerNoPad.EncodeToString(data), nil + case Base32hexUpper: + return string(Base32hexUpper) + base32HexUpperNoPad.EncodeToString(data), nil + case Base32pad: + return string(Base32pad) + base32StdLowerPad.EncodeToString(data), nil + case Base32padUpper: + return string(Base32padUpper) + base32StdUpperPad.EncodeToString(data), nil + case Base32hexPad: + return string(Base32hexPad) + base32HexLowerPad.EncodeToString(data), nil + case Base32hexPadUpper: + return string(Base32hexPadUpper) + base32HexUpperPad.EncodeToString(data), nil + case Base58BTC: + return string(Base58BTC) + b58.EncodeAlphabet(data, b58.BTCAlphabet), nil + case Base58Flickr: + return string(Base58Flickr) + b58.EncodeAlphabet(data, b58.FlickrAlphabet), nil + case Base64pad: + return string(Base64pad) + base64.StdEncoding.EncodeToString(data), nil + case Base64urlPad: + return string(Base64urlPad) + base64.URLEncoding.EncodeToString(data), nil + case Base64url: + return string(Base64url) + 
base64.RawURLEncoding.EncodeToString(data), nil + case Base64: + return string(Base64) + base64.RawStdEncoding.EncodeToString(data), nil + default: + return "", ErrUnsupportedEncoding + } +} + +// Decode takes a multibase string and decodes into a bytes buffer. +// It will return an error if the selected base is not known. +func Decode(data string) (Encoding, []byte, error) { + if len(data) == 0 { + return 0, nil, fmt.Errorf("cannot decode multibase for zero length string") + } + + enc := Encoding(data[0]) + + switch enc { + case Identity: + return Identity, []byte(data[1:]), nil + case Base2: + bytes, err := decodeBinaryString(data[1:]) + return enc, bytes, err + case Base16, Base16Upper: + bytes, err := hex.DecodeString(data[1:]) + return enc, bytes, err + case Base32, Base32Upper: + bytes, err := b32.RawStdEncoding.DecodeString(data[1:]) + return enc, bytes, err + case Base32hex, Base32hexUpper: + bytes, err := b32.RawHexEncoding.DecodeString(data[1:]) + return enc, bytes, err + case Base32pad, Base32padUpper: + bytes, err := b32.StdEncoding.DecodeString(data[1:]) + return enc, bytes, err + case Base32hexPad, Base32hexPadUpper: + bytes, err := b32.HexEncoding.DecodeString(data[1:]) + return enc, bytes, err + case Base58BTC: + bytes, err := b58.DecodeAlphabet(data[1:], b58.BTCAlphabet) + return Base58BTC, bytes, err + case Base58Flickr: + bytes, err := b58.DecodeAlphabet(data[1:], b58.FlickrAlphabet) + return Base58Flickr, bytes, err + case Base64pad: + bytes, err := base64.StdEncoding.DecodeString(data[1:]) + return Base64pad, bytes, err + case Base64urlPad: + bytes, err := base64.URLEncoding.DecodeString(data[1:]) + return Base64urlPad, bytes, err + case Base64: + bytes, err := base64.RawStdEncoding.DecodeString(data[1:]) + return Base64, bytes, err + case Base64url: + bytes, err := base64.RawURLEncoding.DecodeString(data[1:]) + return Base64url, bytes, err + default: + return -1, nil, ErrUnsupportedEncoding + } +} diff --git 
a/vendor/github.com/multiformats/go-multibase/package.json b/vendor/github.com/multiformats/go-multibase/package.json new file mode 100644 index 0000000000..75f742e6de --- /dev/null +++ b/vendor/github.com/multiformats/go-multibase/package.json @@ -0,0 +1,30 @@ +{ + "author": "whyrusleeping", + "bugs": { + "url": "https://github.com/multiformats/go-multibase" + }, + "gx": { + "dvcsimport": "github.com/multiformats/go-multibase" + }, + "gxDependencies": [ + { + "author": "mr-tron", + "hash": "QmWFAMPqsEyUX7gDUsRVmMWz59FxSpJ1b2v6bJ1yYzo7jY", + "name": "go-base58-fast", + "version": "0.1.1" + }, + { + "author": "Golang", + "hash": "QmPbbYin7KBd1Y1BfUe15vHzwJiioyi3wtKQTtXWWf8SC5", + "name": "base32", + "version": "0.0.3" + } + ], + "gxVersion": "0.8.0", + "language": "go", + "license": "", + "name": "go-multibase", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "0.3.0" +} + diff --git a/vendor/github.com/multiformats/go-multihash/.gitignore b/vendor/github.com/multiformats/go-multihash/.gitignore new file mode 100644 index 0000000000..1d74e21965 --- /dev/null +++ b/vendor/github.com/multiformats/go-multihash/.gitignore @@ -0,0 +1 @@ +.vscode/ diff --git a/vendor/github.com/multiformats/go-multihash/.gitmodules b/vendor/github.com/multiformats/go-multihash/.gitmodules new file mode 100644 index 0000000000..d92ce4f1bc --- /dev/null +++ b/vendor/github.com/multiformats/go-multihash/.gitmodules @@ -0,0 +1,6 @@ +[submodule "spec/multicodec"] + path = spec/multicodec + url = https://github.com/multiformats/multicodec.git +[submodule "spec/multihash"] + path = spec/multihash + url = https://github.com/multiformats/multihash.git diff --git a/vendor/github.com/multiformats/go-multihash/.travis.yml b/vendor/github.com/multiformats/go-multihash/.travis.yml new file mode 100644 index 0000000000..09f9a4cc36 --- /dev/null +++ b/vendor/github.com/multiformats/go-multihash/.travis.yml @@ -0,0 +1,30 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + 
global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/pkg/mod + - /home/travis/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/multiformats/go-multihash/LICENSE b/vendor/github.com/multiformats/go-multihash/LICENSE new file mode 100644 index 0000000000..c7386b3c94 --- /dev/null +++ b/vendor/github.com/multiformats/go-multihash/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/multiformats/go-multihash/Makefile b/vendor/github.com/multiformats/go-multihash/Makefile new file mode 100644 index 0000000000..20619413c9 --- /dev/null +++ b/vendor/github.com/multiformats/go-multihash/Makefile @@ -0,0 +1,11 @@ +gx: + go get github.com/whyrusleeping/gx + go get github.com/whyrusleeping/gx-go + +deps: gx + gx --verbose install --global + gx-go rewrite + +publish: + gx-go rewrite --undo + diff --git a/vendor/github.com/multiformats/go-multihash/README.md b/vendor/github.com/multiformats/go-multihash/README.md new file mode 100644 index 0000000000..dd7f2386ad --- /dev/null +++ b/vendor/github.com/multiformats/go-multihash/README.md @@ -0,0 +1,90 @@ +# go-multihash + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-multiformats-blue.svg?style=flat-square)](https://github.com/multiformats/multiformats) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](https://webchat.freenode.net/?channels=%23ipfs) +[![](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) +[![GoDoc](https://godoc.org/github.com/multiformats/go-multihash?status.svg)](https://godoc.org/github.com/multiformats/go-multihash) +[![Travis CI](https://img.shields.io/travis/multiformats/go-multihash.svg?style=flat-square&branch=master)](https://travis-ci.org/multiformats/go-multihash) +[![codecov.io](https://img.shields.io/codecov/c/github/multiformats/go-multihash.svg?style=flat-square&branch=master)](https://codecov.io/github/multiformats/go-multihash?branch=master) + +> [multihash](https://github.com/multiformats/multihash) implementation in Go + +## Table of Contents + +- [Install](#install) +- [Usage](#usage) +- [Maintainers](#maintainers) +- [Contribute](#contribute) +- [License](#license) + +## Install + +`go-multihash` is a standard Go module 
which can be installed with: + +```sh +go get github.com/multiformats/go-multihash +``` + +## Usage + + +### Example + +This example takes a standard hex-encoded data and uses `EncodeName` to calculate the SHA1 multihash value for the buffer. + +The resulting hex-encoded data corresponds to: ``, which could be re-parsed +with `Multihash.FromHexString()`. + + +```go +package main + +import ( + "encoding/hex" + "fmt" + + "github.com/multiformats/go-multihash" +) + +func main() { + // ignores errors for simplicity. + // don't do that at home. + // Decode a SHA1 hash to a binary buffer + buf, _ := hex.DecodeString("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33") + + // Create a new multihash with it. + mHashBuf, _ := multihash.EncodeName(buf, "sha1") + // Print the multihash as hex string + fmt.Printf("hex: %s\n", hex.EncodeToString(mHashBuf)) + + // Parse the binary multihash to a DecodedMultihash + mHash, _ := multihash.Decode(mHashBuf) + // Convert the sha1 value to hex string + sha1hex := hex.EncodeToString(mHash.Digest) + // Print all the information in the multihash + fmt.Printf("obj: %v 0x%x %d %s\n", mHash.Name, mHash.Code, mHash.Length, sha1hex) +} +``` + +To run, copy to [example/foo.go](example/foo.go) and: + +``` +> cd example/ +> go build +> ./example +hex: 11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 +obj: sha1 0x11 20 0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 +``` + +## Contribute + +Contributions welcome. Please check out [the issues](https://github.com/multiformats/go-multihash/issues). + +Check out our [contributing document](https://github.com/multiformats/multiformats/blob/master/contributing.md) for more information on how we work, and about contributing in general. Please be aware that all interactions related to multiformats are subject to the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). 
+ +Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + +## License + +[MIT](LICENSE) © 2014 Juan Batiz-Benet diff --git a/vendor/github.com/multiformats/go-multihash/codecov.yml b/vendor/github.com/multiformats/go-multihash/codecov.yml new file mode 100644 index 0000000000..5f88a9ea27 --- /dev/null +++ b/vendor/github.com/multiformats/go-multihash/codecov.yml @@ -0,0 +1,3 @@ +coverage: + range: "50...100" +comment: off diff --git a/vendor/github.com/multiformats/go-multihash/go.mod b/vendor/github.com/multiformats/go-multihash/go.mod new file mode 100644 index 0000000000..9a825bbc7e --- /dev/null +++ b/vendor/github.com/multiformats/go-multihash/go.mod @@ -0,0 +1,12 @@ +module github.com/multiformats/go-multihash + +require ( + github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 + github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 + github.com/mr-tron/base58 v1.1.3 + github.com/multiformats/go-varint v0.0.5 + github.com/spaolacci/murmur3 v1.1.0 + golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 +) + +go 1.13 diff --git a/vendor/github.com/multiformats/go-multihash/go.sum b/vendor/github.com/multiformats/go-multihash/go.sum new file mode 100644 index 0000000000..6425497697 --- /dev/null +++ b/vendor/github.com/multiformats/go-multihash/go.sum @@ -0,0 +1,22 @@ +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod 
h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-varint v0.0.3 h1:1OZFaq4XbSNQE6ujqgr6/EIZlgHE7DmojAFsLqAJ26M= +github.com/multiformats/go-varint v0.0.3/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.4 h1:CplQWhUouUgTZ53vNFE8VoWr2VjaKXci+xyrKyyFuSw= +github.com/multiformats/go-varint v0.0.4/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/multiformats/go-multihash/io.go b/vendor/github.com/multiformats/go-multihash/io.go new file mode 100644 index 0000000000..3a31baa418 --- /dev/null +++ b/vendor/github.com/multiformats/go-multihash/io.go @@ -0,0 +1,98 @@ +package multihash + +import ( + "errors" + "io" + "math" + + "github.com/multiformats/go-varint" +) + +// Reader is an io.Reader 
wrapper that exposes a function +// to read a whole multihash, parse it, and return it. +type Reader interface { + io.Reader + + ReadMultihash() (Multihash, error) +} + +// Writer is an io.Writer wrapper that exposes a function +// to write a whole multihash. +type Writer interface { + io.Writer + + WriteMultihash(Multihash) error +} + +// NewReader wraps an io.Reader with a multihash.Reader +func NewReader(r io.Reader) Reader { + return &mhReader{r} +} + +// NewWriter wraps an io.Writer with a multihash.Writer +func NewWriter(w io.Writer) Writer { + return &mhWriter{w} +} + +type mhReader struct { + r io.Reader +} + +func (r *mhReader) Read(buf []byte) (n int, err error) { + return r.r.Read(buf) +} + +func (r *mhReader) ReadByte() (byte, error) { + if br, ok := r.r.(io.ByteReader); ok { + return br.ReadByte() + } + var b [1]byte + n, err := r.r.Read(b[:]) + if n == 1 { + return b[0], nil + } + if err == nil { + if n != 0 { + panic("reader returned an invalid length") + } + err = io.ErrNoProgress + } + return 0, err +} + +func (r *mhReader) ReadMultihash() (Multihash, error) { + code, err := varint.ReadUvarint(r) + if err != nil { + return nil, err + } + + length, err := varint.ReadUvarint(r) + if err != nil { + return nil, err + } + if length > math.MaxInt32 { + return nil, errors.New("digest too long, supporting only <= 2^31-1") + } + + buf := make([]byte, varint.UvarintSize(code)+varint.UvarintSize(length)+int(length)) + n := varint.PutUvarint(buf, code) + n += varint.PutUvarint(buf[n:], length) + if _, err := io.ReadFull(r.r, buf[n:]); err != nil { + return nil, err + } + + return Cast(buf) +} + +type mhWriter struct { + w io.Writer +} + +func (w *mhWriter) Write(buf []byte) (n int, err error) { + return w.w.Write(buf) +} + +func (w *mhWriter) WriteMultihash(m Multihash) error { + _, err := w.w.Write([]byte(m)) + return err +} diff --git a/vendor/github.com/multiformats/go-multihash/multihash.go b/vendor/github.com/multiformats/go-multihash/multihash.go new 
file mode 100644 index 0000000000..370e259c37 --- /dev/null +++ b/vendor/github.com/multiformats/go-multihash/multihash.go @@ -0,0 +1,337 @@ +// Package multihash is the Go implementation of +// https://github.com/multiformats/multihash, or self-describing +// hashes. +package multihash + +import ( + "encoding/hex" + "errors" + "fmt" + "math" + + b58 "github.com/mr-tron/base58/base58" + "github.com/multiformats/go-varint" +) + +// errors +var ( + ErrUnknownCode = errors.New("unknown multihash code") + ErrTooShort = errors.New("multihash too short. must be >= 2 bytes") + ErrTooLong = errors.New("multihash too long. must be < 129 bytes") + ErrLenNotSupported = errors.New("multihash does not yet support digests longer than 127 bytes") + ErrInvalidMultihash = errors.New("input isn't valid multihash") + + ErrVarintBufferShort = errors.New("uvarint: buffer too small") + ErrVarintTooLong = errors.New("uvarint: varint too big (max 64bit)") +) + +// ErrInconsistentLen is returned when a decoded multihash has an inconsistent length +type ErrInconsistentLen struct { + dm *DecodedMultihash +} + +func (e ErrInconsistentLen) Error() string { + return fmt.Sprintf("multihash length inconsistent: expected %d, got %d", e.dm.Length, len(e.dm.Digest)) +} + +// constants +const ( + IDENTITY = 0x00 + // Deprecated: use IDENTITY + ID = IDENTITY + SHA1 = 0x11 + SHA2_256 = 0x12 + SHA2_512 = 0x13 + SHA3_224 = 0x17 + SHA3_256 = 0x16 + SHA3_384 = 0x15 + SHA3_512 = 0x14 + SHA3 = SHA3_512 + KECCAK_224 = 0x1A + KECCAK_256 = 0x1B + KECCAK_384 = 0x1C + KECCAK_512 = 0x1D + + SHAKE_128 = 0x18 + SHAKE_256 = 0x19 + + BLAKE2B_MIN = 0xb201 + BLAKE2B_MAX = 0xb240 + BLAKE2S_MIN = 0xb241 + BLAKE2S_MAX = 0xb260 + + MD5 = 0xd5 + + DBL_SHA2_256 = 0x56 + + MURMUR3_128 = 0x22 + // Deprecated: use MURMUR3_128 + MURMUR3 = MURMUR3_128 + + SHA2_256_TRUNC254_PADDED = 0x1012 + X11 = 0x1100 + POSEIDON_BLS12_381_A1_FC1 = 0xb401 +) + +func init() { + // Add blake2b (64 codes) + for c := uint64(BLAKE2B_MIN); c <= 
BLAKE2B_MAX; c++ { + n := c - BLAKE2B_MIN + 1 + name := fmt.Sprintf("blake2b-%d", n*8) + Names[name] = c + Codes[c] = name + DefaultLengths[c] = int(n) + } + + // Add blake2s (32 codes) + for c := uint64(BLAKE2S_MIN); c <= BLAKE2S_MAX; c++ { + n := c - BLAKE2S_MIN + 1 + name := fmt.Sprintf("blake2s-%d", n*8) + Names[name] = c + Codes[c] = name + DefaultLengths[c] = int(n) + } +} + +// Names maps the name of a hash to the code +var Names = map[string]uint64{ + "identity": IDENTITY, + "sha1": SHA1, + "sha2-256": SHA2_256, + "sha2-512": SHA2_512, + "sha3": SHA3_512, + "sha3-224": SHA3_224, + "sha3-256": SHA3_256, + "sha3-384": SHA3_384, + "sha3-512": SHA3_512, + "dbl-sha2-256": DBL_SHA2_256, + "murmur3-128": MURMUR3_128, + "keccak-224": KECCAK_224, + "keccak-256": KECCAK_256, + "keccak-384": KECCAK_384, + "keccak-512": KECCAK_512, + "shake-128": SHAKE_128, + "shake-256": SHAKE_256, + "sha2-256-trunc254-padded": SHA2_256_TRUNC254_PADDED, + "x11": X11, + "md5": MD5, + "poseidon-bls12_381-a2-fc1": POSEIDON_BLS12_381_A1_FC1, +} + +// Codes maps a hash code to it's name +var Codes = map[uint64]string{ + IDENTITY: "identity", + SHA1: "sha1", + SHA2_256: "sha2-256", + SHA2_512: "sha2-512", + SHA3_224: "sha3-224", + SHA3_256: "sha3-256", + SHA3_384: "sha3-384", + SHA3_512: "sha3-512", + DBL_SHA2_256: "dbl-sha2-256", + MURMUR3_128: "murmur3-128", + KECCAK_224: "keccak-224", + KECCAK_256: "keccak-256", + KECCAK_384: "keccak-384", + KECCAK_512: "keccak-512", + SHAKE_128: "shake-128", + SHAKE_256: "shake-256", + SHA2_256_TRUNC254_PADDED: "sha2-256-trunc254-padded", + X11: "x11", + POSEIDON_BLS12_381_A1_FC1: "poseidon-bls12_381-a2-fc1", + MD5: "md5", +} + +// DefaultLengths maps a hash code to it's default length +var DefaultLengths = map[uint64]int{ + IDENTITY: -1, + SHA1: 20, + SHA2_256: 32, + SHA2_512: 64, + SHA3_224: 28, + SHA3_256: 32, + SHA3_384: 48, + SHA3_512: 64, + DBL_SHA2_256: 32, + KECCAK_224: 28, + KECCAK_256: 32, + MURMUR3_128: 4, + KECCAK_384: 48, + KECCAK_512: 64, 
+ SHAKE_128: 32, + SHAKE_256: 64, + X11: 64, + MD5: 16, +} + +func uvarint(buf []byte) (uint64, []byte, error) { + n, c, err := varint.FromUvarint(buf) + if err != nil { + return n, buf, err + } + + if c == 0 { + return n, buf, ErrVarintBufferShort + } else if c < 0 { + return n, buf[-c:], ErrVarintTooLong + } else { + return n, buf[c:], nil + } +} + +// DecodedMultihash represents a parsed multihash and allows +// easy access to the different parts of a multihash. +type DecodedMultihash struct { + Code uint64 + Name string + Length int // Length is just int as it is type of len() opearator + Digest []byte // Digest holds the raw multihash bytes +} + +// Multihash is byte slice with the following form: +// . +// See the spec for more information. +type Multihash []byte + +// HexString returns the hex-encoded representation of a multihash. +func (m *Multihash) HexString() string { + return hex.EncodeToString([]byte(*m)) +} + +// String is an alias to HexString(). +func (m *Multihash) String() string { + return m.HexString() +} + +// FromHexString parses a hex-encoded multihash. +func FromHexString(s string) (Multihash, error) { + b, err := hex.DecodeString(s) + if err != nil { + return Multihash{}, err + } + + return Cast(b) +} + +// B58String returns the B58-encoded representation of a multihash. +func (m Multihash) B58String() string { + return b58.Encode([]byte(m)) +} + +// FromB58String parses a B58-encoded multihash. +func FromB58String(s string) (m Multihash, err error) { + b, err := b58.Decode(s) + if err != nil { + return Multihash{}, ErrInvalidMultihash + } + + return Cast(b) +} + +// Cast casts a buffer onto a multihash, and returns an error +// if it does not work. +func Cast(buf []byte) (Multihash, error) { + dm, err := Decode(buf) + if err != nil { + return Multihash{}, err + } + + if !ValidCode(dm.Code) { + return Multihash{}, ErrUnknownCode + } + + return Multihash(buf), nil +} + +// Decode parses multihash bytes into a DecodedMultihash. 
+func Decode(buf []byte) (*DecodedMultihash, error) { + rlen, code, hdig, err := readMultihashFromBuf(buf) + if err != nil { + return nil, err + } + + dm := &DecodedMultihash{ + Code: code, + Name: Codes[code], + Length: len(hdig), + Digest: hdig, + } + + if len(buf) != rlen { + return nil, ErrInconsistentLen{dm} + } + + return dm, nil +} + +// Encode a hash digest along with the specified function code. +// Note: the length is derived from the length of the digest itself. +func Encode(buf []byte, code uint64) ([]byte, error) { + if !ValidCode(code) { + return nil, ErrUnknownCode + } + + newBuf := make([]byte, varint.UvarintSize(code)+varint.UvarintSize(uint64(len(buf)))+len(buf)) + n := varint.PutUvarint(newBuf, code) + n += varint.PutUvarint(newBuf[n:], uint64(len(buf))) + + copy(newBuf[n:], buf) + return newBuf, nil +} + +// EncodeName is like Encode() but providing a string name +// instead of a numeric code. See Names for allowed values. +func EncodeName(buf []byte, name string) ([]byte, error) { + return Encode(buf, Names[name]) +} + +// ValidCode checks whether a multihash code is valid. +func ValidCode(code uint64) bool { + _, ok := Codes[code] + return ok +} + +// readMultihashFromBuf reads a multihash from the given buffer, returning the +// individual pieces of the multihash. 
+// Note: the returned digest is a slice over the passed in data and should be +// copied if the buffer will be reused +func readMultihashFromBuf(buf []byte) (int, uint64, []byte, error) { + bufl := len(buf) + if bufl < 2 { + return 0, 0, nil, ErrTooShort + } + + var err error + var code, length uint64 + + code, buf, err = uvarint(buf) + if err != nil { + return 0, 0, nil, err + } + + length, buf, err = uvarint(buf) + if err != nil { + return 0, 0, nil, err + } + + if length > math.MaxInt32 { + return 0, 0, nil, errors.New("digest too long, supporting only <= 2^31-1") + } + if int(length) > len(buf) { + return 0, 0, nil, errors.New("length greater than remaining number of bytes in buffer") + } + + rlen := (bufl - len(buf)) + int(length) + return rlen, code, buf[:length], nil +} + +// MHFromBytes reads a multihash from the given byte buffer, returning the +// number of bytes read as well as the multihash +func MHFromBytes(buf []byte) (int, Multihash, error) { + nr, _, _, err := readMultihashFromBuf(buf) + if err != nil { + return 0, nil, err + } + + return nr, Multihash(buf[:nr]), nil +} diff --git a/vendor/github.com/multiformats/go-multihash/set.go b/vendor/github.com/multiformats/go-multihash/set.go new file mode 100644 index 0000000000..f56a27570a --- /dev/null +++ b/vendor/github.com/multiformats/go-multihash/set.go @@ -0,0 +1,66 @@ +package multihash + +// Set is a set of Multihashes, holding one copy per Multihash. +type Set struct { + set map[string]struct{} +} + +// NewSet creates a new set correctly initialized. +func NewSet() *Set { + return &Set{ + set: make(map[string]struct{}), + } +} + +// Add adds a new multihash to the set. +func (s *Set) Add(m Multihash) { + s.set[string(m)] = struct{}{} +} + +// Len returns the number of elements in the set. +func (s *Set) Len() int { + return len(s.set) +} + +// Has returns true if the element is in the set. 
+func (s *Set) Has(m Multihash) bool { + _, ok := s.set[string(m)] + return ok +} + +// Visit adds a multihash only if it is not in the set already. Returns true +// if the multihash was added (was not in the set before). +func (s *Set) Visit(m Multihash) bool { + _, ok := s.set[string(m)] + if !ok { + s.set[string(m)] = struct{}{} + return true + } + return false +} + +// ForEach runs f(m) with each multihash in the set. If returns immediately if +// f(m) returns an error. +func (s *Set) ForEach(f func(m Multihash) error) error { + for elem := range s.set { + mh := Multihash(elem) + if err := f(mh); err != nil { + return err + } + } + return nil +} + +// Remove removes an element from the set. +func (s *Set) Remove(m Multihash) { + delete(s.set, string(m)) +} + +// All returns a slice with all the elements in the set. +func (s *Set) All() []Multihash { + out := make([]Multihash, 0, len(s.set)) + for m := range s.set { + out = append(out, Multihash(m)) + } + return out +} diff --git a/vendor/github.com/multiformats/go-multihash/sum.go b/vendor/github.com/multiformats/go-multihash/sum.go new file mode 100644 index 0000000000..d6bf2f990e --- /dev/null +++ b/vendor/github.com/multiformats/go-multihash/sum.go @@ -0,0 +1,241 @@ +package multihash + +import ( + "crypto/md5" + "crypto/sha1" + "crypto/sha512" + "errors" + "fmt" + + blake2b "github.com/minio/blake2b-simd" + sha256 "github.com/minio/sha256-simd" + murmur3 "github.com/spaolacci/murmur3" + blake2s "golang.org/x/crypto/blake2s" + sha3 "golang.org/x/crypto/sha3" +) + +// ErrSumNotSupported is returned when the Sum function code is not implemented +var ErrSumNotSupported = errors.New("Function not implemented. Complain to lib maintainer.") + +var ErrLenTooLarge = errors.New("requested length was too large for digest") + +// HashFunc is a hash function that hashes data into digest. +// +// The length is the size the digest will be truncated to. 
While the hash +// function isn't responsible for truncating the digest, it may want to error if +// the length is invalid for the hash function (e.g., truncation would make the +// hash useless). +type HashFunc func(data []byte, length int) (digest []byte, err error) + +// funcTable maps multicodec values to hash functions. +var funcTable = make(map[uint64]HashFunc) + +// Sum obtains the cryptographic sum of a given buffer. The length parameter +// indicates the length of the resulting digest and passing a negative value +// use default length values for the selected hash function. +func Sum(data []byte, code uint64, length int) (Multihash, error) { + if !ValidCode(code) { + return nil, fmt.Errorf("invalid multihash code %d", code) + } + + if length < 0 { + var ok bool + length, ok = DefaultLengths[code] + if !ok { + return nil, fmt.Errorf("no default length for code %d", code) + } + } + + hashFunc, ok := funcTable[code] + if !ok { + return nil, ErrSumNotSupported + } + + d, err := hashFunc(data, length) + if err != nil { + return nil, err + } + if len(d) < length { + return nil, ErrLenTooLarge + } + + if length >= 0 { + d = d[:length] + } + return Encode(d, code) +} + +func sumBlake2s32(data []byte, _ int) ([]byte, error) { + d := blake2s.Sum256(data) + return d[:], nil +} +func sumBlake2b(data []byte, size int) ([]byte, error) { + // special case these lengths to avoid allocations. + switch size { + case 32: + hash := blake2b.Sum256(data) + return hash[:], nil + case 64: + hash := blake2b.Sum512(data) + return hash[:], nil + } + + // Ok, allocate away. 
+ hasher, err := blake2b.New(&blake2b.Config{Size: uint8(size)}) + if err != nil { + return nil, err + } + + if _, err := hasher.Write(data); err != nil { + return nil, err + } + + return hasher.Sum(nil)[:], nil +} + +func sumID(data []byte, length int) ([]byte, error) { + if length >= 0 && length != len(data) { + return nil, fmt.Errorf("the length of the identity hash (%d) must be equal to the length of the data (%d)", + length, len(data)) + + } + return data, nil +} + +func sumSHA1(data []byte, length int) ([]byte, error) { + a := sha1.Sum(data) + return a[0:20], nil +} + +func sumSHA256(data []byte, length int) ([]byte, error) { + a := sha256.Sum256(data) + return a[0:32], nil +} + +func sumMD5(data []byte, length int) ([]byte, error) { + a := md5.Sum(data) + return a[0:md5.Size], nil +} + +func sumDoubleSHA256(data []byte, length int) ([]byte, error) { + val, _ := sumSHA256(data, len(data)) + return sumSHA256(val, len(val)) +} + +func sumSHA512(data []byte, length int) ([]byte, error) { + a := sha512.Sum512(data) + return a[0:64], nil +} +func sumKeccak256(data []byte, length int) ([]byte, error) { + h := sha3.NewLegacyKeccak256() + h.Write(data) + return h.Sum(nil), nil +} + +func sumKeccak512(data []byte, length int) ([]byte, error) { + h := sha3.NewLegacyKeccak512() + h.Write(data) + return h.Sum(nil), nil +} + +func sumSHA3_512(data []byte, length int) ([]byte, error) { + a := sha3.Sum512(data) + return a[:], nil +} + +func sumMURMUR3(data []byte, length int) ([]byte, error) { + number := murmur3.Sum32(data) + bytes := make([]byte, 4) + for i := range bytes { + bytes[i] = byte(number & 0xff) + number >>= 8 + } + return bytes, nil +} + +func sumSHAKE128(data []byte, length int) ([]byte, error) { + bytes := make([]byte, 32) + sha3.ShakeSum128(bytes, data) + return bytes, nil +} + +func sumSHAKE256(data []byte, length int) ([]byte, error) { + bytes := make([]byte, 64) + sha3.ShakeSum256(bytes, data) + return bytes, nil +} + +func sumSHA3_384(data []byte, 
length int) ([]byte, error) { + a := sha3.Sum384(data) + return a[:], nil +} + +func sumSHA3_256(data []byte, length int) ([]byte, error) { + a := sha3.Sum256(data) + return a[:], nil +} + +func sumSHA3_224(data []byte, length int) ([]byte, error) { + a := sha3.Sum224(data) + return a[:], nil +} + +func registerStdlibHashFuncs() { + RegisterHashFunc(IDENTITY, sumID) + RegisterHashFunc(SHA1, sumSHA1) + RegisterHashFunc(SHA2_512, sumSHA512) + RegisterHashFunc(MD5, sumMD5) +} + +func registerNonStdlibHashFuncs() { + RegisterHashFunc(SHA2_256, sumSHA256) + RegisterHashFunc(DBL_SHA2_256, sumDoubleSHA256) + + RegisterHashFunc(KECCAK_256, sumKeccak256) + RegisterHashFunc(KECCAK_512, sumKeccak512) + + RegisterHashFunc(SHA3_224, sumSHA3_224) + RegisterHashFunc(SHA3_256, sumSHA3_256) + RegisterHashFunc(SHA3_384, sumSHA3_384) + RegisterHashFunc(SHA3_512, sumSHA3_512) + + RegisterHashFunc(MURMUR3_128, sumMURMUR3) + + RegisterHashFunc(SHAKE_128, sumSHAKE128) + RegisterHashFunc(SHAKE_256, sumSHAKE256) + + // Blake family of hash functions + // BLAKE2S + // + // We only support 32byte (256 bit) + RegisterHashFunc(BLAKE2S_MIN+31, sumBlake2s32) + // BLAKE2B + for c := uint64(BLAKE2B_MIN); c <= BLAKE2B_MAX; c++ { + size := int(c - BLAKE2B_MIN + 1) + RegisterHashFunc(c, func(buf []byte, _ int) ([]byte, error) { + return sumBlake2b(buf, size) + }) + } +} + +func init() { + registerStdlibHashFuncs() + registerNonStdlibHashFuncs() +} + +// RegisterHashFunc adds an entry to the package-level code -> hash func map. +// The hash function must return at least the requested number of bytes. If it +// returns more, the hash will be truncated. 
+func RegisterHashFunc(code uint64, hashFunc HashFunc) error { + if !ValidCode(code) { + return fmt.Errorf("code %v not valid", code) + } + + _, ok := funcTable[code] + if ok { + return fmt.Errorf("hash func for code %v already registered", code) + } + + funcTable[code] = hashFunc + return nil +} diff --git a/vendor/github.com/multiformats/go-varint/.travis.yml b/vendor/github.com/multiformats/go-varint/.travis.yml new file mode 100644 index 0000000000..248d09b672 --- /dev/null +++ b/vendor/github.com/multiformats/go-varint/.travis.yml @@ -0,0 +1,30 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + global: + - GOTFLAGS="-race" + - GO111MODULE=on + matrix: + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + +cache: + directories: + - $GOPATH/pkg/mod + - /home/travis/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/multiformats/go-varint/LICENSE b/vendor/github.com/multiformats/go-varint/LICENSE new file mode 100644 index 0000000000..14121ca71d --- /dev/null +++ b/vendor/github.com/multiformats/go-varint/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2019 Protocol Labs + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/multiformats/go-varint/README.md b/vendor/github.com/multiformats/go-varint/README.md new file mode 100644 index 0000000000..57f0a4a024 --- /dev/null +++ b/vendor/github.com/multiformats/go-varint/README.md @@ -0,0 +1,35 @@ +# go-varint + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai) +[![](https://img.shields.io/badge/project-multiformats-blue.svg?style=flat-square)](https://github.com/multiformats/multiformats) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](https://webchat.freenode.net/?channels=%23ipfs) +[![](https://img.shields.io/badge/readme%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) +[![GoDoc](https://godoc.org/github.com/multiformats/go-varint?status.svg)](https://godoc.org/github.com/multiformats/go-varint) +[![Travis CI](https://img.shields.io/travis/multiformats/go-varint.svg?style=flat-square&branch=master)](https://travis-ci.org/multiformats/go-varint) +[![codecov.io](https://img.shields.io/codecov/c/github/multiformats/go-varint.svg?style=flat-square&branch=master)](https://codecov.io/github/multiformats/go-varint?branch=master) + +> Varint helpers that enforce minimal encoding. + +## Table of Contents + +- [Install](#install) +- [Contribute](#contribute) +- [License](#license) + +## Install + +```sh +go get github.com/multiformats/go-varint +``` + +## Contribute + +Contributions welcome. 
Please check out [the issues](https://github.com/multiformats/go-multiaddr/issues). + +Check out our [contributing document](https://github.com/multiformats/multiformats/blob/master/contributing.md) for more information on how we work, and about contributing in general. Please be aware that all interactions related to multiformats are subject to the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md). + +Small note: If editing the README, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + +## License + +[MIT](LICENSE) © 2019 Protocol Labs diff --git a/vendor/github.com/multiformats/go-varint/codecov.yml b/vendor/github.com/multiformats/go-varint/codecov.yml new file mode 100644 index 0000000000..ca8100ab11 --- /dev/null +++ b/vendor/github.com/multiformats/go-varint/codecov.yml @@ -0,0 +1,2 @@ +ignore: + - "multiaddr" diff --git a/vendor/github.com/multiformats/go-varint/go.mod b/vendor/github.com/multiformats/go-varint/go.mod new file mode 100644 index 0000000000..f32e76408e --- /dev/null +++ b/vendor/github.com/multiformats/go-varint/go.mod @@ -0,0 +1,3 @@ +module github.com/multiformats/go-varint + +go 1.12 diff --git a/vendor/github.com/multiformats/go-varint/varint.go b/vendor/github.com/multiformats/go-varint/varint.go new file mode 100644 index 0000000000..47340d9b29 --- /dev/null +++ b/vendor/github.com/multiformats/go-varint/varint.go @@ -0,0 +1,116 @@ +package varint + +import ( + "encoding/binary" + "errors" + "io" + "math/bits" +) + +var ( + ErrOverflow = errors.New("varints larger than uint63 not supported") + ErrUnderflow = errors.New("varints malformed, could not reach the end") + ErrNotMinimal = errors.New("varint not minimally encoded") +) + +const ( + // MaxLenUvarint63 is the maximum number of bytes representing an uvarint in + // this encoding, supporting a maximum value of 2^63 (uint63), aka + // MaxValueUvarint63. 
+ MaxLenUvarint63 = 9 + + // MaxValueUvarint63 is the maximum encodable uint63 value. + MaxValueUvarint63 = (1 << 63) - 1 +) + +// UvarintSize returns the size (in bytes) of `num` encoded as a unsigned varint. +// +// This may return a size greater than MaxUvarintLen63, which would be an +// illegal value, and would be rejected by readers. +func UvarintSize(num uint64) int { + bits := bits.Len64(num) + q, r := bits/7, bits%7 + size := q + if r > 0 || size == 0 { + size++ + } + return size +} + +// ToUvarint converts an unsigned integer to a varint-encoded []byte +func ToUvarint(num uint64) []byte { + buf := make([]byte, UvarintSize(num)) + n := binary.PutUvarint(buf, uint64(num)) + return buf[:n] +} + +// FromUvarint reads an unsigned varint from the beginning of buf, returns the +// varint, and the number of bytes read. +func FromUvarint(buf []byte) (uint64, int, error) { + // Modified from the go standard library. Copyright the Go Authors and + // released under the BSD License. + var x uint64 + var s uint + for i, b := range buf { + if (i == 8 && b >= 0x80) || i >= MaxLenUvarint63 { + // this is the 9th and last byte we're willing to read, but it + // signals there's more (1 in MSB). + // or this is the >= 10th byte, and for some reason we're still here. + return 0, 0, ErrOverflow + } + if b < 0x80 { + if b == 0 && s > 0 { + return 0, 0, ErrNotMinimal + } + return x | uint64(b)<= 0x80) || i >= MaxLenUvarint63 { + // this is the 9th and last byte we're willing to read, but it + // signals there's more (1 in MSB). + // or this is the >= 10th byte, and for some reason we're still here. 
+ return 0, ErrOverflow + } + if b < 0x80 { + if b == 0 && s > 0 { + return 0, ErrNotMinimal + } + return x | uint64(b)< +- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201) +- Use Set() instead of Add() in HTTPHeadersCarrier (#191) +- Update license to Apache 2.0 (#181) +- Replace 'golang.org/x/net/context' with 'context' (#176) +- Port of Python opentracing/harness/api_check.py to Go (#146) +- Fix race condition in MockSpan.Context() (#170) +- Add PeerHostIPv4.SetString() (#155) +- Add a Noop log field type to log to allow for optional fields (#150) + + +1.0.2 (2017-04-26) +------------------- + +- Add more semantic tags (#139) + + +1.0.1 (2017-02-06) +------------------- + +- Correct spelling in comments +- Address race in nextMockID() (#123) +- log: avoid panic marshaling nil error (#131) +- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128) +- Drop Go 1.5 that fails in Travis (#129) +- Add convenience methods Key() and Value() to log.Field +- Add convenience methods to log.Field (2 years, 6 months ago) + +1.0.0 (2016-09-26) +------------------- + +- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec) + diff --git a/vendor/github.com/opentracing/opentracing-go/LICENSE b/vendor/github.com/opentracing/opentracing-go/LICENSE new file mode 100644 index 0000000000..f0027349e8 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 The OpenTracing Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opentracing/opentracing-go/Makefile b/vendor/github.com/opentracing/opentracing-go/Makefile new file mode 100644 index 0000000000..62abb63f58 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/Makefile @@ -0,0 +1,20 @@ +.DEFAULT_GOAL := test-and-lint + +.PHONY: test-and-lint +test-and-lint: test lint + +.PHONY: test +test: + go test -v -cover -race ./... + +.PHONY: cover +cover: + go test -v -coverprofile=coverage.txt -covermode=atomic -race ./... + +.PHONY: lint +lint: + go fmt ./... + golint ./... + @# Run again with magic to exit non-zero if golint outputs anything. + @! (golint ./... | read dummy) + go vet ./... 
diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md new file mode 100644 index 0000000000..6ef1d7c9d2 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/README.md @@ -0,0 +1,171 @@ +[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge) + +# OpenTracing API for Go + +This package is a Go platform API for OpenTracing. + +## Required Reading + +In order to understand the Go platform API, one must first be familiar with the +[OpenTracing project](https://opentracing.io) and +[terminology](https://opentracing.io/specification/) more specifically. + +## API overview for those adding instrumentation + +Everyday consumers of this `opentracing` package really only need to worry +about a couple of key abstractions: the `StartSpan` function, the `Span` +interface, and binding a `Tracer` at `main()`-time. Here are code snippets +demonstrating some important use cases. + +#### Singleton initialization + +The simplest starting point is `./default_tracer.go`. As early as possible, call + +```go + import "github.com/opentracing/opentracing-go" + import ".../some_tracing_impl" + + func main() { + opentracing.SetGlobalTracer( + // tracing impl specific: + some_tracing_impl.New(...), + ) + ... + } +``` + +#### Non-Singleton initialization + +If you prefer direct control to singletons, manage ownership of the +`opentracing.Tracer` implementation explicitly. 
+ +#### Creating a Span given an existing Go `context.Context` + +If you use `context.Context` in your application, OpenTracing's Go library will +happily rely on it for `Span` propagation. To start a new (blocking child) +`Span`, you can use `StartSpanFromContext`. + +```go + func xyz(ctx context.Context, ...) { + ... + span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name") + defer span.Finish() + span.LogFields( + log.String("event", "soft error"), + log.String("type", "cache timeout"), + log.Int("waited.millis", 1500)) + ... + } +``` + +#### Starting an empty trace by creating a "root span" + +It's always possible to create a "root" `Span` with no parent or other causal +reference. + +```go + func xyz() { + ... + sp := opentracing.StartSpan("operation_name") + defer sp.Finish() + ... + } +``` + +#### Creating a (child) Span given an existing (parent) Span + +```go + func xyz(parentSpan opentracing.Span, ...) { + ... + sp := opentracing.StartSpan( + "operation_name", + opentracing.ChildOf(parentSpan.Context())) + defer sp.Finish() + ... + } +``` + +#### Serializing to the wire + +```go + func makeSomeRequest(ctx context.Context) ... { + if span := opentracing.SpanFromContext(ctx); span != nil { + httpClient := &http.Client{} + httpReq, _ := http.NewRequest("GET", "http://myservice/", nil) + + // Transmit the span's TraceContext as HTTP headers on our + // outbound request. + opentracing.GlobalTracer().Inject( + span.Context(), + opentracing.HTTPHeaders, + opentracing.HTTPHeadersCarrier(httpReq.Header)) + + resp, err := httpClient.Do(httpReq) + ... + } + ... + } +``` + +#### Deserializing from the wire + +```go + http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + var serverSpan opentracing.Span + appSpecificOperationName := ... 
+ wireContext, err := opentracing.GlobalTracer().Extract( + opentracing.HTTPHeaders, + opentracing.HTTPHeadersCarrier(req.Header)) + if err != nil { + // Optionally record something about err here + } + + // Create the span referring to the RPC client if available. + // If wireContext == nil, a root span will be created. + serverSpan = opentracing.StartSpan( + appSpecificOperationName, + ext.RPCServerOption(wireContext)) + + defer serverSpan.Finish() + + ctx := opentracing.ContextWithSpan(context.Background(), serverSpan) + ... + } +``` + +#### Conditionally capture a field using `log.Noop` + +In some situations, you may want to dynamically decide whether or not +to log a field. For example, you may want to capture additional data, +such as a customer ID, in non-production environments: + +```go + func Customer(order *Order) log.Field { + if os.Getenv("ENVIRONMENT") == "dev" { + return log.String("customer", order.Customer.ID) + } + return log.Noop() + } +``` + +#### Goroutine-safety + +The entire public API is goroutine-safe and does not require external +synchronization. + +## API pointers for those implementing a tracing system + +Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`. + +## API compatibility + +For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority. + +## Tracer test suite + +A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly. + +## Licensing + +[Apache 2.0 License](./LICENSE). 
diff --git a/vendor/github.com/opentracing/opentracing-go/ext.go b/vendor/github.com/opentracing/opentracing-go/ext.go new file mode 100644 index 0000000000..e11977ebe8 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/ext.go @@ -0,0 +1,24 @@ +package opentracing + +import ( + "context" +) + +// TracerContextWithSpanExtension is an extension interface that the +// implementation of the Tracer interface may want to implement. It +// allows to have some control over the go context when the +// ContextWithSpan is invoked. +// +// The primary purpose of this extension are adapters from opentracing +// API to some other tracing API. +type TracerContextWithSpanExtension interface { + // ContextWithSpanHook gets called by the ContextWithSpan + // function, when the Tracer implementation also implements + // this interface. It allows to put extra information into the + // context and make it available to the callers of the + // ContextWithSpan. + // + // This hook is invoked before the ContextWithSpan function + // actually puts the span into the context. + ContextWithSpanHook(ctx context.Context, span Span) context.Context +} diff --git a/vendor/github.com/opentracing/opentracing-go/ext/field.go b/vendor/github.com/opentracing/opentracing-go/ext/field.go new file mode 100644 index 0000000000..8282bd7584 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/ext/field.go @@ -0,0 +1,17 @@ +package ext + +import ( + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/log" +) + +// LogError sets the error=true tag on the Span and logs err as an "error" event. +func LogError(span opentracing.Span, err error, fields ...log.Field) { + Error.Set(span, true) + ef := []log.Field{ + log.Event("error"), + log.Error(err), + } + ef = append(ef, fields...) + span.LogFields(ef...) 
+} diff --git a/vendor/github.com/opentracing/opentracing-go/ext/field_test.go b/vendor/github.com/opentracing/opentracing-go/ext/field_test.go new file mode 100644 index 0000000000..d4f633e355 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/ext/field_test.go @@ -0,0 +1,50 @@ +package ext_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" + "github.com/opentracing/opentracing-go/mocktracer" +) + +func TestLogError(t *testing.T) { + tracer := mocktracer.New() + span := tracer.StartSpan("my-trace") + ext.Component.Set(span, "my-awesome-library") + ext.SamplingPriority.Set(span, 1) + err := fmt.Errorf("my error") + ext.LogError(span, err, log.Message("my optional msg text")) + + span.Finish() + + rawSpan := tracer.FinishedSpans()[0] + assert.Equal(t, map[string]interface{}{ + "component": "my-awesome-library", + "error": true, + }, rawSpan.Tags()) + + assert.Equal(t, len(rawSpan.Logs()), 1) + fields := rawSpan.Logs()[0].Fields + assert.Equal(t, []mocktracer.MockKeyValue{ + { + Key: "event", + ValueKind: reflect.String, + ValueString: "error", + }, + { + Key: "error.object", + ValueKind: reflect.String, + ValueString: err.Error(), + }, + { + Key: "message", + ValueKind: reflect.String, + ValueString: "my optional msg text", + }, + }, fields) +} diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go new file mode 100644 index 0000000000..a414b5951f --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/ext/tags.go @@ -0,0 +1,215 @@ +package ext + +import "github.com/opentracing/opentracing-go" + +// These constants define common tag names recommended for better portability across +// tracing systems and languages/platforms. 
+// +// The tag names are defined as typed strings, so that in addition to the usual use +// +// span.setTag(TagName, value) +// +// they also support value type validation via this additional syntax: +// +// TagName.Set(span, value) +// +var ( + ////////////////////////////////////////////////////////////////////// + // SpanKind (client/server or producer/consumer) + ////////////////////////////////////////////////////////////////////// + + // SpanKind hints at relationship between spans, e.g. client/server + SpanKind = spanKindTagName("span.kind") + + // SpanKindRPCClient marks a span representing the client-side of an RPC + // or other remote call + SpanKindRPCClientEnum = SpanKindEnum("client") + SpanKindRPCClient = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum} + + // SpanKindRPCServer marks a span representing the server-side of an RPC + // or other remote call + SpanKindRPCServerEnum = SpanKindEnum("server") + SpanKindRPCServer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum} + + // SpanKindProducer marks a span representing the producer-side of a + // message bus + SpanKindProducerEnum = SpanKindEnum("producer") + SpanKindProducer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum} + + // SpanKindConsumer marks a span representing the consumer-side of a + // message bus + SpanKindConsumerEnum = SpanKindEnum("consumer") + SpanKindConsumer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum} + + ////////////////////////////////////////////////////////////////////// + // Component name + ////////////////////////////////////////////////////////////////////// + + // Component is a low-cardinality identifier of the module, library, + // or package that is generating a span. 
+ Component = StringTagName("component") + + ////////////////////////////////////////////////////////////////////// + // Sampling hint + ////////////////////////////////////////////////////////////////////// + + // SamplingPriority determines the priority of sampling this Span. + SamplingPriority = Uint16TagName("sampling.priority") + + ////////////////////////////////////////////////////////////////////// + // Peer tags. These tags can be emitted by either client-side or + // server-side to describe the other side/service in a peer-to-peer + // communications, like an RPC call. + ////////////////////////////////////////////////////////////////////// + + // PeerService records the service name of the peer. + PeerService = StringTagName("peer.service") + + // PeerAddress records the address name of the peer. This may be a "ip:port", + // a bare "hostname", a FQDN or even a database DSN substring + // like "mysql://username@127.0.0.1:3306/dbname" + PeerAddress = StringTagName("peer.address") + + // PeerHostname records the host name of the peer + PeerHostname = StringTagName("peer.hostname") + + // PeerHostIPv4 records IP v4 host address of the peer + PeerHostIPv4 = IPv4TagName("peer.ipv4") + + // PeerHostIPv6 records IP v6 host address of the peer + PeerHostIPv6 = StringTagName("peer.ipv6") + + // PeerPort records port number of the peer + PeerPort = Uint16TagName("peer.port") + + ////////////////////////////////////////////////////////////////////// + // HTTP Tags + ////////////////////////////////////////////////////////////////////// + + // HTTPUrl should be the URL of the request being handled in this segment + // of the trace, in standard URI format. The protocol is optional. + HTTPUrl = StringTagName("http.url") + + // HTTPMethod is the HTTP method of the request, and is case-insensitive. + HTTPMethod = StringTagName("http.method") + + // HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the + // HTTP response. 
+ HTTPStatusCode = Uint16TagName("http.status_code") + + ////////////////////////////////////////////////////////////////////// + // DB Tags + ////////////////////////////////////////////////////////////////////// + + // DBInstance is database instance name. + DBInstance = StringTagName("db.instance") + + // DBStatement is a database statement for the given database type. + // It can be a query or a prepared statement (i.e., before substitution). + DBStatement = StringTagName("db.statement") + + // DBType is a database type. For any SQL database, "sql". + // For others, the lower-case database category, e.g. "redis" + DBType = StringTagName("db.type") + + // DBUser is a username for accessing database. + DBUser = StringTagName("db.user") + + ////////////////////////////////////////////////////////////////////// + // Message Bus Tag + ////////////////////////////////////////////////////////////////////// + + // MessageBusDestination is an address at which messages can be exchanged + MessageBusDestination = StringTagName("message_bus.destination") + + ////////////////////////////////////////////////////////////////////// + // Error Tag + ////////////////////////////////////////////////////////////////////// + + // Error indicates that operation represented by the span resulted in an error. 
+ Error = BoolTagName("error") +) + +// --- + +// SpanKindEnum represents common span types +type SpanKindEnum string + +type spanKindTagName string + +// Set adds a string tag to the `span` +func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) { + span.SetTag(string(tag), value) +} + +type rpcServerOption struct { + clientContext opentracing.SpanContext +} + +func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) { + if r.clientContext != nil { + opentracing.ChildOf(r.clientContext).Apply(o) + } + SpanKindRPCServer.Apply(o) +} + +// RPCServerOption returns a StartSpanOption appropriate for an RPC server span +// with `client` representing the metadata for the remote peer Span if available. +// In case client == nil, due to the client not being instrumented, this RPC +// server span will be a root span. +func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption { + return rpcServerOption{client} +} + +// --- + +// StringTagName is a common tag name to be set to a string value +type StringTagName string + +// Set adds a string tag to the `span` +func (tag StringTagName) Set(span opentracing.Span, value string) { + span.SetTag(string(tag), value) +} + +// --- + +// Uint32TagName is a common tag name to be set to a uint32 value +type Uint32TagName string + +// Set adds a uint32 tag to the `span` +func (tag Uint32TagName) Set(span opentracing.Span, value uint32) { + span.SetTag(string(tag), value) +} + +// --- + +// Uint16TagName is a common tag name to be set to a uint16 value +type Uint16TagName string + +// Set adds a uint16 tag to the `span` +func (tag Uint16TagName) Set(span opentracing.Span, value uint16) { + span.SetTag(string(tag), value) +} + +// --- + +// BoolTagName is a common tag name to be set to a bool value +type BoolTagName string + +// Set adds a bool tag to the `span` +func (tag BoolTagName) Set(span opentracing.Span, value bool) { + span.SetTag(string(tag), value) +} + +// IPv4TagName is a common 
tag name to be set to an ipv4 value +type IPv4TagName string + +// Set adds IP v4 host address of the peer as an uint32 value to the `span`, keep this for backward and zipkin compatibility +func (tag IPv4TagName) Set(span opentracing.Span, value uint32) { + span.SetTag(string(tag), value) +} + +// SetString records IP v4 host address of the peer as a .-separated tuple to the `span`. E.g., "127.0.0.1" +func (tag IPv4TagName) SetString(span opentracing.Span, value string) { + span.SetTag(string(tag), value) +} diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags_test.go b/vendor/github.com/opentracing/opentracing-go/ext/tags_test.go new file mode 100644 index 0000000000..ea9af335c7 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/ext/tags_test.go @@ -0,0 +1,148 @@ +package ext_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/mocktracer" +) + +func TestPeerTags(t *testing.T) { + if ext.PeerService != "peer.service" { + t.Fatalf("Invalid PeerService %v", ext.PeerService) + } + tracer := mocktracer.New() + span := tracer.StartSpan("my-trace") + ext.PeerService.Set(span, "my-service") + ext.PeerAddress.Set(span, "my-hostname:8080") + ext.PeerHostname.Set(span, "my-hostname") + ext.PeerHostIPv4.Set(span, uint32(127<<24|1)) + ext.PeerHostIPv6.Set(span, "::") + ext.PeerPort.Set(span, uint16(8080)) + ext.SamplingPriority.Set(span, uint16(1)) + ext.SpanKind.Set(span, ext.SpanKindRPCServerEnum) + ext.SpanKindRPCClient.Set(span) + span.Finish() + + rawSpan := tracer.FinishedSpans()[0] + assert.Equal(t, map[string]interface{}{ + "peer.service": "my-service", + "peer.address": "my-hostname:8080", + "peer.hostname": "my-hostname", + "peer.ipv4": uint32(127<<24 | 1), + "peer.ipv6": "::", + "peer.port": uint16(8080), + "span.kind": ext.SpanKindRPCClientEnum, + }, rawSpan.Tags()) + assert.True(t, 
span.Context().(mocktracer.MockSpanContext).Sampled) + ext.SamplingPriority.Set(span, uint16(0)) + assert.False(t, span.Context().(mocktracer.MockSpanContext).Sampled) +} + +func TestHTTPTags(t *testing.T) { + tracer := mocktracer.New() + span := tracer.StartSpan("my-trace", ext.SpanKindRPCServer) + ext.HTTPUrl.Set(span, "test.biz/uri?protocol=false") + ext.HTTPMethod.Set(span, "GET") + ext.HTTPStatusCode.Set(span, 301) + span.Finish() + + rawSpan := tracer.FinishedSpans()[0] + assert.Equal(t, map[string]interface{}{ + "http.url": "test.biz/uri?protocol=false", + "http.method": "GET", + "http.status_code": uint16(301), + "span.kind": ext.SpanKindRPCServerEnum, + }, rawSpan.Tags()) +} + +func TestDBTags(t *testing.T) { + tracer := mocktracer.New() + span := tracer.StartSpan("my-trace", ext.SpanKindRPCClient) + ext.DBInstance.Set(span, "127.0.0.1:3306/customers") + ext.DBStatement.Set(span, "SELECT * FROM user_table") + ext.DBType.Set(span, "sql") + ext.DBUser.Set(span, "customer_user") + span.Finish() + + rawSpan := tracer.FinishedSpans()[0] + assert.Equal(t, map[string]interface{}{ + "db.instance": "127.0.0.1:3306/customers", + "db.statement": "SELECT * FROM user_table", + "db.type": "sql", + "db.user": "customer_user", + "span.kind": ext.SpanKindRPCClientEnum, + }, rawSpan.Tags()) +} + +func TestMiscTags(t *testing.T) { + tracer := mocktracer.New() + span := tracer.StartSpan("my-trace") + ext.Component.Set(span, "my-awesome-library") + ext.SamplingPriority.Set(span, 1) + ext.Error.Set(span, true) + + span.Finish() + + rawSpan := tracer.FinishedSpans()[0] + assert.Equal(t, map[string]interface{}{ + "component": "my-awesome-library", + "error": true, + }, rawSpan.Tags()) +} + +func TestRPCServerOption(t *testing.T) { + tracer := mocktracer.New() + parent := tracer.StartSpan("my-trace") + parent.SetBaggageItem("bag", "gage") + + carrier := opentracing.HTTPHeadersCarrier{} + err := tracer.Inject(parent.Context(), opentracing.HTTPHeaders, carrier) + if err != nil { + 
t.Fatal(err) + } + + parCtx, err := tracer.Extract(opentracing.HTTPHeaders, carrier) + if err != nil { + t.Fatal(err) + } + + tracer.StartSpan("my-child", ext.RPCServerOption(parCtx)).Finish() + + rawSpan := tracer.FinishedSpans()[0] + assert.Equal(t, map[string]interface{}{ + "span.kind": ext.SpanKindRPCServerEnum, + }, rawSpan.Tags()) + assert.Equal(t, map[string]string{ + "bag": "gage", + }, rawSpan.Context().(mocktracer.MockSpanContext).Baggage) +} + +func TestMessageBusProducerTags(t *testing.T) { + tracer := mocktracer.New() + span := tracer.StartSpan("my-trace", ext.SpanKindProducer) + ext.MessageBusDestination.Set(span, "topic name") + span.Finish() + + rawSpan := tracer.FinishedSpans()[0] + assert.Equal(t, map[string]interface{}{ + "message_bus.destination": "topic name", + "span.kind": ext.SpanKindProducerEnum, + }, rawSpan.Tags()) +} + +func TestMessageBusConsumerTags(t *testing.T) { + tracer := mocktracer.New() + span := tracer.StartSpan("my-trace", ext.SpanKindConsumer) + ext.MessageBusDestination.Set(span, "topic name") + span.Finish() + + rawSpan := tracer.FinishedSpans()[0] + assert.Equal(t, map[string]interface{}{ + "message_bus.destination": "topic name", + "span.kind": ext.SpanKindConsumerEnum, + }, rawSpan.Tags()) +} diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go new file mode 100644 index 0000000000..4f7066a925 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/globaltracer.go @@ -0,0 +1,42 @@ +package opentracing + +type registeredTracer struct { + tracer Tracer + isRegistered bool +} + +var ( + globalTracer = registeredTracer{NoopTracer{}, false} +) + +// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by +// GlobalTracer(). 
Those who use GlobalTracer (rather than directly manage an +// opentracing.Tracer instance) should call SetGlobalTracer as early as +// possible in main(), prior to calling the `StartSpan` global func below. +// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan` +// (etc) globals are noops. +func SetGlobalTracer(tracer Tracer) { + globalTracer = registeredTracer{tracer, true} +} + +// GlobalTracer returns the global singleton `Tracer` implementation. +// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop +// implementation that drops all data handed to it. +func GlobalTracer() Tracer { + return globalTracer.tracer +} + +// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`. +func StartSpan(operationName string, opts ...StartSpanOption) Span { + return globalTracer.tracer.StartSpan(operationName, opts...) +} + +// InitGlobalTracer is deprecated. Please use SetGlobalTracer. +func InitGlobalTracer(tracer Tracer) { + SetGlobalTracer(tracer) +} + +// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered +func IsGlobalTracerRegistered() bool { + return globalTracer.isRegistered +} diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer_test.go b/vendor/github.com/opentracing/opentracing-go/globaltracer_test.go new file mode 100644 index 0000000000..59fb3b4f7b --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/globaltracer_test.go @@ -0,0 +1,26 @@ +package opentracing + +import ( + "reflect" + "testing" +) + +func TestIsGlobalTracerRegisteredDefaultIsFalse(t *testing.T) { + if IsGlobalTracerRegistered() { + t.Errorf("Should return false when no global tracer is registered.") + } +} + +func TestAfterSettingGlobalTracerIsGlobalTracerRegisteredReturnsTrue(t *testing.T) { + SetGlobalTracer(NoopTracer{}) + + if !IsGlobalTracerRegistered() { + t.Errorf("Should return true after a tracer has been registered.") + } +} + +func TestDefaultTracerIsNoopTracer(t 
*testing.T) { + if reflect.TypeOf(GlobalTracer()) != reflect.TypeOf(NoopTracer{}) { + t.Errorf("Should return false when no global tracer is registered.") + } +} diff --git a/vendor/github.com/opentracing/opentracing-go/go.mod b/vendor/github.com/opentracing/opentracing-go/go.mod new file mode 100644 index 0000000000..bf48bb5d73 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/go.mod @@ -0,0 +1,5 @@ +module github.com/opentracing/opentracing-go + +go 1.14 + +require github.com/stretchr/testify v1.3.0 diff --git a/vendor/github.com/opentracing/opentracing-go/go.sum b/vendor/github.com/opentracing/opentracing-go/go.sum new file mode 100644 index 0000000000..4347755afe --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/go.sum @@ -0,0 +1,7 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go new file mode 100644 index 0000000000..1831bc9b26 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/gocontext.go @@ -0,0 +1,65 @@ +package opentracing + +import "context" + +type contextKey struct{} + +var activeSpanKey = contextKey{} + +// ContextWithSpan returns a new `context.Context` that holds a reference to +// the span. If span is nil, a new context without an active span is returned. 
+func ContextWithSpan(ctx context.Context, span Span) context.Context { + if span != nil { + if tracerWithHook, ok := span.Tracer().(TracerContextWithSpanExtension); ok { + ctx = tracerWithHook.ContextWithSpanHook(ctx, span) + } + } + return context.WithValue(ctx, activeSpanKey, span) +} + +// SpanFromContext returns the `Span` previously associated with `ctx`, or +// `nil` if no such `Span` could be found. +// +// NOTE: context.Context != SpanContext: the former is Go's intra-process +// context propagation mechanism, and the latter houses OpenTracing's per-Span +// identity and baggage information. +func SpanFromContext(ctx context.Context) Span { + val := ctx.Value(activeSpanKey) + if sp, ok := val.(Span); ok { + return sp + } + return nil +} + +// StartSpanFromContext starts and returns a Span with `operationName`, using +// any Span found within `ctx` as a ChildOfRef. If no such parent could be +// found, StartSpanFromContext creates a root (parentless) Span. +// +// The second return value is a context.Context object built around the +// returned Span. +// +// Example usage: +// +// SomeFunction(ctx context.Context, ...) { +// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction") +// defer sp.Finish() +// ... +// } +func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) { + return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...) +} + +// StartSpanFromContextWithTracer starts and returns a span with `operationName` +// using a span found within the context as a ChildOfRef. If that doesn't exist +// it creates a root span. It also returns a context.Context object built +// around the returned span. +// +// It's behavior is identical to StartSpanFromContext except that it takes an explicit +// tracer as opposed to using the global tracer. 
+func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) { + if parentSpan := SpanFromContext(ctx); parentSpan != nil { + opts = append(opts, ChildOf(parentSpan.Context())) + } + span := tracer.StartSpan(operationName, opts...) + return span, ContextWithSpan(ctx, span) +} diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext_test.go b/vendor/github.com/opentracing/opentracing-go/gocontext_test.go new file mode 100644 index 0000000000..c6bbad4e4b --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/gocontext_test.go @@ -0,0 +1,121 @@ +package opentracing + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestContextWithSpan(t *testing.T) { + span := &noopSpan{} + ctx := ContextWithSpan(context.Background(), span) + span2 := SpanFromContext(ctx) + if span != span2 { + t.Errorf("Not the same span returned from context, expected=%+v, actual=%+v", span, span2) + } + + ctx = context.Background() + span2 = SpanFromContext(ctx) + if span2 != nil { + t.Errorf("Expected nil span, found %+v", span2) + } + + ctx = ContextWithSpan(ctx, span) + span2 = SpanFromContext(ctx) + if span != span2 { + t.Errorf("Not the same span returned from context, expected=%+v, actual=%+v", span, span2) + } + + ctx = ContextWithSpan(ctx, nil) + if s := SpanFromContext(ctx); s != nil { + t.Errorf("Not able to reset span in context, expected=nil, actual=%+v", s) + } +} + +type noopExtTracer struct { + NoopTracer +} + +type noopExtTracerCtxType struct{} + +func (noopExtTracer) ContextWithSpanHook(ctx context.Context, span Span) context.Context { + return context.WithValue(ctx, noopExtTracerCtxType{}, noopExtTracerCtxType{}) +} + +var _ Tracer = noopExtTracer{} +var _ TracerContextWithSpanExtension = noopExtTracer{} + +type noopExtSpan struct { + noopSpan +} + +func (noopExtSpan) Tracer() Tracer { + return noopExtTracer{} +} + +var _ Span = 
noopExtSpan{} + +func TestContextWithSpanWithExtension(t *testing.T) { + span := &noopExtSpan{} + ctx := ContextWithSpan(context.Background(), span) + span2 := SpanFromContext(ctx) + if span != span2 { + t.Errorf("Not the same span returned from context, expected=%+v, actual=%+v", span, span2) + } + if _, ok := ctx.Value(noopExtTracerCtxType{}).(noopExtTracerCtxType); !ok { + t.Error("ContextWithSpanHook was not called") + } +} + +func TestStartSpanFromContext(t *testing.T) { + testTracer := testTracer{} + + // Test the case where there *is* a Span in the Context. + { + parentSpan := &testSpan{} + parentCtx := ContextWithSpan(context.Background(), parentSpan) + childSpan, childCtx := StartSpanFromContextWithTracer(parentCtx, testTracer, "child") + if !childSpan.Context().(testSpanContext).HasParent { + t.Errorf("Failed to find parent: %v", childSpan) + } + if !childSpan.(testSpan).Equal(SpanFromContext(childCtx)) { + t.Errorf("Unable to find child span in context: %v", childCtx) + } + } + + // Test the case where there *is not* a Span in the Context. 
+ { + emptyCtx := context.Background() + childSpan, childCtx := StartSpanFromContextWithTracer(emptyCtx, testTracer, "child") + if childSpan.Context().(testSpanContext).HasParent { + t.Errorf("Should not have found parent: %v", childSpan) + } + if !childSpan.(testSpan).Equal(SpanFromContext(childCtx)) { + t.Errorf("Unable to find child span in context: %v", childCtx) + } + } +} + +func TestStartSpanFromContextOptions(t *testing.T) { + testTracer := testTracer{} + + // Test options are passed to tracer + + startTime := time.Now().Add(-10 * time.Second) // ten seconds ago + span, ctx := StartSpanFromContextWithTracer( + context.Background(), testTracer, "parent", StartTime(startTime), Tag{"component", "test"}) + + assert.Equal(t, "test", span.(testSpan).Tags["component"]) + assert.Equal(t, startTime, span.(testSpan).StartTime) + + // Test it also works for a child span + + childStartTime := startTime.Add(3 * time.Second) + childSpan, _ := StartSpanFromContextWithTracer( + ctx, testTracer, "child", StartTime(childStartTime)) + + assert.Equal(t, childSpan.(testSpan).Tags["component"], nil) + assert.Equal(t, childSpan.(testSpan).StartTime, childStartTime) +} diff --git a/vendor/github.com/opentracing/opentracing-go/harness/api_checkers.go b/vendor/github.com/opentracing/opentracing-go/harness/api_checkers.go new file mode 100644 index 0000000000..5ae7fb0252 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/harness/api_checkers.go @@ -0,0 +1,472 @@ +/* + +Package harness provides a suite of API compatibility checks. They were originally ported from the +OpenTracing Python library's "harness" module. + +To run this test suite against your tracer, call harness.RunAPIChecks and provide it a function +that returns a Tracer implementation and a function to call to close it. The function will be +called to create a new tracer before each test in the suite is run, and the returned closer function +will be called after each test is finished. 
+ +Several options provide additional checks for your Tracer's behavior: CheckBaggageValues(true) +indicates your tracer supports baggage propagation, CheckExtract(true) tells the suite to test if +the Tracer can extract a trace context from text and binary carriers, and CheckInject(true) tests +if the Tracer can inject the trace context into a carrier. + +The UseProbe option provides an APICheckProbe implementation that allows the test suite to +additionally check if two Spans are part of the same trace, and if a Span and a SpanContext +are part of the same trace. Implementing an APICheckProbe provides additional assertions that +your tracer is working properly. + +*/ +package harness + +import ( + "bytes" + "testing" + "time" + + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/log" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" +) + +// APICheckCapabilities describes capabilities of a Tracer that should be checked by APICheckSuite. +type APICheckCapabilities struct { + CheckBaggageValues bool // whether to check for propagation of baggage values + CheckExtract bool // whether to check if extracting contexts from carriers works + CheckInject bool // whether to check if injecting contexts works + Probe APICheckProbe // optional interface providing methods to check recorded data +} + +// APICheckProbe exposes methods for testing data recorded by a Tracer. +type APICheckProbe interface { + // SameTrace helps tests assert that this tracer's spans are from the same trace. + SameTrace(first, second opentracing.Span) bool + // SameSpanContext helps tests assert that a span and a context are from the same trace and span. + SameSpanContext(opentracing.Span, opentracing.SpanContext) bool +} + +// APICheckSuite is a testify suite for checking a Tracer against the OpenTracing API. 
+type APICheckSuite struct { + suite.Suite + opts APICheckCapabilities + newTracer func() (tracer opentracing.Tracer, closer func()) + tracer opentracing.Tracer + closer func() +} + +// RunAPIChecks runs a test suite to check a Tracer against the OpenTracing API. +// It is provided a function that will be executed to create and destroy a tracer for each test +// in the suite, and the given APICheckOption functional options `opts`. +func RunAPIChecks( + t *testing.T, + newTracer func() (tracer opentracing.Tracer, closer func()), + opts ...APICheckOption, +) { + s := &APICheckSuite{newTracer: newTracer} + for _, opt := range opts { + opt(s) + } + suite.Run(t, s) +} + +// APICheckOption instances may be passed to NewAPICheckSuite. +type APICheckOption func(*APICheckSuite) + +// CheckBaggageValues returns an option that sets whether to check for propagation of baggage values. +func CheckBaggageValues(val bool) APICheckOption { + return func(s *APICheckSuite) { + s.opts.CheckBaggageValues = val + } +} + +// CheckExtract returns an option that sets whether to check if extracting contexts from carriers works. +func CheckExtract(val bool) APICheckOption { + return func(s *APICheckSuite) { + s.opts.CheckExtract = val + } +} + +// CheckInject returns an option that sets whether to check if injecting contexts works. +func CheckInject(val bool) APICheckOption { + return func(s *APICheckSuite) { + s.opts.CheckInject = val + } +} + +// CheckEverything returns an option that enables all API checks. +func CheckEverything() APICheckOption { + return func(s *APICheckSuite) { + s.opts.CheckBaggageValues = true + s.opts.CheckExtract = true + s.opts.CheckInject = true + } +} + +// UseProbe returns an option that specifies an APICheckProbe implementation to use. +func UseProbe(probe APICheckProbe) APICheckOption { + return func(s *APICheckSuite) { + s.opts.Probe = probe + } +} + +// SetupTest creates a tracer for this specific test invocation. 
+func (s *APICheckSuite) SetupTest() { + s.tracer, s.closer = s.newTracer() + if s.tracer == nil { + s.T().Fatalf("newTracer returned nil Tracer") + } +} + +// TearDownTest closes the tracer, and clears the test-specific tracer. +func (s *APICheckSuite) TearDownTest() { + if s.closer != nil { + s.closer() + } + s.tracer, s.closer = nil, nil +} + +// TestStartSpan checks if a Tracer can start a span and calls some span API methods. +func (s *APICheckSuite) TestStartSpan() { + span := s.tracer.StartSpan( + "Fry", + opentracing.Tag{Key: "birthday", Value: "August 14 1974"}) + span.LogFields( + log.String("hospital", "Brooklyn Pre-Med Hospital"), + log.String("city", "Old New York")) + span.Finish() +} + +// TestStartSpanWithParent checks if a Tracer can start a span with a specified parent. +func (s *APICheckSuite) TestStartSpanWithParent() { + parentSpan := s.tracer.StartSpan("Turanga Munda") + s.NotNil(parentSpan) + + childFns := []func(opentracing.SpanContext) opentracing.SpanReference{ + opentracing.ChildOf, + opentracing.FollowsFrom, + } + for _, childFn := range childFns { + span := s.tracer.StartSpan( + "Leela", + childFn(parentSpan.Context()), + opentracing.Tag{Key: "birthplace", Value: "sewers"}) + span.Finish() + if s.opts.Probe != nil { + s.True(s.opts.Probe.SameTrace(parentSpan, span)) + } else { + s.T().Log("harness.Probe not specified, skipping") + } + } + + parentSpan.Finish() +} + +// TestSetOperationName attempts to set the operation name on a span after it has been created. +func (s *APICheckSuite) TestSetOperationName() { + span := s.tracer.StartSpan("").SetOperationName("Farnsworth") + span.Finish() +} + +// TestSpanTagValueTypes sets tags using values of different types. +func (s *APICheckSuite) TestSpanTagValueTypes() { + span := s.tracer.StartSpan("ManyTypes") + span. + SetTag("an_int", 9). + SetTag("a_bool", true). + SetTag("a_string", "aoeuidhtns") +} + +// TestSpanTagsWithChaining tests chaining of calls to SetTag. 
+func (s *APICheckSuite) TestSpanTagsWithChaining() { + span := s.tracer.StartSpan("Farnsworth") + span. + SetTag("birthday", "9 April, 2841"). + SetTag("loves", "different lengths of wires") + span. + SetTag("unicode_val", "non-ascii: \u200b"). + SetTag("unicode_key_\u200b", "ascii val") + span.Finish() +} + +// TestSpanLogs tests calls to log keys and values with spans. +func (s *APICheckSuite) TestSpanLogs() { + span := s.tracer.StartSpan("Fry") + span.LogKV( + "event", "frozen", + "year", 1999, + "place", "Cryogenics Labs") + span.LogKV( + "event", "defrosted", + "year", 2999, + "place", "Cryogenics Labs") + + ts := time.Now() + span.FinishWithOptions(opentracing.FinishOptions{ + LogRecords: []opentracing.LogRecord{ + { + Timestamp: ts, + Fields: []log.Field{ + log.String("event", "job-assignment"), + log.String("type", "delivery boy"), + }, + }, + }}) + + // Test deprecated log methods + span.LogEvent("an arbitrary event") + span.LogEventWithPayload("y", "z") + span.Log(opentracing.LogData{Event: "y", Payload: "z"}) +} + +func assertEmptyBaggage(t *testing.T, spanContext opentracing.SpanContext) { + if !assert.NotNil(t, spanContext, "assertEmptyBaggage got empty context") { + return + } + spanContext.ForeachBaggageItem(func(k, v string) bool { + assert.Fail(t, "new span shouldn't have baggage") + return false + }) +} + +// TestSpanBaggage tests calls to set and get span baggage, and if the CheckBaggageValues option +// is set, asserts that baggage values were successfully retrieved. 
+func (s *APICheckSuite) TestSpanBaggage() { + span := s.tracer.StartSpan("Fry") + assertEmptyBaggage(s.T(), span.Context()) + + spanRef := span.SetBaggageItem("Kiff-loves", "Amy") + s.Exactly(spanRef, span) + + val := span.BaggageItem("Kiff-loves") + if s.opts.CheckBaggageValues { + s.Equal("Amy", val) + } else { + s.T().Log("CheckBaggageValues capability not set, skipping") + } + span.Finish() +} + +// TestContextBaggage tests calls to set and get span baggage, and if the CheckBaggageValues option +// is set, asserts that baggage values were successfully retrieved from the span's SpanContext. +func (s *APICheckSuite) TestContextBaggage() { + span := s.tracer.StartSpan("Fry") + assertEmptyBaggage(s.T(), span.Context()) + + span.SetBaggageItem("Kiff-loves", "Amy") + if s.opts.CheckBaggageValues { + called := false + span.Context().ForeachBaggageItem(func(k, v string) bool { + s.False(called) + called = true + s.Equal("Kiff-loves", k) + s.Equal("Amy", v) + return true + }) + } else { + s.T().Log("CheckBaggageValues capability not set, skipping") + } + span.Finish() +} + +// TestTextPropagation tests if the Tracer can Inject a span into a TextMapCarrier, and later Extract it. +// If CheckExtract is set, it will check if Extract was successful (returned no error). If a Probe is set, +// it will check if the extracted context is in the same trace as the original span. 
+func (s *APICheckSuite) TestTextPropagation() { + span := s.tracer.StartSpan("Bender") + textCarrier := opentracing.TextMapCarrier{} + err := span.Tracer().Inject(span.Context(), opentracing.TextMap, textCarrier) + assert.NoError(s.T(), err) + + extractedContext, err := s.tracer.Extract(opentracing.TextMap, textCarrier) + if s.opts.CheckExtract { + s.NoError(err) + assertEmptyBaggage(s.T(), extractedContext) + } else { + s.T().Log("CheckExtract capability not set, skipping") + } + if s.opts.Probe != nil { + s.True(s.opts.Probe.SameSpanContext(span, extractedContext)) + } else { + s.T().Log("harness.Probe not specified, skipping") + } + span.Finish() +} + +// TestHTTPPropagation tests if the Tracer can Inject a span into HTTP headers, and later Extract it. +// If CheckExtract is set, it will check if Extract was successful (returned no error). If a Probe is set, +// it will check if the extracted context is in the same trace as the original span. +func (s *APICheckSuite) TestHTTPPropagation() { + span := s.tracer.StartSpan("Bender") + textCarrier := opentracing.HTTPHeadersCarrier{} + err := span.Tracer().Inject(span.Context(), opentracing.HTTPHeaders, textCarrier) + s.NoError(err) + + extractedContext, err := s.tracer.Extract(opentracing.HTTPHeaders, textCarrier) + if s.opts.CheckExtract { + s.NoError(err) + assertEmptyBaggage(s.T(), extractedContext) + } else { + s.T().Log("CheckExtract capability not set, skipping") + } + if s.opts.Probe != nil { + s.True(s.opts.Probe.SameSpanContext(span, extractedContext)) + } else { + s.T().Log("harness.Probe not specified, skipping") + } + span.Finish() +} + +// TestBinaryPropagation tests if the Tracer can Inject a span into a binary buffer, and later Extract it. +// If CheckExtract is set, it will check if Extract was successful (returned no error). If a Probe is set, +// it will check if the extracted context is in the same trace as the original span. 
+func (s *APICheckSuite) TestBinaryPropagation() { + span := s.tracer.StartSpan("Bender") + buf := new(bytes.Buffer) + err := span.Tracer().Inject(span.Context(), opentracing.Binary, buf) + s.NoError(err) + + extractedContext, err := s.tracer.Extract(opentracing.Binary, buf) + if s.opts.CheckExtract { + s.NoError(err) + assertEmptyBaggage(s.T(), extractedContext) + } else { + s.T().Log("CheckExtract capability not set, skipping") + } + if s.opts.Probe != nil { + s.True(s.opts.Probe.SameSpanContext(span, extractedContext)) + } else { + s.T().Log("harness.Probe not specified, skipping") + } + span.Finish() +} + +// TestMandatoryFormats tests if all mandatory carrier formats are supported. If CheckExtract is set, it +// will check if the call to Extract was successful (returned no error such as ErrUnsupportedFormat). +func (s *APICheckSuite) TestMandatoryFormats() { + formats := []struct{ Format, Carrier interface{} }{ + {opentracing.TextMap, opentracing.TextMapCarrier{}}, + {opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier{}}, + {opentracing.Binary, new(bytes.Buffer)}, + } + span := s.tracer.StartSpan("Bender") + for _, fmtCarrier := range formats { + err := span.Tracer().Inject(span.Context(), fmtCarrier.Format, fmtCarrier.Carrier) + s.NoError(err) + spanCtx, err := s.tracer.Extract(fmtCarrier.Format, fmtCarrier.Carrier) + if s.opts.CheckExtract { + s.NoError(err) + assertEmptyBaggage(s.T(), spanCtx) + } else { + s.T().Log("CheckExtract capability not set, skipping") + } + } +} + +// TestUnknownFormat checks if attempting to Inject or Extract using an unsupported format +// returns ErrUnsupportedFormat, if CheckInject and CheckExtract are set. +func (s *APICheckSuite) TestUnknownFormat() { + customFormat := "kiss my shiny metal ..." 
+ span := s.tracer.StartSpan("Bender") + + err := span.Tracer().Inject(span.Context(), customFormat, nil) + if s.opts.CheckInject { + s.Equal(opentracing.ErrUnsupportedFormat, err) + } else { + s.T().Log("CheckInject capability not set, skipping") + } + ctx, err := s.tracer.Extract(customFormat, nil) + s.Nil(ctx) + if s.opts.CheckExtract { + s.Equal(opentracing.ErrUnsupportedFormat, err) + } else { + s.T().Log("CheckExtract capability not set, skipping") + } +} + +// ForeignSpanContext satisfies the opentracing.SpanContext interface, but otherwise does nothing. +type ForeignSpanContext struct{} + +// ForeachBaggageItem could call handler for each baggage KV, but does nothing. +func (f ForeignSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} + +// NotACarrier does not satisfy any of the opentracing carrier interfaces. +type NotACarrier struct{} + +// TestInvalidInject checks if errors are returned when Inject is called with invalid inputs. +func (s *APICheckSuite) TestInvalidInject() { + if !s.opts.CheckInject { + s.T().Skip("CheckInject capability not set, skipping") + } + span := s.tracer.StartSpan("op") + + // binary inject + err := span.Tracer().Inject(ForeignSpanContext{}, opentracing.Binary, new(bytes.Buffer)) + s.Equal(opentracing.ErrInvalidSpanContext, err, "Foreign SpanContext should return invalid error") + err = span.Tracer().Inject(span.Context(), opentracing.Binary, NotACarrier{}) + s.Equal(opentracing.ErrInvalidCarrier, err, "Carrier that's not io.Writer should return error") + + // text inject + err = span.Tracer().Inject(ForeignSpanContext{}, opentracing.TextMap, opentracing.TextMapCarrier{}) + s.Equal(opentracing.ErrInvalidSpanContext, err, "Foreign SpanContext should return invalid error") + err = span.Tracer().Inject(span.Context(), opentracing.TextMap, NotACarrier{}) + s.Equal(opentracing.ErrInvalidCarrier, err, "Carrier that's not TextMapWriter should return error") + + // HTTP inject + err = 
span.Tracer().Inject(ForeignSpanContext{}, opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier{}) + s.Equal(opentracing.ErrInvalidSpanContext, err, "Foreign SpanContext should return invalid error") + err = span.Tracer().Inject(span.Context(), opentracing.HTTPHeaders, NotACarrier{}) + s.Equal(opentracing.ErrInvalidCarrier, err, "Carrier that's not TextMapWriter should return error") +} + +// TestInvalidExtract checks if errors are returned when Extract is called with invalid inputs. +func (s *APICheckSuite) TestInvalidExtract() { + if !s.opts.CheckExtract { + s.T().Skip("CheckExtract capability not set, skipping") + } + span := s.tracer.StartSpan("op") + + // binary extract + ctx, err := span.Tracer().Extract(opentracing.Binary, NotACarrier{}) + s.Equal(opentracing.ErrInvalidCarrier, err, "Carrier that's not io.Reader should return error") + s.Nil(ctx) + + // text extract + ctx, err = span.Tracer().Extract(opentracing.TextMap, NotACarrier{}) + s.Equal(opentracing.ErrInvalidCarrier, err, "Carrier that's not TextMapReader should return error") + s.Nil(ctx) + + // HTTP extract + ctx, err = span.Tracer().Extract(opentracing.HTTPHeaders, NotACarrier{}) + s.Equal(opentracing.ErrInvalidCarrier, err, "Carrier that's not TextMapReader should return error") + s.Nil(ctx) + + span.Finish() +} + +// TestMultiBaggage tests calls to set multiple baggage items, and if the CheckBaggageValues option +// is set, asserts that a baggage value was successfully retrieved from the span's SpanContext. +// It also ensures that returning false from the ForeachBaggageItem handler aborts iteration. 
+func (s *APICheckSuite) TestMultiBaggage() { + span := s.tracer.StartSpan("op") + assertEmptyBaggage(s.T(), span.Context()) + + span.SetBaggageItem("Bag1", "BaggageVal1") + span.SetBaggageItem("Bag2", "BaggageVal2") + if s.opts.CheckBaggageValues { + s.Equal("BaggageVal1", span.BaggageItem("Bag1")) + s.Equal("BaggageVal2", span.BaggageItem("Bag2")) + called := false + span.Context().ForeachBaggageItem(func(k, v string) bool { + s.False(called) // should only be called once + called = true + return false + }) + s.True(called) + } else { + s.T().Log("CheckBaggageValues capability not set, skipping") + } + span.Finish() +} diff --git a/vendor/github.com/opentracing/opentracing-go/harness/noop_api_test.go b/vendor/github.com/opentracing/opentracing-go/harness/noop_api_test.go new file mode 100644 index 0000000000..b66e647599 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/harness/noop_api_test.go @@ -0,0 +1,17 @@ +package harness + +import ( + "testing" + + "github.com/opentracing/opentracing-go" +) + +func TestAPI(t *testing.T) { + RunAPIChecks(t, func() (tracer opentracing.Tracer, closer func()) { + return opentracing.NoopTracer{}, nil + }, // NoopTracer doesn't do much + CheckBaggageValues(false), + CheckInject(false), + CheckExtract(false), + ) +} diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go new file mode 100644 index 0000000000..f222ded797 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/log/field.go @@ -0,0 +1,282 @@ +package log + +import ( + "fmt" + "math" +) + +type fieldType int + +const ( + stringType fieldType = iota + boolType + intType + int32Type + uint32Type + int64Type + uint64Type + float32Type + float64Type + errorType + objectType + lazyLoggerType + noopType +) + +// Field instances are constructed via LogBool, LogString, and so on. +// Tracing implementations may then handle them via the Field.Marshal +// method. 
+// +// "heavily influenced by" (i.e., partially stolen from) +// https://github.com/uber-go/zap +type Field struct { + key string + fieldType fieldType + numericVal int64 + stringVal string + interfaceVal interface{} +} + +// String adds a string-valued key:value pair to a Span.LogFields() record +func String(key, val string) Field { + return Field{ + key: key, + fieldType: stringType, + stringVal: val, + } +} + +// Bool adds a bool-valued key:value pair to a Span.LogFields() record +func Bool(key string, val bool) Field { + var numericVal int64 + if val { + numericVal = 1 + } + return Field{ + key: key, + fieldType: boolType, + numericVal: numericVal, + } +} + +// Int adds an int-valued key:value pair to a Span.LogFields() record +func Int(key string, val int) Field { + return Field{ + key: key, + fieldType: intType, + numericVal: int64(val), + } +} + +// Int32 adds an int32-valued key:value pair to a Span.LogFields() record +func Int32(key string, val int32) Field { + return Field{ + key: key, + fieldType: int32Type, + numericVal: int64(val), + } +} + +// Int64 adds an int64-valued key:value pair to a Span.LogFields() record +func Int64(key string, val int64) Field { + return Field{ + key: key, + fieldType: int64Type, + numericVal: val, + } +} + +// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record +func Uint32(key string, val uint32) Field { + return Field{ + key: key, + fieldType: uint32Type, + numericVal: int64(val), + } +} + +// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record +func Uint64(key string, val uint64) Field { + return Field{ + key: key, + fieldType: uint64Type, + numericVal: int64(val), + } +} + +// Float32 adds a float32-valued key:value pair to a Span.LogFields() record +func Float32(key string, val float32) Field { + return Field{ + key: key, + fieldType: float32Type, + numericVal: int64(math.Float32bits(val)), + } +} + +// Float64 adds a float64-valued key:value pair to a Span.LogFields() record 
+func Float64(key string, val float64) Field { + return Field{ + key: key, + fieldType: float64Type, + numericVal: int64(math.Float64bits(val)), + } +} + +// Error adds an error with the key "error.object" to a Span.LogFields() record +func Error(err error) Field { + return Field{ + key: "error.object", + fieldType: errorType, + interfaceVal: err, + } +} + +// Object adds an object-valued key:value pair to a Span.LogFields() record +// Please pass in an immutable object, otherwise there may be concurrency issues. +// Such as passing in the map, log.Object may result in "fatal error: concurrent map iteration and map write". +// Because span is sent asynchronously, it is possible that this map will also be modified. +func Object(key string, obj interface{}) Field { + return Field{ + key: key, + fieldType: objectType, + interfaceVal: obj, + } +} + +// Event creates a string-valued Field for span logs with key="event" and value=val. +func Event(val string) Field { + return String("event", val) +} + +// Message creates a string-valued Field for span logs with key="message" and value=val. +func Message(val string) Field { + return String("message", val) +} + +// LazyLogger allows for user-defined, late-bound logging of arbitrary data +type LazyLogger func(fv Encoder) + +// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing +// implementation will call the LazyLogger function at an indefinite time in +// the future (after Lazy() returns). +func Lazy(ll LazyLogger) Field { + return Field{ + fieldType: lazyLoggerType, + interfaceVal: ll, + } +} + +// Noop creates a no-op log field that should be ignored by the tracer. 
+// It can be used to capture optional fields, for example those that should +// only be logged in non-production environment: +// +// func customerField(order *Order) log.Field { +// if os.Getenv("ENVIRONMENT") == "dev" { +// return log.String("customer", order.Customer.ID) +// } +// return log.Noop() +// } +// +// span.LogFields(log.String("event", "purchase"), customerField(order)) +// +func Noop() Field { + return Field{ + fieldType: noopType, + } +} + +// Encoder allows access to the contents of a Field (via a call to +// Field.Marshal). +// +// Tracer implementations typically provide an implementation of Encoder; +// OpenTracing callers typically do not need to concern themselves with it. +type Encoder interface { + EmitString(key, value string) + EmitBool(key string, value bool) + EmitInt(key string, value int) + EmitInt32(key string, value int32) + EmitInt64(key string, value int64) + EmitUint32(key string, value uint32) + EmitUint64(key string, value uint64) + EmitFloat32(key string, value float32) + EmitFloat64(key string, value float64) + EmitObject(key string, value interface{}) + EmitLazyLogger(value LazyLogger) +} + +// Marshal passes a Field instance through to the appropriate +// field-type-specific method of an Encoder. 
+func (lf Field) Marshal(visitor Encoder) { + switch lf.fieldType { + case stringType: + visitor.EmitString(lf.key, lf.stringVal) + case boolType: + visitor.EmitBool(lf.key, lf.numericVal != 0) + case intType: + visitor.EmitInt(lf.key, int(lf.numericVal)) + case int32Type: + visitor.EmitInt32(lf.key, int32(lf.numericVal)) + case int64Type: + visitor.EmitInt64(lf.key, int64(lf.numericVal)) + case uint32Type: + visitor.EmitUint32(lf.key, uint32(lf.numericVal)) + case uint64Type: + visitor.EmitUint64(lf.key, uint64(lf.numericVal)) + case float32Type: + visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal))) + case float64Type: + visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal))) + case errorType: + if err, ok := lf.interfaceVal.(error); ok { + visitor.EmitString(lf.key, err.Error()) + } else { + visitor.EmitString(lf.key, "") + } + case objectType: + visitor.EmitObject(lf.key, lf.interfaceVal) + case lazyLoggerType: + visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger)) + case noopType: + // intentionally left blank + } +} + +// Key returns the field's key. +func (lf Field) Key() string { + return lf.key +} + +// Value returns the field's value as interface{}. +func (lf Field) Value() interface{} { + switch lf.fieldType { + case stringType: + return lf.stringVal + case boolType: + return lf.numericVal != 0 + case intType: + return int(lf.numericVal) + case int32Type: + return int32(lf.numericVal) + case int64Type: + return int64(lf.numericVal) + case uint32Type: + return uint32(lf.numericVal) + case uint64Type: + return uint64(lf.numericVal) + case float32Type: + return math.Float32frombits(uint32(lf.numericVal)) + case float64Type: + return math.Float64frombits(uint64(lf.numericVal)) + case errorType, objectType, lazyLoggerType: + return lf.interfaceVal + case noopType: + return nil + default: + return nil + } +} + +// String returns a string representation of the key and value. 
+func (lf Field) String() string { + return fmt.Sprint(lf.key, ":", lf.Value()) +} diff --git a/vendor/github.com/opentracing/opentracing-go/log/field_test.go b/vendor/github.com/opentracing/opentracing-go/log/field_test.go new file mode 100644 index 0000000000..61a3321f56 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/log/field_test.go @@ -0,0 +1,59 @@ +package log + +import ( + "fmt" + "testing" +) + +func TestFieldString(t *testing.T) { + testCases := []struct { + field Field + expected string + }{ + { + field: String("key", "value"), + expected: "key:value", + }, + { + field: Bool("key", true), + expected: "key:true", + }, + { + field: Int("key", 5), + expected: "key:5", + }, + { + field: Error(fmt.Errorf("err msg")), + expected: "error.object:err msg", + }, + { + field: Error(nil), + expected: "error.object:", + }, + { + field: Noop(), + expected: ":", + }, + { + field: Event("test"), + expected: "event:test", + }, + { + field: Message("test2"), + expected: "message:test2", + }, + } + for i, tc := range testCases { + if str := tc.field.String(); str != tc.expected { + t.Errorf("%d: expected '%s', got '%s'", i, tc.expected, str) + } + } +} + +func TestNoopDoesNotMarshal(t *testing.T) { + mockEncoder := struct { + Encoder + }{} + f := Noop() + f.Marshal(mockEncoder) // panics if any Encoder method is invoked +} diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go new file mode 100644 index 0000000000..d57e28aa57 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/log/util.go @@ -0,0 +1,61 @@ +package log + +import ( + "fmt" + "reflect" +) + +// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice +// a la Span.LogFields(). 
+func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) { + if len(keyValues)%2 != 0 { + return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues)) + } + fields := make([]Field, len(keyValues)/2) + for i := 0; i*2 < len(keyValues); i++ { + key, ok := keyValues[i*2].(string) + if !ok { + return nil, fmt.Errorf( + "non-string key (pair #%d): %T", + i, keyValues[i*2]) + } + switch typedVal := keyValues[i*2+1].(type) { + case bool: + fields[i] = Bool(key, typedVal) + case string: + fields[i] = String(key, typedVal) + case int: + fields[i] = Int(key, typedVal) + case int8: + fields[i] = Int32(key, int32(typedVal)) + case int16: + fields[i] = Int32(key, int32(typedVal)) + case int32: + fields[i] = Int32(key, typedVal) + case int64: + fields[i] = Int64(key, typedVal) + case uint: + fields[i] = Uint64(key, uint64(typedVal)) + case uint64: + fields[i] = Uint64(key, typedVal) + case uint8: + fields[i] = Uint32(key, uint32(typedVal)) + case uint16: + fields[i] = Uint32(key, uint32(typedVal)) + case uint32: + fields[i] = Uint32(key, typedVal) + case float32: + fields[i] = Float32(key, typedVal) + case float64: + fields[i] = Float64(key, typedVal) + default: + if typedVal == nil || (reflect.ValueOf(typedVal).Kind() == reflect.Ptr && reflect.ValueOf(typedVal).IsNil()) { + fields[i] = String(key, "nil") + continue + } + // When in doubt, coerce to a string + fields[i] = String(key, fmt.Sprint(typedVal)) + } + } + return fields, nil +} diff --git a/vendor/github.com/opentracing/opentracing-go/log/util_test.go b/vendor/github.com/opentracing/opentracing-go/log/util_test.go new file mode 100644 index 0000000000..882cff3b53 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/log/util_test.go @@ -0,0 +1,86 @@ +package log + +import ( + "errors" + "io" + "testing" + + "github.com/stretchr/testify/assert" +) + +var nilInterface io.Reader + +func TestInterleavedKVToFields(t *testing.T) { + + tests := []struct { + name string + keyValues []interface{} 
+ want []Field + wantErr bool + }{ + { + "incorrect pair", + []interface{}{"test"}, + nil, + true, + }, + { + "non string key", + []interface{}{struct{}{}, "foo"}, + nil, + true, + }, + { + "happy path", + []interface{}{ + "bool", true, + "string", "string", + "int", int(1), + "int8", int8(2), + "int16", int16(3), + "int64", int64(4), + "uint", uint(5), + "uint64", uint64(6), + "uint8", uint8(7), + "uint16", uint16(8), + "uint32", uint32(9), + "float32", float32(10), + "float64", float64(11), + "int32", int32(12), + "stringer", errors.New("err"), + "nilInterface", nilInterface, + "nil", nil, + }, + []Field{ + Bool("bool", true), + String("string", "string"), + Int("int", int(1)), + Int32("int8", int32(2)), + Int32("int16", int32(3)), + Int64("int64", int64(4)), + Uint64("uint", uint64(5)), + Uint64("uint64", uint64(6)), + Uint32("uint8", uint32(7)), + Uint32("uint16", uint32(8)), + Uint32("uint32", uint32(9)), + Float32("float32", float32(10)), + Float64("float64", float64(11)), + Int32("int32", int32(12)), + String("stringer", errors.New("err").Error()), + String("nilInterface", "nil"), + String("nil", "nil"), + }, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := InterleavedKVToFields(tt.keyValues...) + if (err != nil) != tt.wantErr { + t.Errorf("InterleavedKVToFields() error = %v, wantErr %v", err, tt.wantErr) + return + } + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/vendor/github.com/opentracing/opentracing-go/mocktracer/mocklogrecord.go b/vendor/github.com/opentracing/opentracing-go/mocktracer/mocklogrecord.go new file mode 100644 index 0000000000..2ce96d9d38 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/mocktracer/mocklogrecord.go @@ -0,0 +1,105 @@ +package mocktracer + +import ( + "fmt" + "reflect" + "time" + + "github.com/opentracing/opentracing-go/log" +) + +// MockLogRecord represents data logged to a Span via Span.LogFields or +// Span.LogKV. 
+type MockLogRecord struct { + Timestamp time.Time + Fields []MockKeyValue +} + +// MockKeyValue represents a single key:value pair. +type MockKeyValue struct { + Key string + + // All MockLogRecord values are coerced to strings via fmt.Sprint(), though + // we retain their type separately. + ValueKind reflect.Kind + ValueString string +} + +// EmitString belongs to the log.Encoder interface +func (m *MockKeyValue) EmitString(key, value string) { + m.Key = key + m.ValueKind = reflect.TypeOf(value).Kind() + m.ValueString = fmt.Sprint(value) +} + +// EmitBool belongs to the log.Encoder interface +func (m *MockKeyValue) EmitBool(key string, value bool) { + m.Key = key + m.ValueKind = reflect.TypeOf(value).Kind() + m.ValueString = fmt.Sprint(value) +} + +// EmitInt belongs to the log.Encoder interface +func (m *MockKeyValue) EmitInt(key string, value int) { + m.Key = key + m.ValueKind = reflect.TypeOf(value).Kind() + m.ValueString = fmt.Sprint(value) +} + +// EmitInt32 belongs to the log.Encoder interface +func (m *MockKeyValue) EmitInt32(key string, value int32) { + m.Key = key + m.ValueKind = reflect.TypeOf(value).Kind() + m.ValueString = fmt.Sprint(value) +} + +// EmitInt64 belongs to the log.Encoder interface +func (m *MockKeyValue) EmitInt64(key string, value int64) { + m.Key = key + m.ValueKind = reflect.TypeOf(value).Kind() + m.ValueString = fmt.Sprint(value) +} + +// EmitUint32 belongs to the log.Encoder interface +func (m *MockKeyValue) EmitUint32(key string, value uint32) { + m.Key = key + m.ValueKind = reflect.TypeOf(value).Kind() + m.ValueString = fmt.Sprint(value) +} + +// EmitUint64 belongs to the log.Encoder interface +func (m *MockKeyValue) EmitUint64(key string, value uint64) { + m.Key = key + m.ValueKind = reflect.TypeOf(value).Kind() + m.ValueString = fmt.Sprint(value) +} + +// EmitFloat32 belongs to the log.Encoder interface +func (m *MockKeyValue) EmitFloat32(key string, value float32) { + m.Key = key + m.ValueKind = reflect.TypeOf(value).Kind() + 
m.ValueString = fmt.Sprint(value) +} + +// EmitFloat64 belongs to the log.Encoder interface +func (m *MockKeyValue) EmitFloat64(key string, value float64) { + m.Key = key + m.ValueKind = reflect.TypeOf(value).Kind() + m.ValueString = fmt.Sprint(value) +} + +// EmitObject belongs to the log.Encoder interface +func (m *MockKeyValue) EmitObject(key string, value interface{}) { + m.Key = key + m.ValueKind = reflect.TypeOf(value).Kind() + m.ValueString = fmt.Sprint(value) +} + +// EmitLazyLogger belongs to the log.Encoder interface +func (m *MockKeyValue) EmitLazyLogger(value log.LazyLogger) { + var meta MockKeyValue + value(&meta) + m.Key = meta.Key + m.ValueKind = meta.ValueKind + m.ValueString = meta.ValueString +} diff --git a/vendor/github.com/opentracing/opentracing-go/mocktracer/mockspan.go b/vendor/github.com/opentracing/opentracing-go/mocktracer/mockspan.go new file mode 100644 index 0000000000..8c7932ce65 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/mocktracer/mockspan.go @@ -0,0 +1,284 @@ +package mocktracer + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" +) + +// MockSpanContext is an opentracing.SpanContext implementation. +// +// It is entirely unsuitable for production use, but appropriate for tests +// that want to verify tracing behavior in other frameworks/applications. +// +// By default all spans have Sampled=true flag, unless {"sampling.priority": 0} +// tag is set. 
+type MockSpanContext struct { + TraceID int + SpanID int + Sampled bool + Baggage map[string]string +} + +var mockIDSource = uint32(42) + +func nextMockID() int { + return int(atomic.AddUint32(&mockIDSource, 1)) +} + +// ForeachBaggageItem belongs to the SpanContext interface +func (c MockSpanContext) ForeachBaggageItem(handler func(k, v string) bool) { + for k, v := range c.Baggage { + if !handler(k, v) { + break + } + } +} + +// WithBaggageItem creates a new context with an extra baggage item. +func (c MockSpanContext) WithBaggageItem(key, value string) MockSpanContext { + var newBaggage map[string]string + if c.Baggage == nil { + newBaggage = map[string]string{key: value} + } else { + newBaggage = make(map[string]string, len(c.Baggage)+1) + for k, v := range c.Baggage { + newBaggage[k] = v + } + newBaggage[key] = value + } + // Use positional parameters so the compiler will help catch new fields. + return MockSpanContext{c.TraceID, c.SpanID, c.Sampled, newBaggage} +} + +// MockSpan is an opentracing.Span implementation that exports its internal +// state for testing purposes. +type MockSpan struct { + sync.RWMutex + + ParentID int + + OperationName string + StartTime time.Time + FinishTime time.Time + + // All of the below are protected by the embedded RWMutex. 
+ SpanContext MockSpanContext + tags map[string]interface{} + logs []MockLogRecord + tracer *MockTracer +} + +func newMockSpan(t *MockTracer, name string, opts opentracing.StartSpanOptions) *MockSpan { + tags := opts.Tags + if tags == nil { + tags = map[string]interface{}{} + } + traceID := nextMockID() + parentID := int(0) + var baggage map[string]string + sampled := true + if len(opts.References) > 0 { + traceID = opts.References[0].ReferencedContext.(MockSpanContext).TraceID + parentID = opts.References[0].ReferencedContext.(MockSpanContext).SpanID + sampled = opts.References[0].ReferencedContext.(MockSpanContext).Sampled + baggage = opts.References[0].ReferencedContext.(MockSpanContext).Baggage + } + spanContext := MockSpanContext{traceID, nextMockID(), sampled, baggage} + startTime := opts.StartTime + if startTime.IsZero() { + startTime = time.Now() + } + return &MockSpan{ + ParentID: parentID, + OperationName: name, + StartTime: startTime, + tags: tags, + logs: []MockLogRecord{}, + SpanContext: spanContext, + + tracer: t, + } +} + +// Tags returns a copy of tags accumulated by the span so far +func (s *MockSpan) Tags() map[string]interface{} { + s.RLock() + defer s.RUnlock() + tags := make(map[string]interface{}) + for k, v := range s.tags { + tags[k] = v + } + return tags +} + +// Tag returns a single tag +func (s *MockSpan) Tag(k string) interface{} { + s.RLock() + defer s.RUnlock() + return s.tags[k] +} + +// Logs returns a copy of logs accumulated in the span so far +func (s *MockSpan) Logs() []MockLogRecord { + s.RLock() + defer s.RUnlock() + logs := make([]MockLogRecord, len(s.logs)) + copy(logs, s.logs) + return logs +} + +// Context belongs to the Span interface +func (s *MockSpan) Context() opentracing.SpanContext { + s.Lock() + defer s.Unlock() + return s.SpanContext +} + +// SetTag belongs to the Span interface +func (s *MockSpan) SetTag(key string, value interface{}) opentracing.Span { + s.Lock() + defer s.Unlock() + if key == 
string(ext.SamplingPriority) { + if v, ok := value.(uint16); ok { + s.SpanContext.Sampled = v > 0 + return s + } + if v, ok := value.(int); ok { + s.SpanContext.Sampled = v > 0 + return s + } + } + s.tags[key] = value + return s +} + +// SetBaggageItem belongs to the Span interface +func (s *MockSpan) SetBaggageItem(key, val string) opentracing.Span { + s.Lock() + defer s.Unlock() + s.SpanContext = s.SpanContext.WithBaggageItem(key, val) + return s +} + +// BaggageItem belongs to the Span interface +func (s *MockSpan) BaggageItem(key string) string { + s.RLock() + defer s.RUnlock() + return s.SpanContext.Baggage[key] +} + +// Finish belongs to the Span interface +func (s *MockSpan) Finish() { + s.Lock() + s.FinishTime = time.Now() + s.Unlock() + s.tracer.recordSpan(s) +} + +// FinishWithOptions belongs to the Span interface +func (s *MockSpan) FinishWithOptions(opts opentracing.FinishOptions) { + s.Lock() + s.FinishTime = opts.FinishTime + s.Unlock() + + // Handle any late-bound LogRecords. + for _, lr := range opts.LogRecords { + s.logFieldsWithTimestamp(lr.Timestamp, lr.Fields...) + } + // Handle (deprecated) BulkLogData. + for _, ld := range opts.BulkLogData { + if ld.Payload != nil { + s.logFieldsWithTimestamp( + ld.Timestamp, + log.String("event", ld.Event), + log.Object("payload", ld.Payload)) + } else { + s.logFieldsWithTimestamp( + ld.Timestamp, + log.String("event", ld.Event)) + } + } + + s.tracer.recordSpan(s) +} + +// String allows printing span for debugging +func (s *MockSpan) String() string { + return fmt.Sprintf( + "traceId=%d, spanId=%d, parentId=%d, sampled=%t, name=%s", + s.SpanContext.TraceID, s.SpanContext.SpanID, s.ParentID, + s.SpanContext.Sampled, s.OperationName) +} + +// LogFields belongs to the Span interface +func (s *MockSpan) LogFields(fields ...log.Field) { + s.logFieldsWithTimestamp(time.Now(), fields...) 
+} + +// The caller MUST NOT hold s.Lock +func (s *MockSpan) logFieldsWithTimestamp(ts time.Time, fields ...log.Field) { + lr := MockLogRecord{ + Timestamp: ts, + Fields: make([]MockKeyValue, len(fields)), + } + for i, f := range fields { + outField := &(lr.Fields[i]) + f.Marshal(outField) + } + + s.Lock() + defer s.Unlock() + s.logs = append(s.logs, lr) +} + +// LogKV belongs to the Span interface. +// +// This implementations coerces all "values" to strings, though that is not +// something all implementations need to do. Indeed, a motivated person can and +// probably should have this do a typed switch on the values. +func (s *MockSpan) LogKV(keyValues ...interface{}) { + if len(keyValues)%2 != 0 { + s.LogFields(log.Error(fmt.Errorf("Non-even keyValues len: %v", len(keyValues)))) + return + } + fields, err := log.InterleavedKVToFields(keyValues...) + if err != nil { + s.LogFields(log.Error(err), log.String("function", "LogKV")) + return + } + s.LogFields(fields...) +} + +// LogEvent belongs to the Span interface +func (s *MockSpan) LogEvent(event string) { + s.LogFields(log.String("event", event)) +} + +// LogEventWithPayload belongs to the Span interface +func (s *MockSpan) LogEventWithPayload(event string, payload interface{}) { + s.LogFields(log.String("event", event), log.Object("payload", payload)) +} + +// Log belongs to the Span interface +func (s *MockSpan) Log(data opentracing.LogData) { + panic("MockSpan.Log() no longer supported") +} + +// SetOperationName belongs to the Span interface +func (s *MockSpan) SetOperationName(operationName string) opentracing.Span { + s.Lock() + defer s.Unlock() + s.OperationName = operationName + return s +} + +// Tracer belongs to the Span interface +func (s *MockSpan) Tracer() opentracing.Tracer { + return s.tracer +} diff --git a/vendor/github.com/opentracing/opentracing-go/mocktracer/mocktracer.go b/vendor/github.com/opentracing/opentracing-go/mocktracer/mocktracer.go new file mode 100644 index 0000000000..4533da7b1f 
--- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/mocktracer/mocktracer.go @@ -0,0 +1,105 @@ +package mocktracer + +import ( + "sync" + + "github.com/opentracing/opentracing-go" +) + +// New returns a MockTracer opentracing.Tracer implementation that's intended +// to facilitate tests of OpenTracing instrumentation. +func New() *MockTracer { + t := &MockTracer{ + finishedSpans: []*MockSpan{}, + injectors: make(map[interface{}]Injector), + extractors: make(map[interface{}]Extractor), + } + + // register default injectors/extractors + textPropagator := new(TextMapPropagator) + t.RegisterInjector(opentracing.TextMap, textPropagator) + t.RegisterExtractor(opentracing.TextMap, textPropagator) + + httpPropagator := &TextMapPropagator{HTTPHeaders: true} + t.RegisterInjector(opentracing.HTTPHeaders, httpPropagator) + t.RegisterExtractor(opentracing.HTTPHeaders, httpPropagator) + + return t +} + +// MockTracer is only intended for testing OpenTracing instrumentation. +// +// It is entirely unsuitable for production use, but appropriate for tests +// that want to verify tracing behavior in other frameworks/applications. +type MockTracer struct { + sync.RWMutex + finishedSpans []*MockSpan + injectors map[interface{}]Injector + extractors map[interface{}]Extractor +} + +// FinishedSpans returns all spans that have been Finish()'ed since the +// MockTracer was constructed or since the last call to its Reset() method. +func (t *MockTracer) FinishedSpans() []*MockSpan { + t.RLock() + defer t.RUnlock() + spans := make([]*MockSpan, len(t.finishedSpans)) + copy(spans, t.finishedSpans) + return spans +} + +// Reset clears the internally accumulated finished spans. Note that any +// extant MockSpans will still append to finishedSpans when they Finish(), +// even after a call to Reset(). +func (t *MockTracer) Reset() { + t.Lock() + defer t.Unlock() + t.finishedSpans = []*MockSpan{} +} + +// StartSpan belongs to the Tracer interface. 
+func (t *MockTracer) StartSpan(operationName string, opts ...opentracing.StartSpanOption) opentracing.Span { + sso := opentracing.StartSpanOptions{} + for _, o := range opts { + o.Apply(&sso) + } + return newMockSpan(t, operationName, sso) +} + +// RegisterInjector registers injector for given format +func (t *MockTracer) RegisterInjector(format interface{}, injector Injector) { + t.injectors[format] = injector +} + +// RegisterExtractor registers extractor for given format +func (t *MockTracer) RegisterExtractor(format interface{}, extractor Extractor) { + t.extractors[format] = extractor +} + +// Inject belongs to the Tracer interface. +func (t *MockTracer) Inject(sm opentracing.SpanContext, format interface{}, carrier interface{}) error { + spanContext, ok := sm.(MockSpanContext) + if !ok { + return opentracing.ErrInvalidSpanContext + } + injector, ok := t.injectors[format] + if !ok { + return opentracing.ErrUnsupportedFormat + } + return injector.Inject(spanContext, carrier) +} + +// Extract belongs to the Tracer interface. 
+func (t *MockTracer) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) { + extractor, ok := t.extractors[format] + if !ok { + return nil, opentracing.ErrUnsupportedFormat + } + return extractor.Extract(carrier) +} + +func (t *MockTracer) recordSpan(span *MockSpan) { + t.Lock() + defer t.Unlock() + t.finishedSpans = append(t.finishedSpans, span) +} diff --git a/vendor/github.com/opentracing/opentracing-go/mocktracer/mocktracer_test.go b/vendor/github.com/opentracing/opentracing-go/mocktracer/mocktracer_test.go new file mode 100644 index 0000000000..14c04d8c7f --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/mocktracer/mocktracer_test.go @@ -0,0 +1,284 @@ +package mocktracer + +import ( + "net/http" + "reflect" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" +) + +func TestMockTracer_StartSpan(t *testing.T) { + tracer := New() + span1 := tracer.StartSpan( + "a", + opentracing.Tags(map[string]interface{}{"x": "y"})) + + span2 := span1.Tracer().StartSpan( + "", opentracing.ChildOf(span1.Context())) + span2.Finish() + span1.Finish() + spans := tracer.FinishedSpans() + assert.Equal(t, 2, len(spans)) + + parent := spans[1] + child := spans[0] + assert.Equal(t, map[string]interface{}{"x": "y"}, parent.Tags()) + assert.Equal(t, child.ParentID, parent.Context().(MockSpanContext).SpanID) +} + +func TestMockSpan_SetOperationName(t *testing.T) { + tracer := New() + span := tracer.StartSpan("") + span.SetOperationName("x") + assert.Equal(t, "x", span.(*MockSpan).OperationName) +} + +func TestMockSpanContext_Baggage(t *testing.T) { + tracer := New() + span := tracer.StartSpan("x") + span.SetBaggageItem("x", "y") + assert.Equal(t, "y", span.BaggageItem("x")) + assert.Equal(t, map[string]string{"x": "y"}, 
span.Context().(MockSpanContext).Baggage) + + baggage := make(map[string]string) + span.Context().ForeachBaggageItem(func(k, v string) bool { + baggage[k] = v + return true + }) + assert.Equal(t, map[string]string{"x": "y"}, baggage) + + span.SetBaggageItem("a", "b") + baggage = make(map[string]string) + span.Context().ForeachBaggageItem(func(k, v string) bool { + baggage[k] = v + return false // exit early + }) + assert.Equal(t, 2, len(span.Context().(MockSpanContext).Baggage)) + assert.Equal(t, 1, len(baggage)) +} + +func TestMockSpan_Tag(t *testing.T) { + tracer := New() + span := tracer.StartSpan("x") + span.SetTag("x", "y") + assert.Equal(t, "y", span.(*MockSpan).Tag("x")) +} + +func TestMockSpan_Tags(t *testing.T) { + tracer := New() + span := tracer.StartSpan("x") + span.SetTag("x", "y") + assert.Equal(t, map[string]interface{}{"x": "y"}, span.(*MockSpan).Tags()) +} + +func TestMockTracer_FinishedSpans_and_Reset(t *testing.T) { + tracer := New() + span := tracer.StartSpan("x") + span.SetTag("x", "y") + span.Finish() + spans := tracer.FinishedSpans() + assert.Equal(t, 1, len(spans)) + assert.Equal(t, map[string]interface{}{"x": "y"}, spans[0].Tags()) + + tracer.Reset() + spans = tracer.FinishedSpans() + assert.Equal(t, 0, len(spans)) +} + +func zeroOutTimestamps(recs []MockLogRecord) { + for i := range recs { + recs[i].Timestamp = time.Time{} + } +} + +func TestMockSpan_LogKV(t *testing.T) { + tracer := New() + span := tracer.StartSpan("s") + span.LogKV("key0", "string0") + span.LogKV("key1", "string1", "key2", uint32(42)) + span.Finish() + spans := tracer.FinishedSpans() + assert.Equal(t, 1, len(spans)) + actual := spans[0].Logs() + zeroOutTimestamps(actual) + assert.Equal(t, []MockLogRecord{ + MockLogRecord{ + Fields: []MockKeyValue{ + MockKeyValue{Key: "key0", ValueKind: reflect.String, ValueString: "string0"}, + }, + }, + MockLogRecord{ + Fields: []MockKeyValue{ + MockKeyValue{Key: "key1", ValueKind: reflect.String, ValueString: "string1"}, + 
MockKeyValue{Key: "key2", ValueKind: reflect.Uint32, ValueString: "42"}, + }, + }, + }, actual) +} + +func TestMockSpan_LogFields(t *testing.T) { + tracer := New() + span := tracer.StartSpan("s") + span.LogFields(log.String("key0", "string0")) + span.LogFields(log.String("key1", "string1"), log.Uint32("key2", uint32(42))) + span.LogFields(log.Lazy(func(fv log.Encoder) { + fv.EmitInt("key_lazy", 12) + })) + span.FinishWithOptions(opentracing.FinishOptions{ + LogRecords: []opentracing.LogRecord{ + {Timestamp: time.Now(), Fields: []log.Field{log.String("key9", "finish")}}, + }}) + spans := tracer.FinishedSpans() + assert.Equal(t, 1, len(spans)) + actual := spans[0].Logs() + zeroOutTimestamps(actual) + assert.Equal(t, []MockLogRecord{ + MockLogRecord{ + Fields: []MockKeyValue{ + MockKeyValue{Key: "key0", ValueKind: reflect.String, ValueString: "string0"}, + }, + }, + MockLogRecord{ + Fields: []MockKeyValue{ + MockKeyValue{Key: "key1", ValueKind: reflect.String, ValueString: "string1"}, + MockKeyValue{Key: "key2", ValueKind: reflect.Uint32, ValueString: "42"}, + }, + }, + MockLogRecord{ + Fields: []MockKeyValue{ + // Note that the LazyLogger gets to control the key as well as the value. 
+ MockKeyValue{Key: "key_lazy", ValueKind: reflect.Int, ValueString: "12"}, + }, + }, + MockLogRecord{ + Fields: []MockKeyValue{ + MockKeyValue{Key: "key9", ValueKind: reflect.String, ValueString: "finish"}, + }, + }, + }, actual) +} + +func TestMockSpan_DeprecatedLogs(t *testing.T) { + tracer := New() + span := tracer.StartSpan("x") + span.LogEvent("x") + span.LogEventWithPayload("y", "z") + span.LogEvent("a") + span.FinishWithOptions(opentracing.FinishOptions{ + BulkLogData: []opentracing.LogData{{Event: "f"}}}) + spans := tracer.FinishedSpans() + assert.Equal(t, 1, len(spans)) + actual := spans[0].Logs() + zeroOutTimestamps(actual) + assert.Equal(t, []MockLogRecord{ + MockLogRecord{ + Fields: []MockKeyValue{ + MockKeyValue{Key: "event", ValueKind: reflect.String, ValueString: "x"}, + }, + }, + MockLogRecord{ + Fields: []MockKeyValue{ + MockKeyValue{Key: "event", ValueKind: reflect.String, ValueString: "y"}, + MockKeyValue{Key: "payload", ValueKind: reflect.String, ValueString: "z"}, + }, + }, + MockLogRecord{ + Fields: []MockKeyValue{ + MockKeyValue{Key: "event", ValueKind: reflect.String, ValueString: "a"}, + }, + }, + MockLogRecord{ + Fields: []MockKeyValue{ + MockKeyValue{Key: "event", ValueKind: reflect.String, ValueString: "f"}, + }, + }, + }, actual) +} + +func TestMockTracer_Propagation(t *testing.T) { + textCarrier := func() interface{} { + return opentracing.TextMapCarrier(make(map[string]string)) + } + textLen := func(c interface{}) int { + return len(c.(opentracing.TextMapCarrier)) + } + + httpCarrier := func() interface{} { + httpHeaders := http.Header(make(map[string][]string)) + return opentracing.HTTPHeadersCarrier(httpHeaders) + } + httpLen := func(c interface{}) int { + return len(c.(opentracing.HTTPHeadersCarrier)) + } + + tests := []struct { + sampled bool + format opentracing.BuiltinFormat + carrier func() interface{} + len func(interface{}) int + }{ + {sampled: true, format: opentracing.TextMap, carrier: textCarrier, len: textLen}, + 
{sampled: false, format: opentracing.TextMap, carrier: textCarrier, len: textLen}, + {sampled: true, format: opentracing.HTTPHeaders, carrier: httpCarrier, len: httpLen}, + {sampled: false, format: opentracing.HTTPHeaders, carrier: httpCarrier, len: httpLen}, + } + for _, test := range tests { + tracer := New() + span := tracer.StartSpan("x") + span.SetBaggageItem("x", "y:z") // colon should be URL encoded as %3A + if !test.sampled { + ext.SamplingPriority.Set(span, 0) + } + mSpan := span.(*MockSpan) + + assert.Equal(t, opentracing.ErrUnsupportedFormat, + tracer.Inject(span.Context(), opentracing.Binary, nil)) + assert.Equal(t, opentracing.ErrInvalidCarrier, + tracer.Inject(span.Context(), opentracing.TextMap, span)) + + carrier := test.carrier() + + err := tracer.Inject(span.Context(), test.format, carrier) + require.NoError(t, err) + assert.Equal(t, 4, test.len(carrier), "expect baggage + 2 ids + sampled") + if test.format == opentracing.HTTPHeaders { + c := carrier.(opentracing.HTTPHeadersCarrier) + assert.Equal(t, "y%3Az", c["Mockpfx-Baggage-X"][0]) + } + + _, err = tracer.Extract(opentracing.Binary, nil) + assert.Equal(t, opentracing.ErrUnsupportedFormat, err) + _, err = tracer.Extract(opentracing.TextMap, tracer) + assert.Equal(t, opentracing.ErrInvalidCarrier, err) + + extractedContext, err := tracer.Extract(test.format, carrier) + require.NoError(t, err) + assert.Equal(t, mSpan.SpanContext.TraceID, extractedContext.(MockSpanContext).TraceID) + assert.Equal(t, mSpan.SpanContext.SpanID, extractedContext.(MockSpanContext).SpanID) + assert.Equal(t, test.sampled, extractedContext.(MockSpanContext).Sampled) + assert.Equal(t, "y:z", extractedContext.(MockSpanContext).Baggage["x"]) + } +} + +func TestMockSpan_Races(t *testing.T) { + span := New().StartSpan("x") + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + span.SetBaggageItem("test_key", "test_value") + }() + go func() { + defer wg.Done() + span.Context() + }() + wg.Wait() +} diff --git 
a/vendor/github.com/opentracing/opentracing-go/mocktracer/propagation.go b/vendor/github.com/opentracing/opentracing-go/mocktracer/propagation.go new file mode 100644 index 0000000000..8364f1d182 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/mocktracer/propagation.go @@ -0,0 +1,120 @@ +package mocktracer + +import ( + "fmt" + "net/url" + "strconv" + "strings" + + "github.com/opentracing/opentracing-go" +) + +const mockTextMapIdsPrefix = "mockpfx-ids-" +const mockTextMapBaggagePrefix = "mockpfx-baggage-" + +var emptyContext = MockSpanContext{} + +// Injector is responsible for injecting SpanContext instances in a manner suitable +// for propagation via a format-specific "carrier" object. Typically the +// injection will take place across an RPC boundary, but message queues and +// other IPC mechanisms are also reasonable places to use an Injector. +type Injector interface { + // Inject takes `SpanContext` and injects it into `carrier`. The actual type + // of `carrier` depends on the `format` passed to `Tracer.Inject()`. + // + // Implementations may return opentracing.ErrInvalidCarrier or any other + // implementation-specific error if injection fails. + Inject(ctx MockSpanContext, carrier interface{}) error +} + +// Extractor is responsible for extracting SpanContext instances from a +// format-specific "carrier" object. Typically the extraction will take place +// on the server side of an RPC boundary, but message queues and other IPC +// mechanisms are also reasonable places to use an Extractor. +type Extractor interface { + // Extract decodes a SpanContext instance from the given `carrier`, + // or (nil, opentracing.ErrSpanContextNotFound) if no context could + // be found in the `carrier`. + Extract(carrier interface{}) (MockSpanContext, error) +} + +// TextMapPropagator implements Injector/Extractor for TextMap and HTTPHeaders formats. 
+type TextMapPropagator struct { + HTTPHeaders bool +} + +// Inject implements the Injector interface +func (t *TextMapPropagator) Inject(spanContext MockSpanContext, carrier interface{}) error { + writer, ok := carrier.(opentracing.TextMapWriter) + if !ok { + return opentracing.ErrInvalidCarrier + } + // Ids: + writer.Set(mockTextMapIdsPrefix+"traceid", strconv.Itoa(spanContext.TraceID)) + writer.Set(mockTextMapIdsPrefix+"spanid", strconv.Itoa(spanContext.SpanID)) + writer.Set(mockTextMapIdsPrefix+"sampled", fmt.Sprint(spanContext.Sampled)) + // Baggage: + for baggageKey, baggageVal := range spanContext.Baggage { + safeVal := baggageVal + if t.HTTPHeaders { + safeVal = url.QueryEscape(baggageVal) + } + writer.Set(mockTextMapBaggagePrefix+baggageKey, safeVal) + } + return nil +} + +// Extract implements the Extractor interface +func (t *TextMapPropagator) Extract(carrier interface{}) (MockSpanContext, error) { + reader, ok := carrier.(opentracing.TextMapReader) + if !ok { + return emptyContext, opentracing.ErrInvalidCarrier + } + rval := MockSpanContext{0, 0, true, nil} + err := reader.ForeachKey(func(key, val string) error { + lowerKey := strings.ToLower(key) + switch { + case lowerKey == mockTextMapIdsPrefix+"traceid": + // Ids: + i, err := strconv.Atoi(val) + if err != nil { + return err + } + rval.TraceID = i + case lowerKey == mockTextMapIdsPrefix+"spanid": + // Ids: + i, err := strconv.Atoi(val) + if err != nil { + return err + } + rval.SpanID = i + case lowerKey == mockTextMapIdsPrefix+"sampled": + b, err := strconv.ParseBool(val) + if err != nil { + return err + } + rval.Sampled = b + case strings.HasPrefix(lowerKey, mockTextMapBaggagePrefix): + // Baggage: + if rval.Baggage == nil { + rval.Baggage = make(map[string]string) + } + safeVal := val + if t.HTTPHeaders { + // unescape errors are ignored, nothing can be done + if rawVal, err := url.QueryUnescape(val); err == nil { + safeVal = rawVal + } + } + rval.Baggage[lowerKey[len(mockTextMapBaggagePrefix):]] 
= safeVal + } + return nil + }) + if rval.TraceID == 0 || rval.SpanID == 0 { + return emptyContext, opentracing.ErrSpanContextNotFound + } + if err != nil { + return emptyContext, err + } + return rval, nil +} diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go new file mode 100644 index 0000000000..f9b680a213 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/noop.go @@ -0,0 +1,64 @@ +package opentracing + +import "github.com/opentracing/opentracing-go/log" + +// A NoopTracer is a trivial, minimum overhead implementation of Tracer +// for which all operations are no-ops. +// +// The primary use of this implementation is in libraries, such as RPC +// frameworks, that make tracing an optional feature controlled by the +// end user. A no-op implementation allows said libraries to use it +// as the default Tracer and to write instrumentation that does +// not need to keep checking if the tracer instance is nil. +// +// For the same reason, the NoopTracer is the default "global" tracer +// (see GlobalTracer and SetGlobalTracer functions). +// +// WARNING: NoopTracer does not support baggage propagation. 
+type NoopTracer struct{} + +type noopSpan struct{} +type noopSpanContext struct{} + +var ( + defaultNoopSpanContext SpanContext = noopSpanContext{} + defaultNoopSpan Span = noopSpan{} + defaultNoopTracer Tracer = NoopTracer{} +) + +const ( + emptyString = "" +) + +// noopSpanContext: +func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} + +// noopSpan: +func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext } +func (n noopSpan) SetBaggageItem(key, val string) Span { return n } +func (n noopSpan) BaggageItem(key string) string { return emptyString } +func (n noopSpan) SetTag(key string, value interface{}) Span { return n } +func (n noopSpan) LogFields(fields ...log.Field) {} +func (n noopSpan) LogKV(keyVals ...interface{}) {} +func (n noopSpan) Finish() {} +func (n noopSpan) FinishWithOptions(opts FinishOptions) {} +func (n noopSpan) SetOperationName(operationName string) Span { return n } +func (n noopSpan) Tracer() Tracer { return defaultNoopTracer } +func (n noopSpan) LogEvent(event string) {} +func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {} +func (n noopSpan) Log(data LogData) {} + +// StartSpan belongs to the Tracer interface. +func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span { + return defaultNoopSpan +} + +// Inject belongs to the Tracer interface. +func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error { + return nil +} + +// Extract belongs to the Tracer interface. 
+func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) { + return nil, ErrSpanContextNotFound +} diff --git a/vendor/github.com/opentracing/opentracing-go/options_test.go b/vendor/github.com/opentracing/opentracing-go/options_test.go new file mode 100644 index 0000000000..56a543bfe5 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/options_test.go @@ -0,0 +1,31 @@ +package opentracing + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestChildOfAndFollowsFrom(t *testing.T) { + tests := []struct { + newOpt func(SpanContext) SpanReference + refType SpanReferenceType + name string + }{ + {ChildOf, ChildOfRef, "ChildOf"}, + {FollowsFrom, FollowsFromRef, "FollowsFrom"}, + } + + for _, test := range tests { + opts := new(StartSpanOptions) + + test.newOpt(nil).Apply(opts) + require.Nil(t, opts.References, "%s(nil) must not append a reference", test.name) + + ctx := new(noopSpanContext) + test.newOpt(ctx).Apply(opts) + require.Equal(t, []SpanReference{ + SpanReference{ReferencedContext: ctx, Type: test.refType}, + }, opts.References, "%s(ctx) must append a reference", test.name) + } +} diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go new file mode 100644 index 0000000000..b0c275eb05 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/propagation.go @@ -0,0 +1,176 @@ +package opentracing + +import ( + "errors" + "net/http" +) + +/////////////////////////////////////////////////////////////////////////////// +// CORE PROPAGATION INTERFACES: +/////////////////////////////////////////////////////////////////////////////// + +var ( + // ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or + // Tracer.Extract() is not recognized by the Tracer implementation. 
+ ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format") + + // ErrSpanContextNotFound occurs when the `carrier` passed to + // Tracer.Extract() is valid and uncorrupted but has insufficient + // information to extract a SpanContext. + ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier") + + // ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to + // operate on a SpanContext which it is not prepared to handle (for + // example, since it was created by a different tracer implementation). + ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer") + + // ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract() + // implementations expect a different type of `carrier` than they are + // given. + ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier") + + // ErrSpanContextCorrupted occurs when the `carrier` passed to + // Tracer.Extract() is of the expected type but is corrupted. + ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier") +) + +/////////////////////////////////////////////////////////////////////////////// +// BUILTIN PROPAGATION FORMATS: +/////////////////////////////////////////////////////////////////////////////// + +// BuiltinFormat is used to demarcate the values within package `opentracing` +// that are intended for use with the Tracer.Inject() and Tracer.Extract() +// methods. +type BuiltinFormat byte + +const ( + // Binary represents SpanContexts as opaque binary data. + // + // For Tracer.Inject(): the carrier must be an `io.Writer`. + // + // For Tracer.Extract(): the carrier must be an `io.Reader`. + Binary BuiltinFormat = iota + + // TextMap represents SpanContexts as key:value string pairs. + // + // Unlike HTTPHeaders, the TextMap format does not restrict the key or + // value character sets in any way. 
+ // + // For Tracer.Inject(): the carrier must be a `TextMapWriter`. + // + // For Tracer.Extract(): the carrier must be a `TextMapReader`. + TextMap + + // HTTPHeaders represents SpanContexts as HTTP header string pairs. + // + // Unlike TextMap, the HTTPHeaders format requires that the keys and values + // be valid as HTTP headers as-is (i.e., character casing may be unstable + // and special characters are disallowed in keys, values should be + // URL-escaped, etc). + // + // For Tracer.Inject(): the carrier must be a `TextMapWriter`. + // + // For Tracer.Extract(): the carrier must be a `TextMapReader`. + // + // See HTTPHeadersCarrier for an implementation of both TextMapWriter + // and TextMapReader that defers to an http.Header instance for storage. + // For example, Inject(): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // err := span.Tracer().Inject( + // span.Context(), opentracing.HTTPHeaders, carrier) + // + // Or Extract(): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // clientContext, err := tracer.Extract( + // opentracing.HTTPHeaders, carrier) + // + HTTPHeaders +) + +// TextMapWriter is the Inject() carrier for the TextMap builtin format. With +// it, the caller can encode a SpanContext for propagation as entries in a map +// of unicode strings. +type TextMapWriter interface { + // Set a key:value pair to the carrier. Multiple calls to Set() for the + // same key leads to undefined behavior. + // + // NOTE: The backing store for the TextMapWriter may contain data unrelated + // to SpanContext. As such, Inject() and Extract() implementations that + // call the TextMapWriter and TextMapReader interfaces must agree on a + // prefix or other convention to distinguish their own key:value pairs. + Set(key, val string) +} + +// TextMapReader is the Extract() carrier for the TextMap builtin format. With it, +// the caller can decode a propagated SpanContext as entries in a map of +// unicode strings. 
+type TextMapReader interface { + // ForeachKey returns TextMap contents via repeated calls to the `handler` + // function. If any call to `handler` returns a non-nil error, ForeachKey + // terminates and returns that error. + // + // NOTE: The backing store for the TextMapReader may contain data unrelated + // to SpanContext. As such, Inject() and Extract() implementations that + // call the TextMapWriter and TextMapReader interfaces must agree on a + // prefix or other convention to distinguish their own key:value pairs. + // + // The "foreach" callback pattern reduces unnecessary copying in some cases + // and also allows implementations to hold locks while the map is read. + ForeachKey(handler func(key, val string) error) error +} + +// TextMapCarrier allows the use of regular map[string]string +// as both TextMapWriter and TextMapReader. +type TextMapCarrier map[string]string + +// ForeachKey conforms to the TextMapReader interface. +func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error { + for k, v := range c { + if err := handler(k, v); err != nil { + return err + } + } + return nil +} + +// Set implements Set() of opentracing.TextMapWriter +func (c TextMapCarrier) Set(key, val string) { + c[key] = val +} + +// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader. +// +// Example usage for server side: +// +// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) +// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) +// +// Example usage for client side: +// +// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) +// err := tracer.Inject( +// span.Context(), +// opentracing.HTTPHeaders, +// carrier) +// +type HTTPHeadersCarrier http.Header + +// Set conforms to the TextMapWriter interface. +func (c HTTPHeadersCarrier) Set(key, val string) { + h := http.Header(c) + h.Set(key, val) +} + +// ForeachKey conforms to the TextMapReader interface. 
+func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error { + for k, vals := range c { + for _, v := range vals { + if err := handler(k, v); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/opentracing/opentracing-go/propagation_test.go b/vendor/github.com/opentracing/opentracing-go/propagation_test.go new file mode 100644 index 0000000000..e3dad55978 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/propagation_test.go @@ -0,0 +1,93 @@ +package opentracing + +import ( + "net/http" + "strconv" + "testing" +) + +const testHeaderPrefix = "testprefix-" + +func TestTextMapCarrierInject(t *testing.T) { + m := make(map[string]string) + m["NotOT"] = "blah" + m["opname"] = "AlsoNotOT" + tracer := testTracer{} + span := tracer.StartSpan("someSpan") + fakeID := span.Context().(testSpanContext).FakeID + + carrier := TextMapCarrier(m) + if err := span.Tracer().Inject(span.Context(), TextMap, carrier); err != nil { + t.Fatal(err) + } + + if len(m) != 3 { + t.Errorf("Unexpected header length: %v", len(m)) + } + // The prefix comes from just above; the suffix comes from + // testTracer.Inject(). 
+ if m["testprefix-fakeid"] != strconv.Itoa(fakeID) { + t.Errorf("Could not find fakeid at expected key") + } +} + +func TestTextMapCarrierExtract(t *testing.T) { + m := make(map[string]string) + m["NotOT"] = "blah" + m["opname"] = "AlsoNotOT" + m["testprefix-fakeid"] = "42" + tracer := testTracer{} + + carrier := TextMapCarrier(m) + extractedContext, err := tracer.Extract(TextMap, carrier) + if err != nil { + t.Fatal(err) + } + + if extractedContext.(testSpanContext).FakeID != 42 { + t.Errorf("Failed to read testprefix-fakeid correctly") + } +} + +func TestHTTPHeaderInject(t *testing.T) { + h := http.Header{} + h.Add("NotOT", "blah") + h.Add("opname", "AlsoNotOT") + tracer := testTracer{} + span := tracer.StartSpan("someSpan") + fakeID := span.Context().(testSpanContext).FakeID + + // Use HTTPHeadersCarrier to wrap around `h`. + carrier := HTTPHeadersCarrier(h) + if err := span.Tracer().Inject(span.Context(), HTTPHeaders, carrier); err != nil { + t.Fatal(err) + } + + if len(h) != 3 { + t.Errorf("Unexpected header length: %v", len(h)) + } + // The prefix comes from just above; the suffix comes from + // testTracer.Inject(). + if h.Get("testprefix-fakeid") != strconv.Itoa(fakeID) { + t.Errorf("Could not find fakeid at expected key") + } +} + +func TestHTTPHeaderExtract(t *testing.T) { + h := http.Header{} + h.Add("NotOT", "blah") + h.Add("opname", "AlsoNotOT") + h.Add("testprefix-fakeid", "42") + tracer := testTracer{} + + // Use HTTPHeadersCarrier to wrap around `h`. 
+ carrier := HTTPHeadersCarrier(h) + spanContext, err := tracer.Extract(HTTPHeaders, carrier) + if err != nil { + t.Fatal(err) + } + + if spanContext.(testSpanContext).FakeID != 42 { + t.Errorf("Failed to read testprefix-fakeid correctly") + } +} diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go new file mode 100644 index 0000000000..0d3fb53418 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/span.go @@ -0,0 +1,189 @@ +package opentracing + +import ( + "time" + + "github.com/opentracing/opentracing-go/log" +) + +// SpanContext represents Span state that must propagate to descendant Spans and across process +// boundaries (e.g., a tuple). +type SpanContext interface { + // ForeachBaggageItem grants access to all baggage items stored in the + // SpanContext. + // The handler function will be called for each baggage key/value pair. + // The ordering of items is not guaranteed. + // + // The bool return value indicates if the handler wants to continue iterating + // through the rest of the baggage items; for example if the handler is trying to + // find some baggage item by pattern matching the name, it can return false + // as soon as the item is found to stop further iterations. + ForeachBaggageItem(handler func(k, v string) bool) +} + +// Span represents an active, un-finished span in the OpenTracing system. +// +// Spans are created by the Tracer interface. +type Span interface { + // Sets the end timestamp and finalizes Span state. + // + // With the exception of calls to Context() (which are always allowed), + // Finish() must be the last call made to any span instance, and to do + // otherwise leads to undefined behavior. + Finish() + // FinishWithOptions is like Finish() but with explicit control over + // timestamps and log data. + FinishWithOptions(opts FinishOptions) + + // Context() yields the SpanContext for this Span. 
Note that the return + // value of Context() is still valid after a call to Span.Finish(), as is + // a call to Span.Context() after a call to Span.Finish(). + Context() SpanContext + + // Sets or changes the operation name. + // + // Returns a reference to this Span for chaining. + SetOperationName(operationName string) Span + + // Adds a tag to the span. + // + // If there is a pre-existing tag set for `key`, it is overwritten. + // + // Tag values can be numeric types, strings, or bools. The behavior of + // other tag value types is undefined at the OpenTracing level. If a + // tracing system does not know how to handle a particular value type, it + // may ignore the tag, but shall not panic. + // + // Returns a reference to this Span for chaining. + SetTag(key string, value interface{}) Span + + // LogFields is an efficient and type-checked way to record key:value + // logging data about a Span, though the programming interface is a little + // more verbose than LogKV(). Here's an example: + // + // span.LogFields( + // log.String("event", "soft error"), + // log.String("type", "cache timeout"), + // log.Int("waited.millis", 1500)) + // + // Also see Span.FinishWithOptions() and FinishOptions.BulkLogData. + LogFields(fields ...log.Field) + + // LogKV is a concise, readable way to record key:value logging data about + // a Span, though unfortunately this also makes it less efficient and less + // type-safe than LogFields(). Here's an example: + // + // span.LogKV( + // "event", "soft error", + // "type", "cache timeout", + // "waited.millis", 1500) + // + // For LogKV (as opposed to LogFields()), the parameters must appear as + // key-value pairs, like + // + // span.LogKV(key1, val1, key2, val2, key3, val3, ...) + // + // The keys must all be strings. The values may be strings, numeric types, + // bools, Go error instances, or arbitrary structs. 
+ // + // (Note to implementors: consider the log.InterleavedKVToFields() helper) + LogKV(alternatingKeyValues ...interface{}) + + // SetBaggageItem sets a key:value pair on this Span and its SpanContext + // that also propagates to descendants of this Span. + // + // SetBaggageItem() enables powerful functionality given a full-stack + // opentracing integration (e.g., arbitrary application data from a mobile + // app can make it, transparently, all the way into the depths of a storage + // system), and with it some powerful costs: use this feature with care. + // + // IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to + // *future* causal descendants of the associated Span. + // + // IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and + // value is copied into every local *and remote* child of the associated + // Span, and that can add up to a lot of network and cpu overhead. + // + // Returns a reference to this Span for chaining. + SetBaggageItem(restrictedKey, value string) Span + + // Gets the value for a baggage item given its key. Returns the empty string + // if the value isn't found in this Span. + BaggageItem(restrictedKey string) string + + // Provides access to the Tracer that created this Span. + Tracer() Tracer + + // Deprecated: use LogFields or LogKV + LogEvent(event string) + // Deprecated: use LogFields or LogKV + LogEventWithPayload(event string, payload interface{}) + // Deprecated: use LogFields or LogKV + Log(data LogData) +} + +// LogRecord is data associated with a single Span log. Every LogRecord +// instance must specify at least one Field. +type LogRecord struct { + Timestamp time.Time + Fields []log.Field +} + +// FinishOptions allows Span.FinishWithOptions callers to override the finish +// timestamp and provide log data via a bulk interface. +type FinishOptions struct { + // FinishTime overrides the Span's finish time, or implicitly becomes + // time.Now() if FinishTime.IsZero(). 
+ // + // FinishTime must resolve to a timestamp that's >= the Span's StartTime + // (per StartSpanOptions). + FinishTime time.Time + + // LogRecords allows the caller to specify the contents of many LogFields() + // calls with a single slice. May be nil. + // + // None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must + // be set explicitly). Also, they must be >= the Span's start timestamp and + // <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the + // behavior of FinishWithOptions() is undefined. + // + // If specified, the caller hands off ownership of LogRecords at + // FinishWithOptions() invocation time. + // + // If specified, the (deprecated) BulkLogData must be nil or empty. + LogRecords []LogRecord + + // BulkLogData is DEPRECATED. + BulkLogData []LogData +} + +// LogData is DEPRECATED +type LogData struct { + Timestamp time.Time + Event string + Payload interface{} +} + +// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord +func (ld *LogData) ToLogRecord() LogRecord { + var literalTimestamp time.Time + if ld.Timestamp.IsZero() { + literalTimestamp = time.Now() + } else { + literalTimestamp = ld.Timestamp + } + rval := LogRecord{ + Timestamp: literalTimestamp, + } + if ld.Payload == nil { + rval.Fields = []log.Field{ + log.String("event", ld.Event), + } + } else { + rval.Fields = []log.Field{ + log.String("event", ld.Event), + log.Object("payload", ld.Payload), + } + } + return rval +} diff --git a/vendor/github.com/opentracing/opentracing-go/testtracer_test.go b/vendor/github.com/opentracing/opentracing-go/testtracer_test.go new file mode 100644 index 0000000000..dd13788cf0 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/testtracer_test.go @@ -0,0 +1,138 @@ +package opentracing + +import ( + "strconv" + "strings" + "time" + + "github.com/opentracing/opentracing-go/log" +) + +const testHTTPHeaderPrefix = "testprefix-" + +// testTracer is a most-noop Tracer implementation that 
makes it possible for +// unittests to verify whether certain methods were / were not called. +type testTracer struct{} + +var fakeIDSource = 1 + +func nextFakeID() int { + fakeIDSource++ + return fakeIDSource +} + +type testSpanContext struct { + HasParent bool + FakeID int +} + +func (n testSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} + +type testSpan struct { + spanContext testSpanContext + OperationName string + StartTime time.Time + Tags map[string]interface{} +} + +func (n testSpan) Equal(os Span) bool { + other, ok := os.(testSpan) + if !ok { + return false + } + if n.spanContext != other.spanContext { + return false + } + if n.OperationName != other.OperationName { + return false + } + if !n.StartTime.Equal(other.StartTime) { + return false + } + if len(n.Tags) != len(other.Tags) { + return false + } + + for k, v := range n.Tags { + if ov, ok := other.Tags[k]; !ok || ov != v { + return false + } + } + + return true +} + +// testSpan: +func (n testSpan) Context() SpanContext { return n.spanContext } +func (n testSpan) SetTag(key string, value interface{}) Span { return n } +func (n testSpan) Finish() {} +func (n testSpan) FinishWithOptions(opts FinishOptions) {} +func (n testSpan) LogFields(fields ...log.Field) {} +func (n testSpan) LogKV(kvs ...interface{}) {} +func (n testSpan) SetOperationName(operationName string) Span { return n } +func (n testSpan) Tracer() Tracer { return testTracer{} } +func (n testSpan) SetBaggageItem(key, val string) Span { return n } +func (n testSpan) BaggageItem(key string) string { return "" } +func (n testSpan) LogEvent(event string) {} +func (n testSpan) LogEventWithPayload(event string, payload interface{}) {} +func (n testSpan) Log(data LogData) {} + +// StartSpan belongs to the Tracer interface. 
+func (n testTracer) StartSpan(operationName string, opts ...StartSpanOption) Span { + sso := StartSpanOptions{} + for _, o := range opts { + o.Apply(&sso) + } + return n.startSpanWithOptions(operationName, sso) +} + +func (n testTracer) startSpanWithOptions(name string, opts StartSpanOptions) Span { + fakeID := nextFakeID() + if len(opts.References) > 0 { + fakeID = opts.References[0].ReferencedContext.(testSpanContext).FakeID + } + + return testSpan{ + OperationName: name, + StartTime: opts.StartTime, + Tags: opts.Tags, + spanContext: testSpanContext{ + HasParent: len(opts.References) > 0, + FakeID: fakeID, + }, + } +} + +// Inject belongs to the Tracer interface. +func (n testTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error { + spanContext := sp.(testSpanContext) + switch format { + case HTTPHeaders, TextMap: + carrier.(TextMapWriter).Set(testHTTPHeaderPrefix+"fakeid", strconv.Itoa(spanContext.FakeID)) + return nil + } + return ErrUnsupportedFormat +} + +// Extract belongs to the Tracer interface. +func (n testTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) { + switch format { + case HTTPHeaders, TextMap: + // Just for testing purposes... generally not a worthwhile thing to + // propagate. 
+ sm := testSpanContext{} + err := carrier.(TextMapReader).ForeachKey(func(key, val string) error { + switch strings.ToLower(key) { + case testHTTPHeaderPrefix + "fakeid": + i, err := strconv.Atoi(val) + if err != nil { + return err + } + sm.FakeID = i + } + return nil + }) + return sm, err + } + return nil, ErrSpanContextNotFound +} diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go new file mode 100644 index 0000000000..715f0cedfb --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/tracer.go @@ -0,0 +1,304 @@ +package opentracing + +import "time" + +// Tracer is a simple, thin interface for Span creation and SpanContext +// propagation. +type Tracer interface { + + // Create, start, and return a new Span with the given `operationName` and + // incorporate the given StartSpanOption `opts`. (Note that `opts` borrows + // from the "functional options" pattern, per + // http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis) + // + // A Span with no SpanReference options (e.g., opentracing.ChildOf() or + // opentracing.FollowsFrom()) becomes the root of its own trace. + // + // Examples: + // + // var tracer opentracing.Tracer = ... + // + // // The root-span case: + // sp := tracer.StartSpan("GetFeed") + // + // // The vanilla child span case: + // sp := tracer.StartSpan( + // "GetFeed", + // opentracing.ChildOf(parentSpan.Context())) + // + // // All the bells and whistles: + // sp := tracer.StartSpan( + // "GetFeed", + // opentracing.ChildOf(parentSpan.Context()), + // opentracing.Tag{"user_agent", loggedReq.UserAgent}, + // opentracing.StartTime(loggedReq.Timestamp), + // ) + // + StartSpan(operationName string, opts ...StartSpanOption) Span + + // Inject() takes the `sm` SpanContext instance and injects it for + // propagation within `carrier`. The actual type of `carrier` depends on + // the value of `format`. 
+ // + // OpenTracing defines a common set of `format` values (see BuiltinFormat), + // and each has an expected carrier type. + // + // Other packages may declare their own `format` values, much like the keys + // used by `context.Context` (see https://godoc.org/context#WithValue). + // + // Example usage (sans error handling): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // err := tracer.Inject( + // span.Context(), + // opentracing.HTTPHeaders, + // carrier) + // + // NOTE: All opentracing.Tracer implementations MUST support all + // BuiltinFormats. + // + // Implementations may return opentracing.ErrUnsupportedFormat if `format` + // is not supported by (or not known by) the implementation. + // + // Implementations may return opentracing.ErrInvalidCarrier or any other + // implementation-specific error if the format is supported but injection + // fails anyway. + // + // See Tracer.Extract(). + Inject(sm SpanContext, format interface{}, carrier interface{}) error + + // Extract() returns a SpanContext instance given `format` and `carrier`. + // + // OpenTracing defines a common set of `format` values (see BuiltinFormat), + // and each has an expected carrier type. + // + // Other packages may declare their own `format` values, much like the keys + // used by `context.Context` (see + // https://godoc.org/golang.org/x/net/context#WithValue). + // + // Example usage (with StartSpan): + // + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) + // + // // ... assuming the ultimate goal here is to resume the trace with a + // // server-side Span: + // var serverSpan opentracing.Span + // if err == nil { + // span = tracer.StartSpan( + // rpcMethodName, ext.RPCServerOption(clientContext)) + // } else { + // span = tracer.StartSpan(rpcMethodName) + // } + // + // + // NOTE: All opentracing.Tracer implementations MUST support all + // BuiltinFormats. 
+ // + // Return values: + // - A successful Extract returns a SpanContext instance and a nil error + // - If there was simply no SpanContext to extract in `carrier`, Extract() + // returns (nil, opentracing.ErrSpanContextNotFound) + // - If `format` is unsupported or unrecognized, Extract() returns (nil, + // opentracing.ErrUnsupportedFormat) + // - If there are more fundamental problems with the `carrier` object, + // Extract() may return opentracing.ErrInvalidCarrier, + // opentracing.ErrSpanContextCorrupted, or implementation-specific + // errors. + // + // See Tracer.Inject(). + Extract(format interface{}, carrier interface{}) (SpanContext, error) +} + +// StartSpanOptions allows Tracer.StartSpan() callers and implementors a +// mechanism to override the start timestamp, specify Span References, and make +// a single Tag or multiple Tags available at Span start time. +// +// StartSpan() callers should look at the StartSpanOption interface and +// implementations available in this package. +// +// Tracer implementations can convert a slice of `StartSpanOption` instances +// into a `StartSpanOptions` struct like so: +// +// func StartSpan(opName string, opts ...opentracing.StartSpanOption) { +// sso := opentracing.StartSpanOptions{} +// for _, o := range opts { +// o.Apply(&sso) +// } +// ... +// } +// +type StartSpanOptions struct { + // Zero or more causal references to other Spans (via their SpanContext). + // If empty, start a "root" Span (i.e., start a new trace). + References []SpanReference + + // StartTime overrides the Span's start time, or implicitly becomes + // time.Now() if StartTime.IsZero(). + StartTime time.Time + + // Tags may have zero or more entries; the restrictions on map values are + // identical to those for Span.SetTag(). May be nil. + // + // If specified, the caller hands off ownership of Tags at + // StartSpan() invocation time. 
+ Tags map[string]interface{} +} + +// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan. +// +// StartSpanOption borrows from the "functional options" pattern, per +// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis +type StartSpanOption interface { + Apply(*StartSpanOptions) +} + +// SpanReferenceType is an enum type describing different categories of +// relationships between two Spans. If Span-2 refers to Span-1, the +// SpanReferenceType describes Span-1 from Span-2's perspective. For example, +// ChildOfRef means that Span-1 created Span-2. +// +// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for +// completion; e.g., Span-2 may be part of a background job enqueued by Span-1, +// or Span-2 may be sitting in a distributed queue behind Span-1. +type SpanReferenceType int + +const ( + // ChildOfRef refers to a parent Span that caused *and* somehow depends + // upon the new child Span. Often (but not always), the parent Span cannot + // finish until the child Span does. + // + // An timing diagram for a ChildOfRef that's blocked on the new Span: + // + // [-Parent Span---------] + // [-Child Span----] + // + // See http://opentracing.io/spec/ + // + // See opentracing.ChildOf() + ChildOfRef SpanReferenceType = iota + + // FollowsFromRef refers to a parent Span that does not depend in any way + // on the result of the new child Span. For instance, one might use + // FollowsFromRefs to describe pipeline stages separated by queues, + // or a fire-and-forget cache insert at the tail end of a web request. + // + // A FollowsFromRef Span is part of the same logical trace as the new Span: + // i.e., the new Span is somehow caused by the work of its FollowsFromRef. + // + // All of the following could be valid timing diagrams for children that + // "FollowFrom" a parent. 
+ // + // [-Parent Span-] [-Child Span-] + // + // + // [-Parent Span--] + // [-Child Span-] + // + // + // [-Parent Span-] + // [-Child Span-] + // + // See http://opentracing.io/spec/ + // + // See opentracing.FollowsFrom() + FollowsFromRef +) + +// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a +// referenced SpanContext. See the SpanReferenceType documentation for +// supported relationships. If SpanReference is created with +// ReferencedContext==nil, it has no effect. Thus it allows for a more concise +// syntax for starting spans: +// +// sc, _ := tracer.Extract(someFormat, someCarrier) +// span := tracer.StartSpan("operation", opentracing.ChildOf(sc)) +// +// The `ChildOf(sc)` option above will not panic if sc == nil, it will just +// not add the parent span reference to the options. +type SpanReference struct { + Type SpanReferenceType + ReferencedContext SpanContext +} + +// Apply satisfies the StartSpanOption interface. +func (r SpanReference) Apply(o *StartSpanOptions) { + if r.ReferencedContext != nil { + o.References = append(o.References, r) + } +} + +// ChildOf returns a StartSpanOption pointing to a dependent parent span. +// If sc == nil, the option has no effect. +// +// See ChildOfRef, SpanReference +func ChildOf(sc SpanContext) SpanReference { + return SpanReference{ + Type: ChildOfRef, + ReferencedContext: sc, + } +} + +// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused +// the child Span but does not directly depend on its result in any way. +// If sc == nil, the option has no effect. +// +// See FollowsFromRef, SpanReference +func FollowsFrom(sc SpanContext) SpanReference { + return SpanReference{ + Type: FollowsFromRef, + ReferencedContext: sc, + } +} + +// StartTime is a StartSpanOption that sets an explicit start timestamp for the +// new Span. +type StartTime time.Time + +// Apply satisfies the StartSpanOption interface. 
+func (t StartTime) Apply(o *StartSpanOptions) { + o.StartTime = time.Time(t) +} + +// Tags are a generic map from an arbitrary string key to an opaque value type. +// The underlying tracing system is responsible for interpreting and +// serializing the values. +type Tags map[string]interface{} + +// Apply satisfies the StartSpanOption interface. +func (t Tags) Apply(o *StartSpanOptions) { + if o.Tags == nil { + o.Tags = make(map[string]interface{}) + } + for k, v := range t { + o.Tags[k] = v + } +} + +// Tag may be passed as a StartSpanOption to add a tag to new spans, +// or its Set method may be used to apply the tag to an existing Span, +// for example: +// +// tracer.StartSpan("opName", Tag{"Key", value}) +// +// or +// +// Tag{"key", value}.Set(span) +type Tag struct { + Key string + Value interface{} +} + +// Apply satisfies the StartSpanOption interface. +func (t Tag) Apply(o *StartSpanOptions) { + if o.Tags == nil { + o.Tags = make(map[string]interface{}) + } + o.Tags[t.Key] = t.Value +} + +// Set applies the tag to an existing Span. 
+func (t Tag) Set(s Span) { + s.SetTag(t.Key, t.Value) +} diff --git a/vendor/github.com/polydawn/refmt/.gitignore b/vendor/github.com/polydawn/refmt/.gitignore new file mode 100644 index 0000000000..9010fa354c --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gitignore @@ -0,0 +1,2 @@ +.gopath/pkg +.gopath/tmp diff --git a/vendor/github.com/polydawn/refmt/.gitmodules b/vendor/github.com/polydawn/refmt/.gitmodules new file mode 100644 index 0000000000..1106688a09 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gitmodules @@ -0,0 +1,18 @@ +[submodule ".gopath/src/github.com/smartystreets/goconvey"] + path = .gopath/src/github.com/smartystreets/goconvey + url = https://github.com/smartystreets/goconvey +[submodule ".gopath/src/github.com/smartystreets/assertions"] + path = .gopath/src/github.com/smartystreets/assertions + url = https://github.com/smartystreets/assertions +[submodule ".gopath/src/github.com/jtolds/gls"] + path = .gopath/src/github.com/jtolds/gls + url = https://github.com/jtolds/gls +[submodule ".gopath/src/github.com/urfave/cli"] + path = .gopath/src/github.com/urfave/cli + url = https://github.com/urfave/cli/ +[submodule ".gopath/src/github.com/go-yaml/yaml"] + path = .gopath/src/github.com/go-yaml/yaml + url = https://github.com/go-yaml/yaml/ +[submodule ".gopath/src/github.com/warpfork/go-wish"] + path = .gopath/src/github.com/warpfork/go-wish + url = https://github.com/warpfork/go-wish diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/.travis.yml b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/.travis.yml new file mode 100644 index 0000000000..004172a2e3 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4 + - 1.5 + - 1.6 + - tip + +go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/LICENSE 
b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/LICENSE.libyaml b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/LICENSE.libyaml new file mode 100644 index 0000000000..8da58fbf6f --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/README.md b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/README.md new file mode 100644 index 0000000000..7a512d67c2 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/README.md @@ -0,0 +1,133 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +Some more examples can be found in the "examples" folder. + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! 
+b: + c: 2 + d: [3, 4] +` + +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/apic.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/apic.go new file mode 100644 index 0000000000..95ec014e8c --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/apic.go @@ -0,0 +1,742 @@ +package yaml + +import ( + "io" + "os" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. 
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// File read handler. +func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_file.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_file_read_handler + parser.input_file = file +} + +// Set the source encoding. 
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + return true +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// File write handler. +func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_file.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_file_write_handler + emitter.output_file = file +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. 
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. 
+// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } + return true +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } + return true +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } + return true +} + +// Create DOCUMENT-END. 
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } + return true +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create MAPPING-END. 
+func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } + return true +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. 
+// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. 
+// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. 
+// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/decode.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/decode.go new file mode 100644 index 0000000000..db1f5f2068 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/decode.go @@ -0,0 +1,685 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + + if len(b) == 0 { + b = []byte{'\n'} + } + + yaml_parser_set_input_string(&p.parser, b) + + p.skip() + if p.event.typ != yaml_STREAM_START_EVENT { + panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return &p +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +func (p *parser) skip() { + if p.event.typ != yaml_NO_EVENT { + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + yaml_event_delete(&p.event) + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + switch p.event.typ { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + default: + panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.skip() + n.children = append(n.children, p.parse()) + if p.event.typ != yaml_DOCUMENT_END_EVENT { + panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + p.skip() + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.skip() + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.skip() + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.skip() + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. 
+ +type decoder struct { + doc *node + aliases map[string]bool + mapType reflect.Type + terrors []string + strict bool +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[string]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + an, ok := d.doc.anchors[n.value] + if !ok { + failf("unknown anchor '%s' referenced", n.value) + } + if d.aliases[n.value] { + failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n.value] = true + good = d.unmarshal(an, out) + delete(d.aliases, n.value) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = 
yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if s, ok := resolved.(string); ok && out.CanAddr() { + if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { + err := u.UnmarshalText([]byte(s)) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + good = true + } else if resolved != nil { + out.SetString(n.value) + good = true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else { + out.Set(reflect.ValueOf(resolved)) + } + good = true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + good = true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + good = true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + 
good = true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + good = true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + good = true + case int64: + out.SetFloat(float64(resolved)) + good = true + case uint64: + out.SetFloat(float64(resolved)) + good = true + case float64: + out.SetFloat(resolved) + good = true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + good = true + } + } + if !good { + d.terror(n, tag, out) + } + return good +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + out.Set(out.Slice(0, j)) + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i 
:= 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in struct %s", n.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step 
backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/decode_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/decode_test.go new file mode 100644 index 0000000000..713b1ee9c4 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/decode_test.go @@ -0,0 +1,1017 @@ +package yaml_test + +import ( + "errors" + . "gopkg.in/check.v1" + "gopkg.in/yaml.v2" + "math" + "net" + "reflect" + "strings" + "time" +) + +var unmarshalIntTest = 123 + +var unmarshalTests = []struct { + data string + value interface{} +}{ + { + "", + &struct{}{}, + }, { + "{}", &struct{}{}, + }, { + "v: hi", + map[string]string{"v": "hi"}, + }, { + "v: hi", map[string]interface{}{"v": "hi"}, + }, { + "v: true", + map[string]string{"v": "true"}, + }, { + "v: true", + map[string]interface{}{"v": true}, + }, { + "v: 10", + map[string]interface{}{"v": 10}, + }, { + "v: 0b10", + map[string]interface{}{"v": 2}, + }, { + "v: 0xA", + map[string]interface{}{"v": 10}, + }, { + "v: 4294967296", + map[string]int64{"v": 4294967296}, + }, { + "v: 0.1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .Inf", + map[string]interface{}{"v": math.Inf(+1)}, + }, { + "v: -.Inf", + map[string]interface{}{"v": math.Inf(-1)}, + }, { + "v: -10", + map[string]interface{}{"v": -10}, + }, { + "v: -.1", + map[string]interface{}{"v": -0.1}, + }, + + // Simple values. 
+ { + "123", + &unmarshalIntTest, + }, + + // Floats from spec + { + "canonical: 6.8523e+5", + map[string]interface{}{"canonical": 6.8523e+5}, + }, { + "expo: 685.230_15e+03", + map[string]interface{}{"expo": 685.23015e+03}, + }, { + "fixed: 685_230.15", + map[string]interface{}{"fixed": 685230.15}, + }, { + "neginf: -.inf", + map[string]interface{}{"neginf": math.Inf(-1)}, + }, { + "fixed: 685_230.15", + map[string]float64{"fixed": 685230.15}, + }, + //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported + //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails. + + // Bools from spec + { + "canonical: y", + map[string]interface{}{"canonical": true}, + }, { + "answer: NO", + map[string]interface{}{"answer": false}, + }, { + "logical: True", + map[string]interface{}{"logical": true}, + }, { + "option: on", + map[string]interface{}{"option": true}, + }, { + "option: on", + map[string]bool{"option": true}, + }, + // Ints from spec + { + "canonical: 685230", + map[string]interface{}{"canonical": 685230}, + }, { + "decimal: +685_230", + map[string]interface{}{"decimal": 685230}, + }, { + "octal: 02472256", + map[string]interface{}{"octal": 685230}, + }, { + "hexa: 0x_0A_74_AE", + map[string]interface{}{"hexa": 685230}, + }, { + "bin: 0b1010_0111_0100_1010_1110", + map[string]interface{}{"bin": 685230}, + }, { + "bin: -0b101010", + map[string]interface{}{"bin": -42}, + }, { + "decimal: +685_230", + map[string]int{"decimal": 685230}, + }, + + //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported + + // Nulls from spec + { + "empty:", + map[string]interface{}{"empty": nil}, + }, { + "canonical: ~", + map[string]interface{}{"canonical": nil}, + }, { + "english: null", + map[string]interface{}{"english": nil}, + }, { + "~: null key", + map[interface{}]string{nil: "null key"}, + }, { + "empty:", + map[string]*bool{"empty": nil}, + }, + + // Flow sequence + { + "seq: [A,B]", + 
map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq: [A,B,C,]", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]int{"seq": []int{1}}, + }, { + "seq: [A,1,C]", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + // Block sequence + { + "seq:\n - A\n - B", + map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq:\n - A\n - B\n - C", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]int{"seq": []int{1}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + + // Literal block scalar + { + "scalar: | # Comment\n\n literal\n\n \ttext\n\n", + map[string]string{"scalar": "\nliteral\n\n\ttext\n"}, + }, + + // Folded block scalar + { + "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n", + map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"}, + }, + + // Map inside interface with no type hints. + { + "a: {b: c}", + map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + }, + + // Structs and type conversions. 
+ { + "hello: world", + &struct{ Hello string }{"world"}, + }, { + "a: {b: c}", + &struct{ A struct{ B string } }{struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A *struct{ B string } }{&struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A map[string]string }{map[string]string{"b": "c"}}, + }, { + "a: {b: c}", + &struct{ A *map[string]string }{&map[string]string{"b": "c"}}, + }, { + "a:", + &struct{ A map[string]string }{}, + }, { + "a: 1", + &struct{ A int }{1}, + }, { + "a: 1", + &struct{ A float64 }{1}, + }, { + "a: 1.0", + &struct{ A int }{1}, + }, { + "a: 1.0", + &struct{ A uint }{1}, + }, { + "a: [1, 2]", + &struct{ A []int }{[]int{1, 2}}, + }, { + "a: 1", + &struct{ B int }{0}, + }, { + "a: 1", + &struct { + B int "a" + }{1}, + }, { + "a: y", + &struct{ A bool }{true}, + }, + + // Some cross type conversions + { + "v: 42", + map[string]uint{"v": 42}, + }, { + "v: -42", + map[string]uint{}, + }, { + "v: 4294967296", + map[string]uint64{"v": 4294967296}, + }, { + "v: -4294967296", + map[string]uint64{}, + }, + + // int + { + "int_max: 2147483647", + map[string]int{"int_max": math.MaxInt32}, + }, + { + "int_min: -2147483648", + map[string]int{"int_min": math.MinInt32}, + }, + { + "int_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int{}, + }, + + // int64 + { + "int64_max: 9223372036854775807", + map[string]int64{"int64_max": math.MaxInt64}, + }, + { + "int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_max_base2": math.MaxInt64}, + }, + { + "int64_min: -9223372036854775808", + map[string]int64{"int64_min": math.MinInt64}, + }, + { + "int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_neg_base2": -math.MaxInt64}, + }, + { + "int64_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int64{}, + }, + + // uint + { + "uint_min: 0", + map[string]uint{"uint_min": 0}, + }, + { + "uint_max: 
4294967295", + map[string]uint{"uint_max": math.MaxUint32}, + }, + { + "uint_underflow: -1", + map[string]uint{}, + }, + + // uint64 + { + "uint64_min: 0", + map[string]uint{"uint64_min": 0}, + }, + { + "uint64_max: 18446744073709551615", + map[string]uint64{"uint64_max": math.MaxUint64}, + }, + { + "uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111", + map[string]uint64{"uint64_max_base2": math.MaxUint64}, + }, + { + "uint64_maxint64: 9223372036854775807", + map[string]uint64{"uint64_maxint64": math.MaxInt64}, + }, + { + "uint64_underflow: -1", + map[string]uint64{}, + }, + + // float32 + { + "float32_max: 3.40282346638528859811704183484516925440e+38", + map[string]float32{"float32_max": math.MaxFloat32}, + }, + { + "float32_nonzero: 1.401298464324817070923729583289916131280e-45", + map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32}, + }, + { + "float32_maxuint64: 18446744073709551615", + map[string]float32{"float32_maxuint64": float32(math.MaxUint64)}, + }, + { + "float32_maxuint64+1: 18446744073709551616", + map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)}, + }, + + // float64 + { + "float64_max: 1.797693134862315708145274237317043567981e+308", + map[string]float64{"float64_max": math.MaxFloat64}, + }, + { + "float64_nonzero: 4.940656458412465441765687928682213723651e-324", + map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64}, + }, + { + "float64_maxuint64: 18446744073709551615", + map[string]float64{"float64_maxuint64": float64(math.MaxUint64)}, + }, + { + "float64_maxuint64+1: 18446744073709551616", + map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)}, + }, + + // Overflow cases. + { + "v: 4294967297", + map[string]int32{}, + }, { + "v: 128", + map[string]int8{}, + }, + + // Quoted values. 
+ { + "'1': '\"2\"'", + map[interface{}]interface{}{"1": "\"2\""}, + }, { + "v:\n- A\n- 'B\n\n C'\n", + map[string][]string{"v": []string{"A", "B\nC"}}, + }, + + // Explicit tags. + { + "v: !!float '1.1'", + map[string]interface{}{"v": 1.1}, + }, { + "v: !!null ''", + map[string]interface{}{"v": nil}, + }, { + "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'", + map[string]interface{}{"v": 1}, + }, + + // Non-specific tag (Issue #75) + { + "v: ! test", + map[string]interface{}{"v": "test"}, + }, + + // Anchors and aliases. + { + "a: &x 1\nb: &y 2\nc: *x\nd: *y\n", + &struct{ A, B, C, D int }{1, 2, 1, 2}, + }, { + "a: &a {c: 1}\nb: *a", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, { + "a: &a [1, 2]\nb: *a", + &struct{ B []int }{[]int{1, 2}}, + }, { + "b: *a\na: &a {c: 1}", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, + + // Bug #1133337 + { + "foo: ''", + map[string]*string{"foo": new(string)}, + }, { + "foo: null", + map[string]string{"foo": ""}, + }, { + "foo: null", + map[string]interface{}{"foo": nil}, + }, + + // Ignored field + { + "a: 1\nb: 2\n", + &struct { + A int + B int "-" + }{1, 0}, + }, + + // Bug #1191981 + { + "" + + "%YAML 1.1\n" + + "--- !!str\n" + + `"Generic line break (no glyph)\n\` + "\n" + + ` Generic line break (glyphed)\n\` + "\n" + + ` Line separator\u2028\` + "\n" + + ` Paragraph separator\u2029"` + "\n", + "" + + "Generic line break (no glyph)\n" + + "Generic line break (glyphed)\n" + + "Line separator\u2028Paragraph separator\u2029", + }, + + // Struct inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + }, + + // Map inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + }, + + // bug 1243827 + { + "a: -b_c", + map[string]interface{}{"a": "-b_c"}, + }, + { + "a: +b_c", + map[string]interface{}{"a": 
"+b_c"}, + }, + { + "a: 50cent_of_dollar", + map[string]interface{}{"a": "50cent_of_dollar"}, + }, + + // Duration + { + "a: 3s", + map[string]time.Duration{"a": 3 * time.Second}, + }, + + // Issue #24. + { + "a: ", + map[string]string{"a": ""}, + }, + + // Base 60 floats are obsolete and unsupported. + { + "a: 1:1\n", + map[string]string{"a": "1:1"}, + }, + + // Binary data. + { + "a: !!binary gIGC\n", + map[string]string{"a": "\x80\x81\x82"}, + }, { + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + map[string]string{"a": strings.Repeat("\x90", 54)}, + }, { + "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n", + map[string]string{"a": strings.Repeat("\x00", 52)}, + }, + + // Ordered maps. + { + "{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}", + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + }, + + // Issue #39. + { + "a:\n b:\n c: d\n", + map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}}, + }, + + // Custom map type. + { + "a: {b: c}", + M{"a": M{"b": "c"}}, + }, + + // Support encoding.TextUnmarshaler. + { + "a: 1.2.3.4\n", + map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, + }, + { + "a: 2015-02-24T18:19:39Z\n", + map[string]time.Time{"a": time.Unix(1424801979, 0).In(time.UTC)}, + }, + + // Encode empty lists as zero-length slices. + { + "a: []", + &struct{ A []int }{[]int{}}, + }, + + // UTF-16-LE + { + "\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n\x00", + M{"ñoño": "very yes"}, + }, + // UTF-16-LE with surrogate. + { + "\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \x00=\xd8\xd4\xdf\n\x00", + M{"ñoño": "very yes 🟔"}, + }, + + // UTF-16-BE + { + "\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n", + M{"ñoño": "very yes"}, + }, + // UTF-16-BE with surrogate. 
+ { + "\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \xd8=\xdf\xd4\x00\n", + M{"ñoño": "very yes 🟔"}, + }, + + // YAML Float regex shouldn't match this + { + "a: 123456e1\n", + M{"a": "123456e1"}, + }, { + "a: 123456E1\n", + M{"a": "123456E1"}, + }, +} + +type M map[interface{}]interface{} + +type inlineB struct { + B int + inlineC `yaml:",inline"` +} + +type inlineC struct { + C int +} + +func (s *S) TestUnmarshal(c *C) { + for i, item := range unmarshalTests { + c.Logf("test %d: %q", i, item.data) + t := reflect.ValueOf(item.value).Type() + var value interface{} + switch t.Kind() { + case reflect.Map: + value = reflect.MakeMap(t).Interface() + case reflect.String: + value = reflect.New(t).Interface() + case reflect.Ptr: + value = reflect.New(t.Elem()).Interface() + default: + c.Fatalf("missing case for %s", t) + } + err := yaml.Unmarshal([]byte(item.data), value) + if _, ok := err.(*yaml.TypeError); !ok { + c.Assert(err, IsNil) + } + if t.Kind() == reflect.String { + c.Assert(*value.(*string), Equals, item.value) + } else { + c.Assert(value, DeepEquals, item.value) + } + } +} + +func (s *S) TestUnmarshalNaN(c *C) { + value := map[string]interface{}{} + err := yaml.Unmarshal([]byte("notanum: .NaN"), &value) + c.Assert(err, IsNil) + c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true) +} + +var unmarshalErrorTests = []struct { + data, error string +}{ + {"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"}, + {"v: [A,", "yaml: line 1: did not find expected node content"}, + {"v:\n- [A,", "yaml: line 2: did not find expected node content"}, + {"a: *b\n", "yaml: unknown anchor 'b' referenced"}, + {"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"}, + {"value: -", "yaml: block sequence entries are not allowed in this context"}, + {"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"}, + {"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`}, + {"{{.}}", `yaml: 
invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`}, + {"%TAG !%79! tag:yaml.org,2002:\n---\nv: !%79!int '1'", "yaml: did not find expected whitespace"}, +} + +func (s *S) TestUnmarshalErrors(c *C) { + for _, item := range unmarshalErrorTests { + var value interface{} + err := yaml.Unmarshal([]byte(item.data), &value) + c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value)) + } +} + +var unmarshalerTests = []struct { + data, tag string + value interface{} +}{ + {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}}, + {"_: [1,A]", "!!seq", []interface{}{1, "A"}}, + {"_: 10", "!!int", 10}, + {"_: null", "!!null", nil}, + {`_: BAR!`, "!!str", "BAR!"}, + {`_: "BAR!"`, "!!str", "BAR!"}, + {"_: !!foo 'BAR!'", "!!foo", "BAR!"}, + {`_: ""`, "!!str", ""}, +} + +var unmarshalerResult = map[int]error{} + +type unmarshalerType struct { + value interface{} +} + +func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error { + if err := unmarshal(&o.value); err != nil { + return err + } + if i, ok := o.value.(int); ok { + if result, ok := unmarshalerResult[i]; ok { + return result + } + } + return nil +} + +type unmarshalerPointer struct { + Field *unmarshalerType "_" +} + +type unmarshalerValue struct { + Field unmarshalerType "_" +} + +func (s *S) TestUnmarshalerPointerField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerPointer{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + if item.value == nil { + c.Assert(obj.Field, IsNil) + } else { + c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } + } +} + +func (s *S) TestUnmarshalerValueField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerValue{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + c.Assert(obj.Field, NotNil, Commentf("Pointer not 
initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } +} + +func (s *S) TestUnmarshalerWholeDocument(c *C) { + obj := &unmarshalerType{} + err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj) + c.Assert(err, IsNil) + value, ok := obj.value.(map[interface{}]interface{}) + c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value)) + c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value) +} + +func (s *S) TestUnmarshalerTypeError(c *C) { + unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}} + unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}} + defer func() { + delete(unmarshalerResult, 2) + delete(unmarshalerResult, 4) + }() + + type T struct { + Before int + After int + M map[string]*unmarshalerType + } + var v T + data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, ""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " foo\n"+ + " bar\n"+ + " line 1: cannot unmarshal !!str `B` into int") + c.Assert(v.M["abc"], NotNil) + c.Assert(v.M["def"], IsNil) + c.Assert(v.M["ghi"], NotNil) + c.Assert(v.M["jkl"], IsNil) + + c.Assert(v.M["abc"].value, Equals, 1) + c.Assert(v.M["ghi"].value, Equals, 3) +} + +type proxyTypeError struct{} + +func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + var a int32 + var b int64 + if err := unmarshal(&s); err != nil { + panic(err) + } + if s == "a" { + if err := unmarshal(&b); err == nil { + panic("should have failed") + } + return unmarshal(&a) + } + if err := unmarshal(&a); err == nil { + panic("should have failed") + } + return unmarshal(&b) +} + +func (s *S) TestUnmarshalerTypeErrorProxying(c *C) { + type T struct { + Before int + After int + M map[string]*proxyTypeError + } + var v T + data := `{before: A, m: {abc: a, def: b}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, 
""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " line 1: cannot unmarshal !!str `a` into int32\n"+ + " line 1: cannot unmarshal !!str `b` into int64\n"+ + " line 1: cannot unmarshal !!str `B` into int") +} + +type failingUnmarshaler struct{} + +var failingErr = errors.New("failingErr") + +func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { + return failingErr +} + +func (s *S) TestUnmarshalerError(c *C) { + err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{}) + c.Assert(err, Equals, failingErr) +} + +type sliceUnmarshaler []int + +func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { + var slice []int + err := unmarshal(&slice) + if err == nil { + *su = slice + return nil + } + + var intVal int + err = unmarshal(&intVal) + if err == nil { + *su = []int{intVal} + return nil + } + + return err +} + +func (s *S) TestUnmarshalerRetry(c *C) { + var su sliceUnmarshaler + err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3})) + + err = yaml.Unmarshal([]byte("1"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1})) +} + +// From http://yaml.org/type/merge.html +var mergeTests = ` +anchors: + list: + - &CENTER { "x": 1, "y": 2 } + - &LEFT { "x": 0, "y": 2 } + - &BIG { "r": 10 } + - &SMALL { "r": 1 } + +# All the following maps are equal: + +plain: + # Explicit keys + "x": 1 + "y": 2 + "r": 10 + label: center/big + +mergeOne: + # Merge one map + << : *CENTER + "r": 10 + label: center/big + +mergeMultiple: + # Merge multiple maps + << : [ *CENTER, *BIG ] + label: center/big + +override: + # Override + << : [ *BIG, *LEFT, *SMALL ] + "x": 1 + label: center/big + +shortTag: + # Explicit short merge tag + !!merge "<<" : [ *CENTER, *BIG ] + label: center/big + +longTag: + # Explicit merge long tag + ! 
"<<" : [ *CENTER, *BIG ] + label: center/big + +inlineMap: + # Inlined map + << : {"x": 1, "y": 2, "r": 10} + label: center/big + +inlineSequenceMap: + # Inlined map in sequence + << : [ *CENTER, {"r": 10} ] + label: center/big +` + +func (s *S) TestMerge(c *C) { + var want = map[interface{}]interface{}{ + "x": 1, + "y": 2, + "r": 10, + "label": "center/big", + } + + var m map[interface{}]interface{} + err := yaml.Unmarshal([]byte(mergeTests), &m) + c.Assert(err, IsNil) + for name, test := range m { + if name == "anchors" { + continue + } + c.Assert(test, DeepEquals, want, Commentf("test %q failed", name)) + } +} + +func (s *S) TestMergeStruct(c *C) { + type Data struct { + X, Y, R int + Label string + } + want := Data{1, 2, 10, "center/big"} + + var m map[string]Data + err := yaml.Unmarshal([]byte(mergeTests), &m) + c.Assert(err, IsNil) + for name, test := range m { + if name == "anchors" { + continue + } + c.Assert(test, Equals, want, Commentf("test %q failed", name)) + } +} + +var unmarshalNullTests = []func() interface{}{ + func() interface{} { var v interface{}; v = "v"; return &v }, + func() interface{} { var s = "s"; return &s }, + func() interface{} { var s = "s"; sptr := &s; return &sptr }, + func() interface{} { var i = 1; return &i }, + func() interface{} { var i = 1; iptr := &i; return &iptr }, + func() interface{} { m := map[string]int{"s": 1}; return &m }, + func() interface{} { m := map[string]int{"s": 1}; return m }, +} + +func (s *S) TestUnmarshalNull(c *C) { + for _, test := range unmarshalNullTests { + item := test() + zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface() + err := yaml.Unmarshal([]byte("null"), item) + c.Assert(err, IsNil) + if reflect.TypeOf(item).Kind() == reflect.Map { + c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface()) + } else { + c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero) + } + } +} + +func (s *S) TestUnmarshalSliceOnPreset(c *C) { + 
// Issue #48. + v := struct{ A []int }{[]int{1}} + yaml.Unmarshal([]byte("a: [2]"), &v) + c.Assert(v.A, DeepEquals, []int{2}) +} + +func (s *S) TestUnmarshalStrict(c *C) { + v := struct{ A, B int }{} + + err := yaml.UnmarshalStrict([]byte("a: 1\nb: 2"), &v) + c.Check(err, IsNil) + err = yaml.Unmarshal([]byte("a: 1\nb: 2\nc: 3"), &v) + c.Check(err, IsNil) + err = yaml.UnmarshalStrict([]byte("a: 1\nb: 2\nc: 3"), &v) + c.Check(err, ErrorMatches, "yaml: unmarshal errors:\n line 1: field c not found in struct struct { A int; B int }") +} + +//var data []byte +//func init() { +// var err error +// data, err = ioutil.ReadFile("/tmp/file.yaml") +// if err != nil { +// panic(err) +// } +//} +// +//func (s *S) BenchmarkUnmarshal(c *C) { +// var err error +// for i := 0; i < c.N; i++ { +// var v map[string]interface{} +// err = yaml.Unmarshal(data, &v) +// } +// if err != nil { +// panic(err) +// } +//} +// +//func (s *S) BenchmarkMarshal(c *C) { +// var v map[string]interface{} +// yaml.Unmarshal(data, &v) +// c.ResetTimer() +// for i := 0; i < c.N; i++ { +// yaml.Marshal(&v) +// } +//} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/emitterc.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/emitterc.go new file mode 100644 index 0000000000..41de8b856c --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/emitterc.go @@ -0,0 +1,1684 @@ +package yaml + +import ( + "bytes" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. 
+func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. 
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return 
false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return 
yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if 
yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. 
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. 
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. 
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { 
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+		if !emitter.scalar_data.single_quoted_allowed {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+
+	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+		emitter.tag_data.handle = []byte{'!'}
+	}
+	emitter.scalar_data.style = style
+	return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+	if emitter.anchor_data.anchor == nil {
+		return true
+	}
+	c := []byte{'&'}
+	if emitter.anchor_data.alias {
+		c[0] = '*'
+	}
+	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+		return false
+	}
+	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+		return true
+	}
+	if len(emitter.tag_data.handle) > 0 {
+		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+			return false
+		}
+		if len(emitter.tag_data.suffix) > 0 {
+			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+				return false
+			}
+		}
+	} else {
+		// [Go] Allocate these slices elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' 
{ + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if 
i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', 
')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < 
len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = 
put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if 
!is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if 
!yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/encode.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/encode.go new file mode 100644 index 0000000000..84f8499551 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/encode.go @@ -0,0 +1,306 @@ +package yaml + +import ( + "encoding" + "fmt" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool +} + +func newEncoder() (e *encoder) { + e = &encoder{} + e.must(yaml_emitter_initialize(&e.emitter)) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) + e.emit() + e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) + e.emit() + return e +} + +func (e *encoder) finish() { + e.must(yaml_document_end_event_initialize(&e.event, true)) + e.emit() + e.emitter.open_ended = false + e.must(yaml_stream_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { + e.must(false) + } +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() { + e.nilv() + return + } + iface := in.Interface() + if m, ok := iface.(Marshaler); ok { + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + } else if m, ok := iface.(encoding.TextMarshaler); ok { + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + } + switch in.Kind() { + case reflect.Interface: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) 
itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + f() + e.must(yaml_mapping_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns 
whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + rtag, rs := resolve("", s) + if rtag == yaml_BINARY_TAG { + if tag == "" || tag == yaml_STR_TAG { + tag = rtag + s = rs.(string) + } else if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } else { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + } + if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else if strings.Contains(s, "\n") { + style = yaml_LITERAL_SCALAR_STYLE + } else { + style = yaml_PLAIN_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + 
+func (e *encoder) floatv(tag string, in reflect.Value) { + // FIXME: Handle 64 bits here. + s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/encode_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/encode_test.go new file mode 100644 index 0000000000..84099bd385 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/encode_test.go @@ -0,0 +1,501 @@ +package yaml_test + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" + + . 
"gopkg.in/check.v1" + "gopkg.in/yaml.v2" + "net" + "os" +) + +var marshalIntTest = 123 + +var marshalTests = []struct { + value interface{} + data string +}{ + { + nil, + "null\n", + }, { + &struct{}{}, + "{}\n", + }, { + map[string]string{"v": "hi"}, + "v: hi\n", + }, { + map[string]interface{}{"v": "hi"}, + "v: hi\n", + }, { + map[string]string{"v": "true"}, + "v: \"true\"\n", + }, { + map[string]string{"v": "false"}, + "v: \"false\"\n", + }, { + map[string]interface{}{"v": true}, + "v: true\n", + }, { + map[string]interface{}{"v": false}, + "v: false\n", + }, { + map[string]interface{}{"v": 10}, + "v: 10\n", + }, { + map[string]interface{}{"v": -10}, + "v: -10\n", + }, { + map[string]uint{"v": 42}, + "v: 42\n", + }, { + map[string]interface{}{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]int64{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]uint64{"v": 4294967296}, + "v: 4294967296\n", + }, { + map[string]interface{}{"v": "10"}, + "v: \"10\"\n", + }, { + map[string]interface{}{"v": 0.1}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": float64(0.1)}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": -0.1}, + "v: -0.1\n", + }, { + map[string]interface{}{"v": math.Inf(+1)}, + "v: .inf\n", + }, { + map[string]interface{}{"v": math.Inf(-1)}, + "v: -.inf\n", + }, { + map[string]interface{}{"v": math.NaN()}, + "v: .nan\n", + }, { + map[string]interface{}{"v": nil}, + "v: null\n", + }, { + map[string]interface{}{"v": ""}, + "v: \"\"\n", + }, { + map[string][]string{"v": []string{"A", "B"}}, + "v:\n- A\n- B\n", + }, { + map[string][]string{"v": []string{"A", "B\nC"}}, + "v:\n- A\n- |-\n B\n C\n", + }, { + map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}}, + "v:\n- A\n- 1\n- B:\n - 2\n - 3\n", + }, { + map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + "a:\n b: c\n", + }, { + map[string]interface{}{"a": "-"}, + "a: '-'\n", + }, + + // Simple values. 
+ { + &marshalIntTest, + "123\n", + }, + + // Structures + { + &struct{ Hello string }{"world"}, + "hello: world\n", + }, { + &struct { + A struct { + B string + } + }{struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{&struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{}, + "a: null\n", + }, { + &struct{ A int }{1}, + "a: 1\n", + }, { + &struct{ A []int }{[]int{1, 2}}, + "a:\n- 1\n- 2\n", + }, { + &struct { + B int "a" + }{1}, + "a: 1\n", + }, { + &struct{ A bool }{true}, + "a: true\n", + }, + + // Conditional flag + { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{1, 0}, + "a: 1\n", + }, { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{0, 0}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{nil}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{}}, + "a: {x: 0}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{0, 1}}, + "{}\n", + }, { + &struct { + A float64 "a,omitempty" + B float64 "b,omitempty" + }{1, 0}, + "a: 1\n", + }, + + // Flow flag + { + &struct { + A []int "a,flow" + }{[]int{1, 2}}, + "a: [1, 2]\n", + }, { + &struct { + A map[string]string "a,flow" + }{map[string]string{"b": "c", "d": "e"}}, + "a: {b: c, d: e}\n", + }, { + &struct { + A struct { + B, D string + } "a,flow" + }{struct{ B, D string }{"c", "e"}}, + "a: {b: c, d: e}\n", + }, + + // Unexported field + { + &struct { + u int + A int + }{0, 1}, + "a: 1\n", + }, + + // Ignored field + { + &struct { + A int + B int "-" + }{1, 2}, + "a: 1\n", + }, + + // Struct inlining + { + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + "a: 
1\nb: 2\nc: 3\n", + }, + + // Map inlining + { + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + "a: 1\nb: 2\nc: 3\n", + }, + + // Duration + { + map[string]time.Duration{"a": 3 * time.Second}, + "a: 3s\n", + }, + + // Issue #24: bug in map merging logic. + { + map[string]string{"a": ""}, + "a: \n", + }, + + // Issue #34: marshal unsupported base 60 floats quoted for compatibility + // with old YAML 1.1 parsers. + { + map[string]string{"a": "1:1"}, + "a: \"1:1\"\n", + }, + + // Binary data. + { + map[string]string{"a": "\x00"}, + "a: \"\\0\"\n", + }, { + map[string]string{"a": "\x80\x81\x82"}, + "a: !!binary gIGC\n", + }, { + map[string]string{"a": strings.Repeat("\x90", 54)}, + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + }, + + // Ordered maps. + { + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + "b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n", + }, + + // Encode unicode as utf-8 rather than in escaped form. + { + map[string]string{"a": "你好"}, + "a: 你好\n", + }, + + // Support encoding.TextMarshaler. + { + map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, + "a: 1.2.3.4\n", + }, + { + map[string]time.Time{"a": time.Unix(1424801979, 0)}, + "a: 2015-02-24T18:19:39Z\n", + }, + + // Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible). 
+ { + map[string]string{"a": "b: c"}, + "a: 'b: c'\n", + }, + + // Containing hash mark ('#') in string should be quoted + { + map[string]string{"a": "Hello #comment"}, + "a: 'Hello #comment'\n", + }, + { + map[string]string{"a": "你好 #comment"}, + "a: '你好 #comment'\n", + }, +} + +func (s *S) TestMarshal(c *C) { + defer os.Setenv("TZ", os.Getenv("TZ")) + os.Setenv("TZ", "UTC") + for _, item := range marshalTests { + data, err := yaml.Marshal(item.value) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, item.data) + } +} + +var marshalErrorTests = []struct { + value interface{} + error string + panic string +}{{ + value: &struct { + B int + inlineB ",inline" + }{1, inlineB{2, inlineC{3}}}, + panic: `Duplicated key 'b' in struct struct \{ B int; .*`, +}, { + value: &struct { + A int + B map[string]int ",inline" + }{1, map[string]int{"a": 2}}, + panic: `Can't have key "a" in inlined map; conflicts with struct field`, +}} + +func (s *S) TestMarshalErrors(c *C) { + for _, item := range marshalErrorTests { + if item.panic != "" { + c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic) + } else { + _, err := yaml.Marshal(item.value) + c.Assert(err, ErrorMatches, item.error) + } + } +} + +func (s *S) TestMarshalTypeCache(c *C) { + var data []byte + var err error + func() { + type T struct{ A int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + func() { + type T struct{ B int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + c.Assert(string(data), Equals, "b: 0\n") +} + +var marshalerTests = []struct { + data string + value interface{} +}{ + {"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}}, + {"_:\n- 1\n- A\n", []interface{}{1, "A"}}, + {"_: 10\n", 10}, + {"_: null\n", nil}, + {"_: BAR!\n", "BAR!"}, +} + +type marshalerType struct { + value interface{} +} + +func (o marshalerType) MarshalText() ([]byte, error) { + panic("MarshalText called on type with MarshalYAML") +} + +func (o marshalerType) 
MarshalYAML() (interface{}, error) { + return o.value, nil +} + +type marshalerValue struct { + Field marshalerType "_" +} + +func (s *S) TestMarshaler(c *C) { + for _, item := range marshalerTests { + obj := &marshalerValue{} + obj.Field.value = item.value + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, string(item.data)) + } +} + +func (s *S) TestMarshalerWholeDocument(c *C) { + obj := &marshalerType{} + obj.value = map[string]string{"hello": "world!"} + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, "hello: world!\n") +} + +type failingMarshaler struct{} + +func (ft *failingMarshaler) MarshalYAML() (interface{}, error) { + return nil, failingErr +} + +func (s *S) TestMarshalerError(c *C) { + _, err := yaml.Marshal(&failingMarshaler{}) + c.Assert(err, Equals, failingErr) +} + +func (s *S) TestSortedOutput(c *C) { + order := []interface{}{ + false, + true, + 1, + uint(1), + 1.0, + 1.1, + 1.2, + 2, + uint(2), + 2.0, + 2.1, + "", + ".1", + ".2", + ".a", + "1", + "2", + "a!10", + "a/2", + "a/10", + "a~10", + "ab/1", + "b/1", + "b/01", + "b/2", + "b/02", + "b/3", + "b/03", + "b1", + "b01", + "b3", + "c2.10", + "c10.2", + "d1", + "d12", + "d12a", + } + m := make(map[interface{}]int) + for _, k := range order { + m[k] = 1 + } + data, err := yaml.Marshal(m) + c.Assert(err, IsNil) + out := "\n" + string(data) + last := 0 + for i, k := range order { + repr := fmt.Sprint(k) + if s, ok := k.(string); ok { + if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil { + repr = `"` + repr + `"` + } + } + index := strings.Index(out, "\n"+repr+":") + if index == -1 { + c.Fatalf("%#v is not in the output: %#v", k, out) + } + if index < last { + c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out) + } + last = index + } +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/example_embedded_test.go 
b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/example_embedded_test.go new file mode 100644 index 0000000000..c8b241d549 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/example_embedded_test.go @@ -0,0 +1,41 @@ +package yaml_test + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +// An example showing how to unmarshal embedded +// structs from YAML. + +type StructA struct { + A string `yaml:"a"` +} + +type StructB struct { + // Embedded structs are not treated as embedded in YAML by default. To do that, + // add the ",inline" annotation below + StructA `yaml:",inline"` + B string `yaml:"b"` +} + +var data = ` +a: a string from struct A +b: a string from struct B +` + +func ExampleUnmarshal_embedded() { + var b StructB + + err := yaml.Unmarshal([]byte(data), &b) + if err != nil { + log.Fatal("cannot unmarshal data: %v", err) + } + fmt.Println(b.A) + fmt.Println(b.B) + // Output: + // a string from struct A + // a string from struct B +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/parserc.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/parserc.go new file mode 100644 index 0000000000..81d05dfe57 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? 
+// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. 
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. +func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case 
yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document.
+ var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected <document start>", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else 
{ + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) + break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. 
+ end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, 
start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == 
yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = 
parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? 
+ } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = 
parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. 
+func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. 
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/readerc.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/readerc.go new file mode 100644 index 0000000000..f450791717 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/readerc.go @@ -0,0 +1,394 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. 
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. 
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. 
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. 
+ switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. 
+ // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. 
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. 
+ if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/resolve.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/resolve.go new file mode 100644 index 0000000000..232313cc08 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/resolve.go @@ -0,0 +1,208 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "unicode/utf8" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' 
// Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" 
+ tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. 
+ plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt(plain[3:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, -int(intv) + } else { + return yaml_INT_TAG, -intv + } + } + } + // XXX Handle timestamps here. + + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + if tag == yaml_BINARY_TAG { + return yaml_BINARY_TAG, in + } + if utf8.ValidString(in) { + return yaml_STR_TAG, in + } + return yaml_BINARY_TAG, encodeBase64(in) +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. 
+func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/scannerc.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/scannerc.go new file mode 100644 index 0000000000..0744844558 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/scannerc.go @@ -0,0 +1,2711 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. 
We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. 
+// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. 
+// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. 
Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? 
a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? 
a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. 
+func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) 
+ parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. 
+ for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? 
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? 
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. 
+ //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. 
+ for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // A simple key is required only when it is the first token in the current + // line. Therefore it is always allowed. But we add a check anyway. + if required && !parser.simple_key_allowed { + panic("should not happen") + } + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. 
+ if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + return true +} + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // Increase the flow level. + parser.flow_level++ + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + } + return true +} + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. 
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. 
+ parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. 
+ if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. 
+ parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. 
+ start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. 
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. 
+ if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. 
+ *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. 
+ if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. 
+ length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. 
+ */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. 
+ *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. 
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. 
+ if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. 
+ if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. 
+ for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. 
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. 
+ if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. 
+ switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. 
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. 
+ if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' 
&& + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". + if parser.flow_level > 0 && + parser.buffer[parser.buffer_pos] == ':' && + !is_blankz(parser.buffer, parser.buffer_pos+1) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found unexpected ':'") + return false + } + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? 
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab character that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violate indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/sorter.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/sorter.go new file mode 100644 index 0000000000..5958822f9c --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/sorter.go @@ -0,0 +1,104 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. 
+func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/suite_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/suite_test.go new file mode 100644 index 0000000000..c5cf1ed4f6 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/suite_test.go @@ -0,0 +1,12 @@ +package yaml_test + +import ( + . "gopkg.in/check.v1" + "testing" +) + +func Test(t *testing.T) { TestingT(t) } + +type S struct{} + +var _ = Suite(&S{}) diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/writerc.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/writerc.go new file mode 100644 index 0000000000..190362f25d --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/writerc.go @@ -0,0 +1,89 @@ +package yaml + +// Set the writer error and return false. 
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + // If the output encoding is UTF-8, we don't need to recode the buffer. + if emitter.encoding == yaml_UTF8_ENCODING { + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true + } + + // Recode the buffer into the raw buffer. + var low, high int + if emitter.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + pos := 0 + for pos < emitter.buffer_pos { + // See the "reader.c" code for more details on UTF-8 encoding. Note + // that we assume that the buffer contains a valid UTF-8 sequence. + + // Read the next UTF-8 character. + octet := emitter.buffer[pos] + + var w int + var value rune + switch { + case octet&0x80 == 0x00: + w, value = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, value = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, value = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, value = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = emitter.buffer[pos+k] + value = (value << 6) + (rune(octet) & 0x3F) + } + pos += w + + // Write the character. + if value < 0x10000 { + var b [2]byte + b[high] = byte(value >> 8) + b[low] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) + } else { + // Write the character using a surrogate pair (check "reader.c"). 
+ var b [4]byte + value -= 0x10000 + b[high] = byte(0xD8 + (value >> 18)) + b[low] = byte((value >> 10) & 0xFF) + b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) + b[low+2] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) + } + } + + // Write the raw buffer. + if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + emitter.raw_buffer = emitter.raw_buffer[:0] + return true +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/yaml.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/yaml.go new file mode 100644 index 0000000000..bf18884e0e --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/yaml.go @@ -0,0 +1,357 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. 
The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// UnmarshalStrict is like Unmarshal except that any fields that are found +// in the data that do not have corresponding struct members will result in +// an error. 
+func UnmarshalStrict(in []byte, out interface{}) (err error) { + return unmarshal(in, out, true) +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder(strict) + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only unmarshalled if they are exported (have an upper case +// first letter), and are unmarshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Does not apply to zero valued structs. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. 
+// +// For example: +// +// type T struct { +// F int "a,omitempty" +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshal("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + + // Inline holds the field index if the field is part of an inlined struct. 
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
+ } + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/yamlh.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/yamlh.go new file mode 100644 index 0000000000..3caeca0491 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/yamlh.go @@ -0,0 +1,716 @@ +package yaml + +import ( + "io" +) + +// The version directive data. 
+type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. 
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. 
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). 
+ value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). 
+ implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. 
+type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. 
+// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. + yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. 
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case 
yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_file io.Reader // File input data. + input []byte // String input data. 
+ input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). 
+// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. 
+type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_file io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. 
+ tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expressed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/yamlprivateh.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/yamlprivateh.go new file mode 100644 index 0000000000..8110ce3c37 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/go-yaml/yaml/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. 
+ initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. 
+func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. 
+func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/LICENSE b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/LICENSE new file mode 100644 index 0000000000..9b4a822d92 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2013, Space Monkey, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/README.md b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/README.md new file mode 100644 index 0000000000..4ebb692fb1 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/README.md @@ -0,0 +1,89 @@ +gls +=== + +Goroutine local storage + +### IMPORTANT NOTE ### + +It is my duty to point you to https://blog.golang.org/context, which is how +Google solves all of the problems you'd perhaps consider using this package +for at scale. + +One downside to Google's approach is that *all* of your functions must have +a new first argument, but after clearing that hurdle everything else is much +better. + +If you aren't interested in this warning, read on. + +### Huhwaht? Why? ### + +Every so often, a thread shows up on the +[golang-nuts](https://groups.google.com/d/forum/golang-nuts) asking for some +form of goroutine-local-storage, or some kind of goroutine id, or some kind of +context. There are a few valid use cases for goroutine-local-storage, one of +the most prominent being log line context. One poster was interested in being +able to log an HTTP request context id in every log line in the same goroutine +as the incoming HTTP request, without having to change every library and +function call he was interested in logging. + +This would be pretty useful. Provided that you could get some kind of +goroutine-local-storage, you could call +[log.SetOutput](http://golang.org/pkg/log/#SetOutput) with your own logging +writer that checks goroutine-local-storage for some context information and +adds that context to your log lines. 
+ +But alas, Andrew Gerrand's typically diplomatic answer to the question of +goroutine-local variables was: + +> We wouldn't even be having this discussion if thread local storage wasn't +> useful. But every feature comes at a cost, and in my opinion the cost of +> threadlocals far outweighs their benefits. They're just not a good fit for +> Go. + +So, yeah, that makes sense. That's a pretty good reason for why the language +won't support a specific and (relatively) unuseful feature that requires some +runtime changes, just for the sake of a little bit of log improvement. + +But does Go require runtime changes? + +### How it works ### + +Go has pretty fantastic introspective and reflective features, but one thing Go +doesn't give you is any kind of access to the stack pointer, or frame pointer, +or goroutine id, or anything contextual about your current stack. It gives you +access to your list of callers, but only along with program counters, which are +fixed at compile time. + +But it does give you the stack. + +So, we define 16 special functions and embed base-16 tags into the stack using +the call order of those 16 functions. Then, we can read our tags back out of +the stack looking at the callers list. + +We then use these tags as an index into a traditional map for implementing +this library. + +### What are people saying? ### + +"Wow, that's horrifying." + +"This is the most terrible thing I have seen in a very long time." + +"Where is it getting a context from? Is this serializing all the requests? +What the heck is the client being bound to? What are these tags? Why does he +need callers? Oh god no. No no no." 
+ +### Docs ### + +Please see the docs at http://godoc.org/github.com/jtolds/gls + +### Related ### + +If you're okay relying on the string format of the current runtime stacktrace +including a unique goroutine id (not guaranteed by the spec or anything, but +very unlikely to change within a Go release), you might be able to squeeze +out a bit more performance by using this similar library, inspired by some +code Brad Fitzpatrick wrote for debugging his HTTP/2 library: +https://github.com/tylerb/gls (in contrast, jtolds/gls doesn't require +any knowledge of the string format of the runtime stacktrace, which +probably adds unnecessary overhead). diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/context.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/context.go new file mode 100644 index 0000000000..618a171061 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/context.go @@ -0,0 +1,153 @@ +// Package gls implements goroutine-local storage. +package gls + +import ( + "sync" +) + +var ( + mgrRegistry = make(map[*ContextManager]bool) + mgrRegistryMtx sync.RWMutex +) + +// Values is simply a map of key types to value types. Used by SetValues to +// set multiple values at once. +type Values map[interface{}]interface{} + +// ContextManager is the main entrypoint for interacting with +// Goroutine-local-storage. You can have multiple independent ContextManagers +// at any given time. ContextManagers are usually declared globally for a given +// class of context variables. You should use NewContextManager for +// construction. +type ContextManager struct { + mtx sync.Mutex + values map[uint]Values +} + +// NewContextManager returns a brand new ContextManager. It also registers the +// new ContextManager in the ContextManager registry which is used by the Go +// method. ContextManagers are typically defined globally at package scope. 
+func NewContextManager() *ContextManager { + mgr := &ContextManager{values: make(map[uint]Values)} + mgrRegistryMtx.Lock() + defer mgrRegistryMtx.Unlock() + mgrRegistry[mgr] = true + return mgr +} + +// Unregister removes a ContextManager from the global registry, used by the +// Go method. Only intended for use when you're completely done with a +// ContextManager. Use of Unregister at all is rare. +func (m *ContextManager) Unregister() { + mgrRegistryMtx.Lock() + defer mgrRegistryMtx.Unlock() + delete(mgrRegistry, m) +} + +// SetValues takes a collection of values and a function to call for those +// values to be set in. Anything further down the stack will have the set +// values available through GetValue. SetValues will add new values or replace +// existing values of the same key and will not mutate or change values for +// previous stack frames. +// SetValues is slow (makes a copy of all current and new values for the new +// gls-context) in order to reduce the amount of lookups GetValue requires. 
+func (m *ContextManager) SetValues(new_values Values, context_call func()) { + if len(new_values) == 0 { + context_call() + return + } + + mutated_keys := make([]interface{}, 0, len(new_values)) + mutated_vals := make(Values, len(new_values)) + + EnsureGoroutineId(func(gid uint) { + m.mtx.Lock() + state, found := m.values[gid] + if !found { + state = make(Values, len(new_values)) + m.values[gid] = state + } + m.mtx.Unlock() + + for key, new_val := range new_values { + mutated_keys = append(mutated_keys, key) + if old_val, ok := state[key]; ok { + mutated_vals[key] = old_val + } + state[key] = new_val + } + + defer func() { + if !found { + m.mtx.Lock() + delete(m.values, gid) + m.mtx.Unlock() + return + } + + for _, key := range mutated_keys { + if val, ok := mutated_vals[key]; ok { + state[key] = val + } else { + delete(state, key) + } + } + }() + + context_call() + }) +} + +// GetValue will return a previously set value, provided that the value was set +// by SetValues somewhere higher up the stack. If the value is not found, ok +// will be false. +func (m *ContextManager) GetValue(key interface{}) ( + value interface{}, ok bool) { + gid, ok := GetGoroutineId() + if !ok { + return nil, false + } + + m.mtx.Lock() + state, found := m.values[gid] + m.mtx.Unlock() + + if !found { + return nil, false + } + value, ok = state[key] + return value, ok +} + +func (m *ContextManager) getValues() Values { + gid, ok := GetGoroutineId() + if !ok { + return nil + } + m.mtx.Lock() + state, _ := m.values[gid] + m.mtx.Unlock() + return state +} + +// Go preserves ContextManager values and Goroutine-local-storage across new +// goroutine invocations. The Go method makes a copy of all existing values on +// all registered context managers and makes sure they are still set after +// kicking off the provided function in a new goroutine. 
If you don't use this +// Go method instead of the standard 'go' keyword, you will lose values in +// ContextManagers, as goroutines have brand new stacks. +func Go(cb func()) { + mgrRegistryMtx.RLock() + defer mgrRegistryMtx.RUnlock() + + for mgr := range mgrRegistry { + values := mgr.getValues() + if len(values) > 0 { + cb = func(mgr *ContextManager, cb func()) func() { + return func() { mgr.SetValues(values, cb) } + }(mgr, cb) + } + } + + go cb() +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/context_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/context_test.go new file mode 100644 index 0000000000..2fa426cb5d --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/context_test.go @@ -0,0 +1,145 @@ +package gls_test + +import ( + "fmt" + "sync" + "testing" + + "github.com/jtolds/gls" +) + +func TestContexts(t *testing.T) { + mgr1 := gls.NewContextManager() + mgr2 := gls.NewContextManager() + + CheckVal := func(mgr *gls.ContextManager, key, exp_val string) { + val, ok := mgr.GetValue(key) + if len(exp_val) == 0 { + if ok { + t.Fatalf("expected no value for key %s, got %s", key, val) + } + return + } + if !ok { + t.Fatalf("expected value %s for key %s, got no value", + exp_val, key) + } + if exp_val != val { + t.Fatalf("expected value %s for key %s, got %s", exp_val, key, + val) + } + + } + + Check := func(exp_m1v1, exp_m1v2, exp_m2v1, exp_m2v2 string) { + CheckVal(mgr1, "key1", exp_m1v1) + CheckVal(mgr1, "key2", exp_m1v2) + CheckVal(mgr2, "key1", exp_m2v1) + CheckVal(mgr2, "key2", exp_m2v2) + } + + Check("", "", "", "") + mgr2.SetValues(gls.Values{"key1": "val1c"}, func() { + Check("", "", "val1c", "") + mgr1.SetValues(gls.Values{"key1": "val1a"}, func() { + Check("val1a", "", "val1c", "") + mgr1.SetValues(gls.Values{"key2": "val1b"}, func() { + Check("val1a", "val1b", "val1c", "") + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + Check("", "", 
"", "") + }() + gls.Go(func() { + defer wg.Done() + Check("val1a", "val1b", "val1c", "") + }) + wg.Wait() + Check("val1a", "val1b", "val1c", "") + }) + Check("val1a", "", "val1c", "") + }) + Check("", "", "val1c", "") + }) + Check("", "", "", "") +} + +func ExampleContextManager_SetValues() { + var ( + mgr = gls.NewContextManager() + request_id_key = gls.GenSym() + ) + + MyLog := func() { + if request_id, ok := mgr.GetValue(request_id_key); ok { + fmt.Println("My request id is:", request_id) + } else { + fmt.Println("No request id found") + } + } + + mgr.SetValues(gls.Values{request_id_key: "12345"}, func() { + MyLog() + }) + MyLog() + + // Output: My request id is: 12345 + // No request id found +} + +func ExampleGo() { + var ( + mgr = gls.NewContextManager() + request_id_key = gls.GenSym() + ) + + MyLog := func() { + if request_id, ok := mgr.GetValue(request_id_key); ok { + fmt.Println("My request id is:", request_id) + } else { + fmt.Println("No request id found") + } + } + + mgr.SetValues(gls.Values{request_id_key: "12345"}, func() { + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + MyLog() + }() + wg.Wait() + wg.Add(1) + gls.Go(func() { + defer wg.Done() + MyLog() + }) + wg.Wait() + }) + + // Output: No request id found + // My request id is: 12345 +} + +func BenchmarkGetValue(b *testing.B) { + mgr := gls.NewContextManager() + mgr.SetValues(gls.Values{"test_key": "test_val"}, func() { + b.ResetTimer() + for i := 0; i < b.N; i++ { + val, ok := mgr.GetValue("test_key") + if !ok || val != "test_val" { + b.FailNow() + } + } + }) +} + +func BenchmarkSetValues(b *testing.B) { + mgr := gls.NewContextManager() + for i := 0; i < b.N/2; i++ { + mgr.SetValues(gls.Values{"test_key": "test_val"}, func() { + mgr.SetValues(gls.Values{"test_key2": "test_val2"}, func() {}) + }) + } +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/gen_sym.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/gen_sym.go new 
file mode 100644 index 0000000000..7f615cce93 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/gen_sym.go @@ -0,0 +1,21 @@ +package gls + +import ( + "sync" +) + +var ( + keyMtx sync.Mutex + keyCounter uint64 +) + +// ContextKey is a throwaway value you can use as a key to a ContextManager +type ContextKey struct{ id uint64 } + +// GenSym will return a brand new, never-before-used ContextKey +func GenSym() ContextKey { + keyMtx.Lock() + defer keyMtx.Unlock() + keyCounter += 1 + return ContextKey{id: keyCounter} +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/gid.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/gid.go new file mode 100644 index 0000000000..c16bf3a554 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/gid.go @@ -0,0 +1,25 @@ +package gls + +var ( + stackTagPool = &idPool{} +) + +// Will return this goroutine's identifier if set. If you always need a +// goroutine identifier, you should use EnsureGoroutineId which will make one +// if there isn't one already. +func GetGoroutineId() (gid uint, ok bool) { + return readStackTag() +} + +// Will call cb with the current goroutine identifier. If one hasn't already +// been generated, one will be created and set first. The goroutine identifier +// might be invalid after cb returns. 
+func EnsureGoroutineId(cb func(gid uint)) { + if gid, ok := readStackTag(); ok { + cb(gid) + return + } + gid := stackTagPool.Acquire() + defer stackTagPool.Release(gid) + addStackTag(gid, func() { cb(gid) }) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/id_pool.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/id_pool.go new file mode 100644 index 0000000000..b7974ae002 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/id_pool.go @@ -0,0 +1,34 @@ +package gls + +// though this could probably be better at keeping ids smaller, the goal of +// this class is to keep a registry of the smallest unique integer ids +// per-process possible + +import ( + "sync" +) + +type idPool struct { + mtx sync.Mutex + released []uint + max_id uint +} + +func (p *idPool) Acquire() (id uint) { + p.mtx.Lock() + defer p.mtx.Unlock() + if len(p.released) > 0 { + id = p.released[len(p.released)-1] + p.released = p.released[:len(p.released)-1] + return id + } + id = p.max_id + p.max_id++ + return id +} + +func (p *idPool) Release(id uint) { + p.mtx.Lock() + defer p.mtx.Unlock() + p.released = append(p.released, id) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/stack_tags.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/stack_tags.go new file mode 100644 index 0000000000..37bbd3347a --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/stack_tags.go @@ -0,0 +1,147 @@ +package gls + +// so, basically, we're going to encode integer tags in base-16 on the stack + +const ( + bitWidth = 4 + stackBatchSize = 16 +) + +var ( + pc_lookup = make(map[uintptr]int8, 17) + mark_lookup [16]func(uint, func()) +) + +func init() { + setEntries := func(f func(uint, func()), v int8) { + var ptr uintptr + f(0, func() { + ptr = findPtr() + }) + pc_lookup[ptr] = v + if v >= 0 { + mark_lookup[v] = f + } + } + 
setEntries(github_com_jtolds_gls_markS, -0x1) + setEntries(github_com_jtolds_gls_mark0, 0x0) + setEntries(github_com_jtolds_gls_mark1, 0x1) + setEntries(github_com_jtolds_gls_mark2, 0x2) + setEntries(github_com_jtolds_gls_mark3, 0x3) + setEntries(github_com_jtolds_gls_mark4, 0x4) + setEntries(github_com_jtolds_gls_mark5, 0x5) + setEntries(github_com_jtolds_gls_mark6, 0x6) + setEntries(github_com_jtolds_gls_mark7, 0x7) + setEntries(github_com_jtolds_gls_mark8, 0x8) + setEntries(github_com_jtolds_gls_mark9, 0x9) + setEntries(github_com_jtolds_gls_markA, 0xa) + setEntries(github_com_jtolds_gls_markB, 0xb) + setEntries(github_com_jtolds_gls_markC, 0xc) + setEntries(github_com_jtolds_gls_markD, 0xd) + setEntries(github_com_jtolds_gls_markE, 0xe) + setEntries(github_com_jtolds_gls_markF, 0xf) +} + +func addStackTag(tag uint, context_call func()) { + if context_call == nil { + return + } + github_com_jtolds_gls_markS(tag, context_call) +} + +// these private methods are named this horrendous name so gopherjs support +// is easier. it shouldn't add any runtime cost in non-js builds. 
+ +//go:noinline +func github_com_jtolds_gls_markS(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark0(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark1(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark2(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark3(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark4(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark5(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark6(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark7(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark8(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark9(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_markA(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_markB(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_markC(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_markD(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_markE(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_markF(tag uint, cb func()) { _m(tag, cb) } + +func _m(tag_remainder uint, cb func()) { + if tag_remainder == 0 { + cb() + } else { + mark_lookup[tag_remainder&0xf](tag_remainder>>bitWidth, cb) + } +} + +func readStackTag() (tag uint, ok bool) { + var current_tag uint + offset := 0 + for { + batch, next_offset := getStack(offset, stackBatchSize) + for _, pc := range batch { + val, ok := pc_lookup[pc] + if !ok { + continue + } + if val < 0 { + return current_tag, true + } + current_tag <<= bitWidth + current_tag += uint(val) + } + if 
next_offset == 0 { + break + } + offset = next_offset + } + return 0, false +} + +func (m *ContextManager) preventInlining() { + // dunno if findPtr or getStack are likely to get inlined in a future release + // of go, but if they are inlined and their callers are inlined, that could + // hork some things. let's do our best to explain to the compiler that we + // really don't want those two functions inlined by saying they could change + // at any time. assumes preventInlining doesn't get compiled out. + // this whole thing is probably overkill. + findPtr = m.values[0][0].(func() uintptr) + getStack = m.values[0][1].(func(int, int) ([]uintptr, int)) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/stack_tags_js.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/stack_tags_js.go new file mode 100644 index 0000000000..c4e8b801d3 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/stack_tags_js.go @@ -0,0 +1,75 @@ +// +build js + +package gls + +// This file is used for GopherJS builds, which don't have normal runtime +// stack trace support + +import ( + "strconv" + "strings" + + "github.com/gopherjs/gopherjs/js" +) + +const ( + jsFuncNamePrefix = "github_com_jtolds_gls_mark" +) + +func jsMarkStack() (f []uintptr) { + lines := strings.Split( + js.Global.Get("Error").New().Get("stack").String(), "\n") + f = make([]uintptr, 0, len(lines)) + for i, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + if i == 0 { + if line != "Error" { + panic("didn't understand js stack trace") + } + continue + } + fields := strings.Fields(line) + if len(fields) < 2 || fields[0] != "at" { + panic("didn't understand js stack trace") + } + + pos := strings.Index(fields[1], jsFuncNamePrefix) + if pos < 0 { + continue + } + pos += len(jsFuncNamePrefix) + if pos >= len(fields[1]) { + panic("didn't understand js stack trace") + } + char := string(fields[1][pos]) + 
switch char { + case "S": + f = append(f, uintptr(0)) + default: + val, err := strconv.ParseUint(char, 16, 8) + if err != nil { + panic("didn't understand js stack trace") + } + f = append(f, uintptr(val)+1) + } + } + return f +} + +// variables to prevent inlining +var ( + findPtr = func() uintptr { + funcs := jsMarkStack() + if len(funcs) == 0 { + panic("failed to find function pointer") + } + return funcs[0] + } + + getStack = func(offset, amount int) (stack []uintptr, next_offset int) { + return jsMarkStack(), 0 + } +) diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/stack_tags_main.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/stack_tags_main.go new file mode 100644 index 0000000000..4da89e44f8 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/jtolds/gls/stack_tags_main.go @@ -0,0 +1,30 @@ +// +build !js + +package gls + +// This file is used for standard Go builds, which have the expected runtime +// support + +import ( + "runtime" +) + +var ( + findPtr = func() uintptr { + var pc [1]uintptr + n := runtime.Callers(4, pc[:]) + if n != 1 { + panic("failed to find function pointer") + } + return pc[0] + } + + getStack = func(offset, amount int) (stack []uintptr, next_offset int) { + stack = make([]uintptr, amount) + stack = stack[:runtime.Callers(offset, stack)] + if len(stack) < amount { + return stack, 0 + } + return stack, offset + len(stack) + } +) diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/.gitignore b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/.gitignore new file mode 100644 index 0000000000..1f088e844d --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/.gitignore @@ -0,0 +1,4 @@ +/.idea +/coverage.* +.DS_Store +*.iml diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/.travis.yml 
b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/.travis.yml new file mode 100644 index 0000000000..6a4065b7cc --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.x + +env: +- GO111MODULE=on + +script: +- make build + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/CONTRIBUTING.md b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/CONTRIBUTING.md new file mode 100644 index 0000000000..1820ecb331 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/CONTRIBUTING.md @@ -0,0 +1,12 @@ +# Contributing + +In general, the code posted to the [SmartyStreets github organization](https://github.com/smartystreets) is created to solve specific problems at SmartyStreets that are ancillary to our core products in the address verification industry and may or may not be useful to other organizations or developers. Our reason for posting said code isn't necessarily to solicit feedback or contributions from the community but more as a showcase of some of the approaches to solving problems we have adopted. + +Having stated that, we do consider issues raised by other githubbers as well as contributions submitted via pull requests. When submitting such a pull request, please follow these guidelines: + +- _Look before you leap:_ If the changes you plan to make are significant, it's in everyone's best interest for you to discuss them with a SmartyStreets team member prior to opening a pull request. +- _License and ownership:_ If modifying the `LICENSE.md` file, limit your changes to fixing typographical mistakes. Do NOT modify the actual terms in the license or the copyright by **SmartyStreets, LLC**. 
Code submitted to SmartyStreets projects becomes property of SmartyStreets and must be compatible with the associated license. +- _Testing:_ If the code you are submitting resides in packages/modules covered by automated tests, be sure to add passing tests that cover your changes and assert expected behavior and state. Submit the additional test cases as part of your change set. +- _Style:_ Match your approach to **naming** and **formatting** with the surrounding code. Basically, the code you submit shouldn't stand out. + - "Naming" refers to such constructs as variables, methods, functions, classes, structs, interfaces, packages, modules, directories, files, etc... + - "Formatting" refers to such constructs as whitespace, horizontal line length, vertical function length, vertical file length, indentation, curly braces, etc... diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/LICENSE.md b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/LICENSE.md new file mode 100644 index 0000000000..8ea6f94552 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/LICENSE.md @@ -0,0 +1,23 @@ +Copyright (c) 2016 SmartyStreets, LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +NOTE: Various optional and subordinate components carry their own licensing +requirements and restrictions. Use of those components is subject to the terms +and conditions outlined the respective license of each component. diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/Makefile b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/Makefile new file mode 100755 index 0000000000..dc032df46c --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/Makefile @@ -0,0 +1,11 @@ +#!/usr/bin/make -f + +test: + go test -timeout=1s -race -coverprofile=coverage.txt -covermode=atomic . + +compile: + go build ./... 
+ +build: test compile + +.PHONY: test compile build diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/README.md b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/README.md new file mode 100644 index 0000000000..1cf5c3b604 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/README.md @@ -0,0 +1,623 @@ +[![Build Status](https://travis-ci.org/smartystreets/assertions.svg?branch=master)](https://travis-ci.org/smartystreets/assertions) +[![Code Coverage](https://codecov.io/gh/smartystreets/assertions/branch/master/graph/badge.svg)](https://codecov.io/gh/smartystreets/assertions) +[![Go Report Card](https://goreportcard.com/badge/github.com/smartystreets/assertions)](https://goreportcard.com/report/github.com/smartystreets/assertions) + +# assertions +-- + import "github.com/smartystreets/assertions" + +Package assertions contains the implementations for all assertions which are +referenced in goconvey's `convey` package +(github.com/smartystreets/goconvey/convey) and gunit +(github.com/smartystreets/gunit) for use with the So(...) method. They can also +be used in traditional Go test functions and even in applications. + +https://smartystreets.com + +Many of the assertions lean heavily on work done by Aaron Jacobs in his +excellent oglematchers library. (https://github.com/jacobsa/oglematchers) The +ShouldResemble assertion leans heavily on work done by Daniel Jacques in his +very helpful go-render library. (https://github.com/luci/go-render) + +## Usage + +#### func GoConveyMode + +```go +func GoConveyMode(yes bool) +``` +GoConveyMode provides control over JSON serialization of failures. When using +the assertions in this package from the convey package JSON results are very +helpful and can be rendered in a DIFF view. In that case, this function will be +called with a true value to enable the JSON serialization. 
By default, the +assertions in this package will not serializer a JSON result, making standalone +usage more convenient. + +#### func ShouldAlmostEqual + +```go +func ShouldAlmostEqual(actual interface{}, expected ...interface{}) string +``` +ShouldAlmostEqual makes sure that two parameters are close enough to being +equal. The acceptable delta may be specified with a third argument, or a very +small default delta will be used. + +#### func ShouldBeBetween + +```go +func ShouldBeBetween(actual interface{}, expected ...interface{}) string +``` +ShouldBeBetween receives exactly three parameters: an actual value, a lower +bound, and an upper bound. It ensures that the actual value is between both +bounds (but not equal to either of them). + +#### func ShouldBeBetweenOrEqual + +```go +func ShouldBeBetweenOrEqual(actual interface{}, expected ...interface{}) string +``` +ShouldBeBetweenOrEqual receives exactly three parameters: an actual value, a +lower bound, and an upper bound. It ensures that the actual value is between +both bounds or equal to one of them. + +#### func ShouldBeBlank + +```go +func ShouldBeBlank(actual interface{}, expected ...interface{}) string +``` +ShouldBeBlank receives exactly 1 string parameter and ensures that it is equal +to "". + +#### func ShouldBeChronological + +```go +func ShouldBeChronological(actual interface{}, expected ...interface{}) string +``` +ShouldBeChronological receives a []time.Time slice and asserts that they are in +chronological order starting with the first time.Time as the earliest. + +#### func ShouldBeEmpty + +```go +func ShouldBeEmpty(actual interface{}, expected ...interface{}) string +``` +ShouldBeEmpty receives a single parameter (actual) and determines whether or not +calling len(actual) would return `0`. 
It obeys the rules specified by the len +function for determining length: http://golang.org/pkg/builtin/#len + +#### func ShouldBeError + +```go +func ShouldBeError(actual interface{}, expected ...interface{}) string +``` +ShouldBeError asserts that the first argument implements the error interface. It +also compares the first argument against the second argument if provided (which +must be an error message string or another error value). + +#### func ShouldBeFalse + +```go +func ShouldBeFalse(actual interface{}, expected ...interface{}) string +``` +ShouldBeFalse receives a single parameter and ensures that it is false. + +#### func ShouldBeGreaterThan + +```go +func ShouldBeGreaterThan(actual interface{}, expected ...interface{}) string +``` +ShouldBeGreaterThan receives exactly two parameters and ensures that the first +is greater than the second. + +#### func ShouldBeGreaterThanOrEqualTo + +```go +func ShouldBeGreaterThanOrEqualTo(actual interface{}, expected ...interface{}) string +``` +ShouldBeGreaterThanOrEqualTo receives exactly two parameters and ensures that +the first is greater than or equal to the second. + +#### func ShouldBeIn + +```go +func ShouldBeIn(actual interface{}, expected ...interface{}) string +``` +ShouldBeIn receives at least 2 parameters. The first is a proposed member of the +collection that is passed in either as the second parameter, or of the +collection that is comprised of all the remaining parameters. This assertion +ensures that the proposed member is in the collection (using ShouldEqual). + +#### func ShouldBeLessThan + +```go +func ShouldBeLessThan(actual interface{}, expected ...interface{}) string +``` +ShouldBeLessThan receives exactly two parameters and ensures that the first is +less than the second. 
+ +#### func ShouldBeLessThanOrEqualTo + +```go +func ShouldBeLessThanOrEqualTo(actual interface{}, expected ...interface{}) string +``` +ShouldBeLessThan receives exactly two parameters and ensures that the first is +less than or equal to the second. + +#### func ShouldBeNil + +```go +func ShouldBeNil(actual interface{}, expected ...interface{}) string +``` +ShouldBeNil receives a single parameter and ensures that it is nil. + +#### func ShouldBeTrue + +```go +func ShouldBeTrue(actual interface{}, expected ...interface{}) string +``` +ShouldBeTrue receives a single parameter and ensures that it is true. + +#### func ShouldBeZeroValue + +```go +func ShouldBeZeroValue(actual interface{}, expected ...interface{}) string +``` +ShouldBeZeroValue receives a single parameter and ensures that it is the Go +equivalent of the default value, or "zero" value. + +#### func ShouldContain + +```go +func ShouldContain(actual interface{}, expected ...interface{}) string +``` +ShouldContain receives exactly two parameters. The first is a slice and the +second is a proposed member. Membership is determined using ShouldEqual. + +#### func ShouldContainKey + +```go +func ShouldContainKey(actual interface{}, expected ...interface{}) string +``` +ShouldContainKey receives exactly two parameters. The first is a map and the +second is a proposed key. Keys are compared with a simple '=='. + +#### func ShouldContainSubstring + +```go +func ShouldContainSubstring(actual interface{}, expected ...interface{}) string +``` +ShouldContainSubstring receives exactly 2 string parameters and ensures that the +first contains the second as a substring. + +#### func ShouldEndWith + +```go +func ShouldEndWith(actual interface{}, expected ...interface{}) string +``` +ShouldEndWith receives exactly 2 string parameters and ensures that the first +ends with the second. 
+ +#### func ShouldEqual + +```go +func ShouldEqual(actual interface{}, expected ...interface{}) string +``` +ShouldEqual receives exactly two parameters and does an equality check using the +following semantics: 1. If the expected and actual values implement an Equal +method in the form `func (this T) Equal(that T) bool` then call the method. If +true, they are equal. 2. The expected and actual values are judged equal or not +by oglematchers.Equals. + +#### func ShouldEqualJSON + +```go +func ShouldEqualJSON(actual interface{}, expected ...interface{}) string +``` +ShouldEqualJSON receives exactly two parameters and does an equality check by +marshalling to JSON + +#### func ShouldEqualTrimSpace + +```go +func ShouldEqualTrimSpace(actual interface{}, expected ...interface{}) string +``` +ShouldEqualTrimSpace receives exactly 2 string parameters and ensures that the +first is equal to the second after removing all leading and trailing whitespace +using strings.TrimSpace(first). + +#### func ShouldEqualWithout + +```go +func ShouldEqualWithout(actual interface{}, expected ...interface{}) string +``` +ShouldEqualWithout receives exactly 3 string parameters and ensures that the +first is equal to the second after removing all instances of the third from the +first using strings.Replace(first, third, "", -1). + +#### func ShouldHappenAfter + +```go +func ShouldHappenAfter(actual interface{}, expected ...interface{}) string +``` +ShouldHappenAfter receives exactly 2 time.Time arguments and asserts that the +first happens after the second. + +#### func ShouldHappenBefore + +```go +func ShouldHappenBefore(actual interface{}, expected ...interface{}) string +``` +ShouldHappenBefore receives exactly 2 time.Time arguments and asserts that the +first happens before the second. 
+ +#### func ShouldHappenBetween + +```go +func ShouldHappenBetween(actual interface{}, expected ...interface{}) string +``` +ShouldHappenBetween receives exactly 3 time.Time arguments and asserts that the +first happens between (not on) the second and third. + +#### func ShouldHappenOnOrAfter + +```go +func ShouldHappenOnOrAfter(actual interface{}, expected ...interface{}) string +``` +ShouldHappenOnOrAfter receives exactly 2 time.Time arguments and asserts that +the first happens on or after the second. + +#### func ShouldHappenOnOrBefore + +```go +func ShouldHappenOnOrBefore(actual interface{}, expected ...interface{}) string +``` +ShouldHappenOnOrBefore receives exactly 2 time.Time arguments and asserts that +the first happens on or before the second. + +#### func ShouldHappenOnOrBetween + +```go +func ShouldHappenOnOrBetween(actual interface{}, expected ...interface{}) string +``` +ShouldHappenOnOrBetween receives exactly 3 time.Time arguments and asserts that +the first happens between or on the second and third. + +#### func ShouldHappenWithin + +```go +func ShouldHappenWithin(actual interface{}, expected ...interface{}) string +``` +ShouldHappenWithin receives a time.Time, a time.Duration, and a time.Time (3 +arguments) and asserts that the first time.Time happens within or on the +duration specified relative to the other time.Time. + +#### func ShouldHaveLength + +```go +func ShouldHaveLength(actual interface{}, expected ...interface{}) string +``` +ShouldHaveLength receives 2 parameters. The first is a collection to check the +length of, the second being the expected length. It obeys the rules specified by +the len function for determining length: http://golang.org/pkg/builtin/#len + +#### func ShouldHaveSameTypeAs + +```go +func ShouldHaveSameTypeAs(actual interface{}, expected ...interface{}) string +``` +ShouldHaveSameTypeAs receives exactly two parameters and compares their +underlying types for equality. 
+ +#### func ShouldImplement + +```go +func ShouldImplement(actual interface{}, expectedList ...interface{}) string +``` +ShouldImplement receives exactly two parameters and ensures that the first +implements the interface type of the second. + +#### func ShouldNotAlmostEqual + +```go +func ShouldNotAlmostEqual(actual interface{}, expected ...interface{}) string +``` +ShouldNotAlmostEqual is the inverse of ShouldAlmostEqual + +#### func ShouldNotBeBetween + +```go +func ShouldNotBeBetween(actual interface{}, expected ...interface{}) string +``` +ShouldNotBeBetween receives exactly three parameters: an actual value, a lower +bound, and an upper bound. It ensures that the actual value is NOT between both +bounds. + +#### func ShouldNotBeBetweenOrEqual + +```go +func ShouldNotBeBetweenOrEqual(actual interface{}, expected ...interface{}) string +``` +ShouldNotBeBetweenOrEqual receives exactly three parameters: an actual value, a +lower bound, and an upper bound. It ensures that the actual value is nopt +between the bounds nor equal to either of them. + +#### func ShouldNotBeBlank + +```go +func ShouldNotBeBlank(actual interface{}, expected ...interface{}) string +``` +ShouldNotBeBlank receives exactly 1 string parameter and ensures that it is +equal to "". + +#### func ShouldNotBeChronological + +```go +func ShouldNotBeChronological(actual interface{}, expected ...interface{}) string +``` +ShouldNotBeChronological receives a []time.Time slice and asserts that they are +NOT in chronological order. + +#### func ShouldNotBeEmpty + +```go +func ShouldNotBeEmpty(actual interface{}, expected ...interface{}) string +``` +ShouldNotBeEmpty receives a single parameter (actual) and determines whether or +not calling len(actual) would return a value greater than zero. 
It obeys the +rules specified by the `len` function for determining length: +http://golang.org/pkg/builtin/#len + +#### func ShouldNotBeIn + +```go +func ShouldNotBeIn(actual interface{}, expected ...interface{}) string +``` +ShouldNotBeIn receives at least 2 parameters. The first is a proposed member of +the collection that is passed in either as the second parameter, or of the +collection that is comprised of all the remaining parameters. This assertion +ensures that the proposed member is NOT in the collection (using ShouldEqual). + +#### func ShouldNotBeNil + +```go +func ShouldNotBeNil(actual interface{}, expected ...interface{}) string +``` +ShouldNotBeNil receives a single parameter and ensures that it is not nil. + +#### func ShouldNotBeZeroValue + +```go +func ShouldNotBeZeroValue(actual interface{}, expected ...interface{}) string +``` +ShouldBeZeroValue receives a single parameter and ensures that it is NOT the Go +equivalent of the default value, or "zero" value. + +#### func ShouldNotContain + +```go +func ShouldNotContain(actual interface{}, expected ...interface{}) string +``` +ShouldNotContain receives exactly two parameters. The first is a slice and the +second is a proposed member. Membership is determinied using ShouldEqual. + +#### func ShouldNotContainKey + +```go +func ShouldNotContainKey(actual interface{}, expected ...interface{}) string +``` +ShouldNotContainKey receives exactly two parameters. The first is a map and the +second is a proposed absent key. Keys are compared with a simple '=='. + +#### func ShouldNotContainSubstring + +```go +func ShouldNotContainSubstring(actual interface{}, expected ...interface{}) string +``` +ShouldNotContainSubstring receives exactly 2 string parameters and ensures that +the first does NOT contain the second as a substring. 
+ +#### func ShouldNotEndWith + +```go +func ShouldNotEndWith(actual interface{}, expected ...interface{}) string +``` +ShouldEndWith receives exactly 2 string parameters and ensures that the first +does not end with the second. + +#### func ShouldNotEqual + +```go +func ShouldNotEqual(actual interface{}, expected ...interface{}) string +``` +ShouldNotEqual receives exactly two parameters and does an inequality check. See +ShouldEqual for details on how equality is determined. + +#### func ShouldNotHappenOnOrBetween + +```go +func ShouldNotHappenOnOrBetween(actual interface{}, expected ...interface{}) string +``` +ShouldNotHappenOnOrBetween receives exactly 3 time.Time arguments and asserts +that the first does NOT happen between or on the second or third. + +#### func ShouldNotHappenWithin + +```go +func ShouldNotHappenWithin(actual interface{}, expected ...interface{}) string +``` +ShouldNotHappenWithin receives a time.Time, a time.Duration, and a time.Time (3 +arguments) and asserts that the first time.Time does NOT happen within or on the +duration specified relative to the other time.Time. + +#### func ShouldNotHaveSameTypeAs + +```go +func ShouldNotHaveSameTypeAs(actual interface{}, expected ...interface{}) string +``` +ShouldNotHaveSameTypeAs receives exactly two parameters and compares their +underlying types for inequality. + +#### func ShouldNotImplement + +```go +func ShouldNotImplement(actual interface{}, expectedList ...interface{}) string +``` +ShouldNotImplement receives exactly two parameters and ensures that the first +does NOT implement the interface type of the second. + +#### func ShouldNotPanic + +```go +func ShouldNotPanic(actual interface{}, expected ...interface{}) (message string) +``` +ShouldNotPanic receives a void, niladic function and expects to execute the +function without any panic. 
+ +#### func ShouldNotPanicWith + +```go +func ShouldNotPanicWith(actual interface{}, expected ...interface{}) (message string) +``` +ShouldNotPanicWith receives a void, niladic function and expects to recover a +panic whose content differs from the second argument. + +#### func ShouldNotPointTo + +```go +func ShouldNotPointTo(actual interface{}, expected ...interface{}) string +``` +ShouldNotPointTo receives exactly two parameters and checks to see that they +point to different addresess. + +#### func ShouldNotResemble + +```go +func ShouldNotResemble(actual interface{}, expected ...interface{}) string +``` +ShouldNotResemble receives exactly two parameters and does an inverse deep equal +check (see reflect.DeepEqual) + +#### func ShouldNotStartWith + +```go +func ShouldNotStartWith(actual interface{}, expected ...interface{}) string +``` +ShouldNotStartWith receives exactly 2 string parameters and ensures that the +first does not start with the second. + +#### func ShouldPanic + +```go +func ShouldPanic(actual interface{}, expected ...interface{}) (message string) +``` +ShouldPanic receives a void, niladic function and expects to recover a panic. + +#### func ShouldPanicWith + +```go +func ShouldPanicWith(actual interface{}, expected ...interface{}) (message string) +``` +ShouldPanicWith receives a void, niladic function and expects to recover a panic +with the second argument as the content. + +#### func ShouldPointTo + +```go +func ShouldPointTo(actual interface{}, expected ...interface{}) string +``` +ShouldPointTo receives exactly two parameters and checks to see that they point +to the same address. 
+ +#### func ShouldResemble + +```go +func ShouldResemble(actual interface{}, expected ...interface{}) string +``` +ShouldResemble receives exactly two parameters and does a deep equal check (see +reflect.DeepEqual) + +#### func ShouldStartWith + +```go +func ShouldStartWith(actual interface{}, expected ...interface{}) string +``` +ShouldStartWith receives exactly 2 string parameters and ensures that the first +starts with the second. + +#### func So + +```go +func So(actual interface{}, assert assertion, expected ...interface{}) (bool, string) +``` +So is a convenience function (as opposed to an inconvenience function?) for +running assertions on arbitrary arguments in any context, be it for testing or +even application logging. It allows you to perform assertion-like behavior (and +get nicely formatted messages detailing discrepancies) but without the program +blowing up or panicking. All that is required is to import this package and call +`So` with one of the assertions exported by this package as the second +parameter. The first return parameter is a boolean indicating if the assertion +was true. The second return parameter is the well-formatted message showing why +an assertion was incorrect, or blank if the assertion was correct. + +Example: + + if ok, message := So(x, ShouldBeGreaterThan, y); !ok { + log.Println(message) + } + +For an alternative implementation of So (that provides more flexible return +options) see the `So` function in the package at +github.com/smartystreets/assertions/assert. + +#### type Assertion + +```go +type Assertion struct { +} +``` + + +#### func New + +```go +func New(t testingT) *Assertion +``` +New swallows the *testing.T struct and prints failed assertions using t.Error. +Example: assertions.New(t).So(1, should.Equal, 1) + +#### func (*Assertion) Failed + +```go +func (this *Assertion) Failed() bool +``` +Failed reports whether any calls to So (on this Assertion instance) have failed. 
+ +#### func (*Assertion) So + +```go +func (this *Assertion) So(actual interface{}, assert assertion, expected ...interface{}) bool +``` +So calls the standalone So function and additionally, calls t.Error in failure +scenarios. + +#### type FailureView + +```go +type FailureView struct { + Message string `json:"Message"` + Expected string `json:"Expected"` + Actual string `json:"Actual"` +} +``` + +This struct is also declared in +github.com/smartystreets/goconvey/convey/reporting. The json struct tags should +be equal in both declarations. + +#### type Serializer + +```go +type Serializer interface { + // contains filtered or unexported methods +} +``` diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/assert/assert.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/assert/assert.go new file mode 100644 index 0000000000..5e614589d5 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/assert/assert.go @@ -0,0 +1,117 @@ +package assert + +import ( + "fmt" + "io" + "os" + "reflect" + "runtime" + "strings" +) + +// Result contains a single assertion failure as an error. +// You should not create a Result directly, use So instead. +// Once created, a Result is read-only and only allows +// queries using the provided methods. +type Result struct { + invocation string + err error + + stdout io.Writer + logger *logger +} + +// So is a convenience function (as opposed to an inconvenience function?) +// for running assertions on arbitrary arguments in any context. It allows you to perform +// assertion-like behavior and decide what happens in the event of a failure. +// It is a variant of assertions.So in every respect except its return value. 
+// In this case, the return value is a *Result which possesses several of its +// own convenience methods: +// +// fmt.Println(assert.So(1, should.Equal, 1)) // Calls String() and prints the representation of the assertion. +// assert.So(1, should.Equal, 1).Println() // Calls fmt.Print with the failure message and file:line header. +// assert.So(1, should.Equal, 1).Log() // Calls log.Print with the failure message and file:line header. +// assert.So(1, should.Equal, 1).Panic() // Calls log.Panic with the failure message and file:line header. +// assert.So(1, should.Equal, 1).Fatal() // Calls log.Fatal with the failure message and file:line header. +// if err := assert.So(1, should.Equal, 1).Error(); err != nil { +// // Allows custom handling of the error, which will include the failure message and file:line header. +// } +func So(actual interface{}, assert assertion, expected ...interface{}) *Result { + result := new(Result) + result.stdout = os.Stdout + result.invocation = fmt.Sprintf("So(actual: %v, %v, expected: %v)", actual, assertionName(assert), expected) + if failure := assert(actual, expected...); len(failure) > 0 { + _, file, line, _ := runtime.Caller(1) + result.err = fmt.Errorf("Assertion failure at %s:%d\n%s", file, line, failure) + } + return result +} +func assertionName(i interface{}) string { + functionAddress := runtime.FuncForPC(reflect.ValueOf(i).Pointer()) + fullNameStartingWithPackage := functionAddress.Name() + parts := strings.Split(fullNameStartingWithPackage, "/") + baseName := parts[len(parts)-1] + return strings.Replace(baseName, "assertions.Should", "should.", 1) +} + +// Failed returns true if the assertion failed, false if it passed. +func (this *Result) Failed() bool { + return !this.Passed() +} + +// Passed returns true if the assertion passed, false if it failed. 
+func (this *Result) Passed() bool { + return this.err == nil +} + +// Error returns the error representing an assertion failure, which is nil in the case of a passed assertion. +func (this *Result) Error() error { + return this.err +} + +// String implements fmt.Stringer. +// It returns the error as a string in the case of an assertion failure. +// Unlike other methods defined herein, if returns a non-empty +// representation of the assertion as confirmation of success. +func (this *Result) String() string { + if this.Passed() { + return fmt.Sprintf("✔ %s", this.invocation) + } else { + return fmt.Sprintf("✘ %s\n%v", this.invocation, this.Error()) + } +} + +// Println calls fmt.Println in the case of an assertion failure. +func (this *Result) Println() *Result { + if this.Failed() { + fmt.Fprintln(this.stdout, this) + } + return this +} + +// Log calls log.Print in the case of an assertion failure. +func (this *Result) Log() *Result { + if this.Failed() { + this.logger.Print(this) + } + return this +} + +// Panic calls log.Panic in the case of an assertion failure. +func (this *Result) Panic() *Result { + if this.Failed() { + this.logger.Panic(this) + } + return this +} + +// Fatal calls log.Fatal in the case of an assertion failure. +func (this *Result) Fatal() *Result { + if this.Failed() { + this.logger.Fatal(this) + } + return this +} + +// assertion is a copy of github.com/smartystreets/assertions.assertion. 
+type assertion func(actual interface{}, expected ...interface{}) string diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/assert/assert_failed_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/assert/assert_failed_test.go new file mode 100644 index 0000000000..778179c273 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/assert/assert_failed_test.go @@ -0,0 +1,60 @@ +package assert + +import ( + "testing" + + "github.com/smartystreets/assertions/internal/unit" + "github.com/smartystreets/assertions/should" +) + +func TestFailedResultFixture(t *testing.T) { + unit.Run(new(FailedResultFixture), t) +} + +type FailedResultFixture struct { + *unit.Fixture + + result *Result +} + +func (this *FailedResultFixture) Setup() { + this.result = So(1, should.Equal, 2) + this.result.logger = capture() + this.result.stdout = this.result.logger.Log +} + +func (this *FailedResultFixture) assertLogMessageContents() { + this.So(this.result.logger.Log.String(), should.ContainSubstring, "✘ So(actual: 1, should.Equal, expected: [2])") + this.So(this.result.logger.Log.String(), should.ContainSubstring, "Assertion failure at ") + this.So(this.result.logger.Log.String(), should.EndWith, "Expected: '2'\nActual: '1'\n(Should be equal)\n") +} + +func (this *FailedResultFixture) TestQueryFunctions() { + this.So(this.result.Failed(), should.BeTrue) + this.So(this.result.Passed(), should.BeFalse) + this.So(this.result.logger.Log.Len(), should.Equal, 0) + + this.result.logger.Print(this.result.String()) + this.result.logger.Print(this.result.Error()) + this.assertLogMessageContents() +} + +func (this *FailedResultFixture) TestPrintln() { + this.So(this.result.Println(), should.Equal, this.result) + this.assertLogMessageContents() +} + +func (this *FailedResultFixture) TestLog() { + this.So(this.result.Log(), should.Equal, this.result) + 
this.assertLogMessageContents() +} + +func (this *FailedResultFixture) TestPanic() { + this.So(func() { this.result.Panic() }, should.Panic) + this.assertLogMessageContents() +} + +func (this *FailedResultFixture) TestFatal() { + this.So(this.result.Fatal(), should.Equal, this.result) + this.assertLogMessageContents() +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/assert/assert_passed_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/assert/assert_passed_test.go new file mode 100644 index 0000000000..615dcfc42f --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/assert/assert_passed_test.go @@ -0,0 +1,47 @@ +package assert + +import ( + "testing" + + "github.com/smartystreets/assertions/internal/unit" + "github.com/smartystreets/assertions/should" +) + +func TestPassedResultFixture(t *testing.T) { + unit.Run(new(PassedResultFixture), t) +} + +type PassedResultFixture struct { + *unit.Fixture + + result *Result +} + +func (this *PassedResultFixture) Setup() { + this.result = So(1, should.Equal, 1) + this.result.logger = capture() + this.result.stdout = this.result.logger.Log +} + +func (this *PassedResultFixture) TestQueryFunctions() { + this.So(this.result.Error(), should.BeNil) + this.So(this.result.Failed(), should.BeFalse) + this.So(this.result.Passed(), should.BeTrue) + this.So(this.result.String(), should.Equal, "✔ So(actual: 1, should.Equal, expected: [1])") +} +func (this *PassedResultFixture) TestPrintln() { + this.So(this.result.Println(), should.Equal, this.result) + this.So(this.result.logger.Log.String(), should.BeBlank) +} +func (this *PassedResultFixture) TestLog() { + this.So(this.result.Log(), should.Equal, this.result) + this.So(this.result.logger.Log.String(), should.BeBlank) +} +func (this *PassedResultFixture) TestPanic() { + this.So(this.result.Panic(), should.Equal, this.result) + 
this.So(this.result.logger.Log.String(), should.BeBlank) +} +func (this *PassedResultFixture) TestFatal() { + this.So(this.result.Fatal(), should.Equal, this.result) + this.So(this.result.logger.Log.String(), should.BeBlank) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/assert/example/main.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/assert/example/main.go new file mode 100644 index 0000000000..4e80a7343e --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/assert/example/main.go @@ -0,0 +1,49 @@ +package main + +import ( + "fmt" + + "github.com/smartystreets/assertions/assert" + "github.com/smartystreets/assertions/should" +) + +func main() { + exampleUsage(assert.So(1, should.Equal, 1)) // pass + exampleUsage(assert.So(1, should.Equal, 2)) // fail +} + +func exampleUsage(result *assert.Result) { + if result.Passed() { + fmt.Println("The assertion passed:", result) + } else if result.Failed() { + fmt.Println("The assertion failed:", result) + } + + fmt.Print("\nAbout to see result.Error()...\n\n") + + if err := result.Error(); err != nil { + fmt.Println(err) + } + + fmt.Print("\nAbout to see result.Println()...\n\n") + + result.Println() + + fmt.Print("\nAbout to see result.Log()...\n\n") + + result.Log() + + fmt.Print("\nAbout to see result.Panic()...\n\n") + + defer func() { + recover() + + fmt.Print("\nAbout to see result.Fatal()...\n\n") + + result.Fatal() + + fmt.Print("---------------------------------------------------------------\n\n") + }() + + result.Panic() +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/assert/logger.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/assert/logger.go new file mode 100644 index 0000000000..86176d6842 --- /dev/null +++ 
b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/assert/logger.go @@ -0,0 +1,76 @@ +package assert + +import ( + "bytes" + "fmt" + "log" + "os" +) + +// logger is meant be included as a pointer field on a struct. Leaving the +// instance as a nil reference will cause any calls on the *logger to forward +// to the corresponding functions from the standard log package. This is meant +// to be the behavior in production. In testing, set the field to a non-nil +// instance of a *logger to record log statements for later inspection. +type logger struct { + *log.Logger + + Log *bytes.Buffer + Calls int +} + +// capture creates a new *logger instance with an internal buffer. The prefix +// and flags default to the values of log.Prefix() and log.Flags(), respectively. +// This function is meant to be called from test code. See the godoc for the +// logger struct for details. +func capture() *logger { + out := new(bytes.Buffer) + inner := log.New(out, log.Prefix(), log.Flags()) + inner.SetPrefix("") + return &logger{ + Log: out, + Logger: inner, + } +} + +// Fatal -> log.Fatal (except in testing it uses log.Print) +func (this *logger) Fatal(v ...interface{}) { + if this == nil { + this.Output(3, fmt.Sprint(v...)) + os.Exit(1) + } else { + this.Calls++ + this.Logger.Print(v...) + } +} + +// Panic -> log.Panic +func (this *logger) Panic(v ...interface{}) { + if this == nil { + s := fmt.Sprint(v...) + this.Output(3, s) + panic(s) + } else { + this.Calls++ + this.Logger.Panic(v...) + } +} + +// Print -> log.Print +func (this *logger) Print(v ...interface{}) { + if this == nil { + this.Output(3, fmt.Sprint(v...)) + } else { + this.Calls++ + this.Logger.Print(v...) 
+ } +} + +// Output -> log.Output +func (this *logger) Output(calldepth int, s string) error { + if this == nil { + return log.Output(calldepth, s) + } + this.Calls++ + return this.Logger.Output(calldepth, s) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/collections.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/collections.go new file mode 100644 index 0000000000..b534d4bafa --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/collections.go @@ -0,0 +1,244 @@ +package assertions + +import ( + "fmt" + "reflect" + + "github.com/smartystreets/assertions/internal/oglematchers" +) + +// ShouldContain receives exactly two parameters. The first is a slice and the +// second is a proposed member. Membership is determined using ShouldEqual. +func ShouldContain(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + if matchError := oglematchers.Contains(expected[0]).Matches(actual); matchError != nil { + typeName := reflect.TypeOf(actual) + + if fmt.Sprintf("%v", matchError) == "which is not a slice or array" { + return fmt.Sprintf(shouldHaveBeenAValidCollection, typeName) + } + return fmt.Sprintf(shouldHaveContained, typeName, expected[0]) + } + return success +} + +// ShouldNotContain receives exactly two parameters. The first is a slice and the +// second is a proposed member. Membership is determinied using ShouldEqual. 
+func ShouldNotContain(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + typeName := reflect.TypeOf(actual) + + if matchError := oglematchers.Contains(expected[0]).Matches(actual); matchError != nil { + if fmt.Sprintf("%v", matchError) == "which is not a slice or array" { + return fmt.Sprintf(shouldHaveBeenAValidCollection, typeName) + } + return success + } + return fmt.Sprintf(shouldNotHaveContained, typeName, expected[0]) +} + +// ShouldContainKey receives exactly two parameters. The first is a map and the +// second is a proposed key. Keys are compared with a simple '=='. +func ShouldContainKey(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + keys, isMap := mapKeys(actual) + if !isMap { + return fmt.Sprintf(shouldHaveBeenAValidMap, reflect.TypeOf(actual)) + } + + if !keyFound(keys, expected[0]) { + return fmt.Sprintf(shouldHaveContainedKey, reflect.TypeOf(actual), expected) + } + + return "" +} + +// ShouldNotContainKey receives exactly two parameters. The first is a map and the +// second is a proposed absent key. Keys are compared with a simple '=='. 
+func ShouldNotContainKey(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + keys, isMap := mapKeys(actual) + if !isMap { + return fmt.Sprintf(shouldHaveBeenAValidMap, reflect.TypeOf(actual)) + } + + if keyFound(keys, expected[0]) { + return fmt.Sprintf(shouldNotHaveContainedKey, reflect.TypeOf(actual), expected) + } + + return "" +} + +func mapKeys(m interface{}) ([]reflect.Value, bool) { + value := reflect.ValueOf(m) + if value.Kind() != reflect.Map { + return nil, false + } + return value.MapKeys(), true +} +func keyFound(keys []reflect.Value, expectedKey interface{}) bool { + found := false + for _, key := range keys { + if key.Interface() == expectedKey { + found = true + } + } + return found +} + +// ShouldBeIn receives at least 2 parameters. The first is a proposed member of the collection +// that is passed in either as the second parameter, or of the collection that is comprised +// of all the remaining parameters. This assertion ensures that the proposed member is in +// the collection (using ShouldEqual). +func ShouldBeIn(actual interface{}, expected ...interface{}) string { + if fail := atLeast(1, expected); fail != success { + return fail + } + + if len(expected) == 1 { + return shouldBeIn(actual, expected[0]) + } + return shouldBeIn(actual, expected) +} +func shouldBeIn(actual interface{}, expected interface{}) string { + if matchError := oglematchers.Contains(actual).Matches(expected); matchError != nil { + return fmt.Sprintf(shouldHaveBeenIn, actual, reflect.TypeOf(expected)) + } + return success +} + +// ShouldNotBeIn receives at least 2 parameters. The first is a proposed member of the collection +// that is passed in either as the second parameter, or of the collection that is comprised +// of all the remaining parameters. This assertion ensures that the proposed member is NOT in +// the collection (using ShouldEqual). 
+func ShouldNotBeIn(actual interface{}, expected ...interface{}) string { + if fail := atLeast(1, expected); fail != success { + return fail + } + + if len(expected) == 1 { + return shouldNotBeIn(actual, expected[0]) + } + return shouldNotBeIn(actual, expected) +} +func shouldNotBeIn(actual interface{}, expected interface{}) string { + if matchError := oglematchers.Contains(actual).Matches(expected); matchError == nil { + return fmt.Sprintf(shouldNotHaveBeenIn, actual, reflect.TypeOf(expected)) + } + return success +} + +// ShouldBeEmpty receives a single parameter (actual) and determines whether or not +// calling len(actual) would return `0`. It obeys the rules specified by the len +// function for determining length: http://golang.org/pkg/builtin/#len +func ShouldBeEmpty(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } + + if actual == nil { + return success + } + + value := reflect.ValueOf(actual) + switch value.Kind() { + case reflect.Slice: + if value.Len() == 0 { + return success + } + case reflect.Chan: + if value.Len() == 0 { + return success + } + case reflect.Map: + if value.Len() == 0 { + return success + } + case reflect.String: + if value.Len() == 0 { + return success + } + case reflect.Ptr: + elem := value.Elem() + kind := elem.Kind() + if (kind == reflect.Slice || kind == reflect.Array) && elem.Len() == 0 { + return success + } + } + + return fmt.Sprintf(shouldHaveBeenEmpty, actual) +} + +// ShouldNotBeEmpty receives a single parameter (actual) and determines whether or not +// calling len(actual) would return a value greater than zero. 
It obeys the rules +// specified by the `len` function for determining length: http://golang.org/pkg/builtin/#len +func ShouldNotBeEmpty(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } + + if empty := ShouldBeEmpty(actual, expected...); empty != success { + return success + } + return fmt.Sprintf(shouldNotHaveBeenEmpty, actual) +} + +// ShouldHaveLength receives 2 parameters. The first is a collection to check +// the length of, the second being the expected length. It obeys the rules +// specified by the len function for determining length: +// http://golang.org/pkg/builtin/#len +func ShouldHaveLength(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + var expectedLen int64 + lenValue := reflect.ValueOf(expected[0]) + switch lenValue.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + expectedLen = lenValue.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + expectedLen = int64(lenValue.Uint()) + default: + return fmt.Sprintf(shouldHaveBeenAValidInteger, reflect.TypeOf(expected[0])) + } + + if expectedLen < 0 { + return fmt.Sprintf(shouldHaveBeenAValidLength, expected[0]) + } + + value := reflect.ValueOf(actual) + switch value.Kind() { + case reflect.Slice, + reflect.Chan, + reflect.Map, + reflect.String: + if int64(value.Len()) == expectedLen { + return success + } else { + return fmt.Sprintf(shouldHaveHadLength, expectedLen, value.Len(), actual) + } + case reflect.Ptr: + elem := value.Elem() + kind := elem.Kind() + if kind == reflect.Slice || kind == reflect.Array { + if int64(elem.Len()) == expectedLen { + return success + } else { + return fmt.Sprintf(shouldHaveHadLength, expectedLen, elem.Len(), actual) + } + } + } + return fmt.Sprintf(shouldHaveBeenAValidCollection, reflect.TypeOf(actual)) +} diff --git 
a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/collections_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/collections_test.go new file mode 100644 index 0000000000..d8e8f9c274 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/collections_test.go @@ -0,0 +1,185 @@ +package assertions + +import ( + "fmt" + "time" +) + +func (this *AssertionsFixture) TestShouldContainKey() { + this.fail(so(map[int]int{}, ShouldContainKey), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(map[int]int{}, ShouldContainKey, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + this.fail(so(Thing1{}, ShouldContainKey, 1), "You must provide a valid map type (was assertions.Thing1)!") + this.fail(so(nil, ShouldContainKey, 1), "You must provide a valid map type (was )!") + this.fail(so(map[int]int{1: 41}, ShouldContainKey, 2), "Expected the map[int]int to contain the key: [2] (but it didn't)!") + + this.pass(so(map[int]int{1: 41}, ShouldContainKey, 1)) + this.pass(so(map[int]int{1: 41, 2: 42, 3: 43}, ShouldContainKey, 2)) +} + +func (this *AssertionsFixture) TestShouldNotContainKey() { + this.fail(so(map[int]int{}, ShouldNotContainKey), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(map[int]int{}, ShouldNotContainKey, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + this.fail(so(Thing1{}, ShouldNotContainKey, 1), "You must provide a valid map type (was assertions.Thing1)!") + this.fail(so(nil, ShouldNotContainKey, 1), "You must provide a valid map type (was )!") + this.fail(so(map[int]int{1: 41}, ShouldNotContainKey, 1), "Expected the map[int]int NOT to contain the key: [1] (but it did)!") + this.pass(so(map[int]int{1: 41}, ShouldNotContainKey, 2)) +} + +func (this *AssertionsFixture) TestShouldContain() { + 
this.fail(so([]int{}, ShouldContain), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so([]int{}, ShouldContain, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + this.fail(so(Thing1{}, ShouldContain, 1), "You must provide a valid container (was assertions.Thing1)!") + this.fail(so(nil, ShouldContain, 1), "You must provide a valid container (was )!") + this.fail(so([]int{1}, ShouldContain, 2), "Expected the container ([]int) to contain: '2' (but it didn't)!") + this.fail(so([][]int{{1}}, ShouldContain, []int{2}), "Expected the container ([][]int) to contain: '[2]' (but it didn't)!") + + this.pass(so([]int{1}, ShouldContain, 1)) + this.pass(so([]int{1, 2, 3}, ShouldContain, 2)) + this.pass(so([][]int{{1}, {2}, {3}}, ShouldContain, []int{2})) +} + +func (this *AssertionsFixture) TestShouldNotContain() { + this.fail(so([]int{}, ShouldNotContain), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so([]int{}, ShouldNotContain, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + this.fail(so(Thing1{}, ShouldNotContain, 1), "You must provide a valid container (was assertions.Thing1)!") + this.fail(so(nil, ShouldNotContain, 1), "You must provide a valid container (was )!") + + this.fail(so([]int{1}, ShouldNotContain, 1), "Expected the container ([]int) NOT to contain: '1' (but it did)!") + this.fail(so([]int{1, 2, 3}, ShouldNotContain, 2), "Expected the container ([]int) NOT to contain: '2' (but it did)!") + this.fail(so([][]int{{1}, {2}, {3}}, ShouldNotContain, []int{2}), "Expected the container ([][]int) NOT to contain: '[2]' (but it did)!") + + this.pass(so([]int{1}, ShouldNotContain, 2)) + this.pass(so([][]int{{1}, {2}, {3}}, ShouldNotContain, []int{4})) +} + +func (this *AssertionsFixture) TestShouldBeIn() { + this.fail(so(4, ShouldBeIn), needNonEmptyCollection) + + container := []int{1, 2, 3, 4} + this.pass(so(4, ShouldBeIn, 
container)) + this.pass(so(4, ShouldBeIn, 1, 2, 3, 4)) + this.pass(so([]int{4}, ShouldBeIn, [][]int{{1}, {2}, {3}, {4}})) + this.pass(so([]int{4}, ShouldBeIn, []int{1}, []int{2}, []int{3}, []int{4})) + + this.fail(so(4, ShouldBeIn, 1, 2, 3), "Expected '4' to be in the container ([]interface {}), but it wasn't!") + this.fail(so(4, ShouldBeIn, []int{1, 2, 3}), "Expected '4' to be in the container ([]int), but it wasn't!") + this.fail(so([]int{4}, ShouldBeIn, []int{1}, []int{2}, []int{3}), "Expected '[4]' to be in the container ([]interface {}), but it wasn't!") + this.fail(so([]int{4}, ShouldBeIn, [][]int{{1}, {2}, {3}}), "Expected '[4]' to be in the container ([][]int), but it wasn't!") +} + +func (this *AssertionsFixture) TestShouldNotBeIn() { + this.fail(so(4, ShouldNotBeIn), needNonEmptyCollection) + + container := []int{1, 2, 3, 4} + this.pass(so(42, ShouldNotBeIn, container)) + this.pass(so(42, ShouldNotBeIn, 1, 2, 3, 4)) + this.pass(so([]int{42}, ShouldNotBeIn, []int{1}, []int{2}, []int{3}, []int{4})) + this.pass(so([]int{42}, ShouldNotBeIn, [][]int{{1}, {2}, {3}, {4}})) + + this.fail(so(2, ShouldNotBeIn, 1, 2, 3), "Expected '2' NOT to be in the container ([]interface {}), but it was!") + this.fail(so(2, ShouldNotBeIn, []int{1, 2, 3}), "Expected '2' NOT to be in the container ([]int), but it was!") + this.fail(so([]int{2}, ShouldNotBeIn, []int{1}, []int{2}, []int{3}), "Expected '[2]' NOT to be in the container ([]interface {}), but it was!") + this.fail(so([]int{2}, ShouldNotBeIn, [][]int{{1}, {2}, {3}}), "Expected '[2]' NOT to be in the container ([][]int), but it was!") +} + +func (this *AssertionsFixture) TestShouldBeEmpty() { + this.fail(so(1, ShouldBeEmpty, 2, 3), "This assertion requires exactly 0 comparison values (you provided 2).") + + this.pass(so([]int{}, ShouldBeEmpty)) // empty slice + this.pass(so([][]int{}, ShouldBeEmpty)) // empty slice + this.pass(so([]interface{}{}, ShouldBeEmpty)) // empty slice + this.pass(so(map[string]int{}, 
ShouldBeEmpty)) // empty map + this.pass(so("", ShouldBeEmpty)) // empty string + this.pass(so(&[]int{}, ShouldBeEmpty)) // pointer to empty slice + this.pass(so(&[0]int{}, ShouldBeEmpty)) // pointer to empty array + this.pass(so(nil, ShouldBeEmpty)) // nil + this.pass(so(make(chan string), ShouldBeEmpty)) // empty channel + + this.fail(so([]int{1}, ShouldBeEmpty), "Expected [1] to be empty (but it wasn't)!") // non-empty slice + this.fail(so([][]int{{1}}, ShouldBeEmpty), "Expected [[1]] to be empty (but it wasn't)!") // non-empty slice + this.fail(so([]interface{}{1}, ShouldBeEmpty), "Expected [1] to be empty (but it wasn't)!") // non-empty slice + this.fail(so(map[string]int{"hi": 0}, ShouldBeEmpty), "Expected map[hi:0] to be empty (but it wasn't)!") // non-empty map + this.fail(so("hi", ShouldBeEmpty), "Expected hi to be empty (but it wasn't)!") // non-empty string + this.fail(so(&[]int{1}, ShouldBeEmpty), "Expected &[1] to be empty (but it wasn't)!") // pointer to non-empty slice + this.fail(so(&[1]int{1}, ShouldBeEmpty), "Expected &[1] to be empty (but it wasn't)!") // pointer to non-empty array + c := make(chan int, 1) // non-empty channel + go func() { c <- 1 }() + time.Sleep(time.Millisecond) + this.fail(so(c, ShouldBeEmpty), fmt.Sprintf("Expected %+v to be empty (but it wasn't)!", c)) +} + +func (this *AssertionsFixture) TestShouldNotBeEmpty() { + this.fail(so(1, ShouldNotBeEmpty, 2, 3), "This assertion requires exactly 0 comparison values (you provided 2).") + + this.fail(so([]int{}, ShouldNotBeEmpty), "Expected [] to NOT be empty (but it was)!") // empty slice + this.fail(so([]interface{}{}, ShouldNotBeEmpty), "Expected [] to NOT be empty (but it was)!") // empty slice + this.fail(so(map[string]int{}, ShouldNotBeEmpty), "Expected map[] to NOT be empty (but it was)!") // empty map + this.fail(so("", ShouldNotBeEmpty), "Expected to NOT be empty (but it was)!") // empty string + this.fail(so(&[]int{}, ShouldNotBeEmpty), "Expected &[] to NOT be empty (but it 
was)!") // pointer to empty slice + this.fail(so(&[0]int{}, ShouldNotBeEmpty), "Expected &[] to NOT be empty (but it was)!") // pointer to empty array + this.fail(so(nil, ShouldNotBeEmpty), "Expected to NOT be empty (but it was)!") // nil + c := make(chan int, 0) // non-empty channel + this.fail(so(c, ShouldNotBeEmpty), fmt.Sprintf("Expected %+v to NOT be empty (but it was)!", c)) // empty channel + + this.pass(so([]int{1}, ShouldNotBeEmpty)) // non-empty slice + this.pass(so([]interface{}{1}, ShouldNotBeEmpty)) // non-empty slice + this.pass(so(map[string]int{"hi": 0}, ShouldNotBeEmpty)) // non-empty map + this.pass(so("hi", ShouldNotBeEmpty)) // non-empty string + this.pass(so(&[]int{1}, ShouldNotBeEmpty)) // pointer to non-empty slice + this.pass(so(&[1]int{1}, ShouldNotBeEmpty)) // pointer to non-empty array + c = make(chan int, 1) + go func() { c <- 1 }() + time.Sleep(time.Millisecond) + this.pass(so(c, ShouldNotBeEmpty)) +} + +func (this *AssertionsFixture) TestShouldHaveLength() { + this.fail(so(1, ShouldHaveLength, 2), "You must provide a valid container (was int)!") + this.fail(so(nil, ShouldHaveLength, 1), "You must provide a valid container (was )!") + this.fail(so("hi", ShouldHaveLength, float64(1.0)), "You must provide a valid integer (was float64)!") + this.fail(so([]string{}, ShouldHaveLength), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so([]string{}, ShouldHaveLength, 1, 2), "This assertion requires exactly 1 comparison values (you provided 2).") + this.fail(so([]string{}, ShouldHaveLength, -10), "You must provide a valid positive integer (was -10)!") + + this.fail(so([]int{}, ShouldHaveLength, 1), // empty slice + "Expected collection to have length equal to [1], but it's length was [0] instead! contents: []") + + this.fail(so([]interface{}{}, ShouldHaveLength, 1), // empty slice + "Expected collection to have length equal to [1], but it's length was [0] instead! 
contents: []") + + this.fail(so(map[string]int{}, ShouldHaveLength, 1), // empty map + "Expected collection to have length equal to [1], but it's length was [0] instead! contents: map[]") + + this.fail(so("", ShouldHaveLength, 1), // empty string + "Expected collection to have length equal to [1], but it's length was [0] instead! contents: ") + + this.fail(so(&[]int{}, ShouldHaveLength, 1), // pointer to empty slice + "Expected collection to have length equal to [1], but it's length was [0] instead! contents: &[]") + + this.fail(so(&[0]int{}, ShouldHaveLength, 1), // pointer to empty array + "Expected collection to have length equal to [1], but it's length was [0] instead! contents: &[]") + + c := make(chan int, 0) // non-empty channel + this.fail(so(c, ShouldHaveLength, 1), fmt.Sprintf( + "Expected collection to have length equal to [1], but it's length was [0] instead! contents: %+v", c)) + + c = make(chan int) // empty channel + this.fail(so(c, ShouldHaveLength, 1), fmt.Sprintf( + "Expected collection to have length equal to [1], but it's length was [0] instead! 
contents: %+v", c)) + + this.pass(so([]int{1}, ShouldHaveLength, 1)) // non-empty slice + this.pass(so([]interface{}{1}, ShouldHaveLength, 1)) // non-empty slice + this.pass(so(map[string]int{"hi": 0}, ShouldHaveLength, 1)) // non-empty map + this.pass(so("hi", ShouldHaveLength, 2)) // non-empty string + this.pass(so(&[]int{1}, ShouldHaveLength, 1)) // pointer to non-empty slice + this.pass(so(&[1]int{1}, ShouldHaveLength, 1)) // pointer to non-empty array + c = make(chan int, 1) + go func() { c <- 1 }() + time.Sleep(time.Millisecond) + this.pass(so(c, ShouldHaveLength, 1)) + this.pass(so(c, ShouldHaveLength, uint(1))) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/doc.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/doc.go new file mode 100644 index 0000000000..ba30a9261a --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/doc.go @@ -0,0 +1,109 @@ +// Package assertions contains the implementations for all assertions which +// are referenced in goconvey's `convey` package +// (github.com/smartystreets/goconvey/convey) and gunit (github.com/smartystreets/gunit) +// for use with the So(...) method. +// They can also be used in traditional Go test functions and even in +// applications. +// +// https://smartystreets.com +// +// Many of the assertions lean heavily on work done by Aaron Jacobs in his excellent oglematchers library. +// (https://github.com/jacobsa/oglematchers) +// The ShouldResemble assertion leans heavily on work done by Daniel Jacques in his very helpful go-render library. +// (https://github.com/luci/go-render) +package assertions + +import ( + "fmt" + "runtime" +) + +// By default we use a no-op serializer. The actual Serializer provides a JSON +// representation of failure results on selected assertions so the goconvey +// web UI can display a convenient diff. 
+var serializer Serializer = new(noopSerializer) + +// GoConveyMode provides control over JSON serialization of failures. When +// using the assertions in this package from the convey package JSON results +// are very helpful and can be rendered in a DIFF view. In that case, this function +// will be called with a true value to enable the JSON serialization. By default, +// the assertions in this package will not serializer a JSON result, making +// standalone usage more convenient. +func GoConveyMode(yes bool) { + if yes { + serializer = newSerializer() + } else { + serializer = new(noopSerializer) + } +} + +type testingT interface { + Error(args ...interface{}) +} + +type Assertion struct { + t testingT + failed bool +} + +// New swallows the *testing.T struct and prints failed assertions using t.Error. +// Example: assertions.New(t).So(1, should.Equal, 1) +func New(t testingT) *Assertion { + return &Assertion{t: t} +} + +// Failed reports whether any calls to So (on this Assertion instance) have failed. +func (this *Assertion) Failed() bool { + return this.failed +} + +// So calls the standalone So function and additionally, calls t.Error in failure scenarios. +func (this *Assertion) So(actual interface{}, assert assertion, expected ...interface{}) bool { + ok, result := So(actual, assert, expected...) + if !ok { + this.failed = true + _, file, line, _ := runtime.Caller(1) + this.t.Error(fmt.Sprintf("\n%s:%d\n%s", file, line, result)) + } + return ok +} + +// So is a convenience function (as opposed to an inconvenience function?) +// for running assertions on arbitrary arguments in any context, be it for testing or even +// application logging. It allows you to perform assertion-like behavior (and get nicely +// formatted messages detailing discrepancies) but without the program blowing up or panicking. +// All that is required is to import this package and call `So` with one of the assertions +// exported by this package as the second parameter. 
+// The first return parameter is a boolean indicating if the assertion was true. The second +// return parameter is the well-formatted message showing why an assertion was incorrect, or +// blank if the assertion was correct. +// +// Example: +// +// if ok, message := So(x, ShouldBeGreaterThan, y); !ok { +// log.Println(message) +// } +// +// For an alternative implementation of So (that provides more flexible return options) +// see the `So` function in the package at github.com/smartystreets/assertions/assert. +func So(actual interface{}, assert assertion, expected ...interface{}) (bool, string) { + if result := so(actual, assert, expected...); len(result) == 0 { + return true, result + } else { + return false, result + } +} + +// so is like So, except that it only returns the string message, which is blank if the +// assertion passed. Used to facilitate testing. +func so(actual interface{}, assert func(interface{}, ...interface{}) string, expected ...interface{}) string { + return assert(actual, expected...) +} + +// assertion is an alias for a function with a signature that the So() +// function can handle. Any future or custom assertions should conform to this +// method signature. The return value should be an empty string if the assertion +// passes and a well-formed failure message if not. 
+type assertion func(actual interface{}, expected ...interface{}) string + +//////////////////////////////////////////////////////////////////////////// diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/doc_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/doc_test.go new file mode 100644 index 0000000000..faaaf9314f --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/doc_test.go @@ -0,0 +1,74 @@ +package assertions + +import ( + "bytes" + "fmt" + "reflect" + "testing" +) + +func TestGoConveyModeAffectsSerializer(t *testing.T) { + if reflect.TypeOf(serializer) != reflect.TypeOf(new(noopSerializer)) { + t.Error("Expected noop serializer as default") + } + + GoConveyMode(true) + if reflect.TypeOf(serializer) != reflect.TypeOf(new(failureSerializer)) { + t.Error("Expected failure serializer") + } + + GoConveyMode(false) + if reflect.TypeOf(serializer) != reflect.TypeOf(new(noopSerializer)) { + t.Error("Expected noop serializer") + } +} + +func TestPassingAssertion(t *testing.T) { + fake := &FakeT{buffer: new(bytes.Buffer)} + assertion := New(fake) + passed := assertion.So(1, ShouldEqual, 1) + + if !passed { + t.Error("Assertion failed when it should have passed.") + } + if fake.buffer.Len() > 0 { + t.Error("Unexpected error message was printed.") + } +} + +func TestFailingAssertion(t *testing.T) { + fake := &FakeT{buffer: new(bytes.Buffer)} + assertion := New(fake) + passed := assertion.So(1, ShouldEqual, 2) + + if passed { + t.Error("Assertion passed when it should have failed.") + } + if fake.buffer.Len() == 0 { + t.Error("Expected error message not printed.") + } +} + +func TestFailingGroupsOfAssertions(t *testing.T) { + fake := &FakeT{buffer: new(bytes.Buffer)} + assertion1 := New(fake) + assertion2 := New(fake) + + assertion1.So(1, ShouldEqual, 2) // fail + assertion2.So(1, ShouldEqual, 1) // pass + + if !assertion1.Failed() { + 
t.Error("Expected the first assertion to have been marked as failed.") + } + if assertion2.Failed() { + t.Error("Expected the second assertion to NOT have been marked as failed.") + } +} + +type FakeT struct { + buffer *bytes.Buffer +} + +func (this *FakeT) Error(args ...interface{}) { + fmt.Fprint(this.buffer, args...) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/equal_method.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/equal_method.go new file mode 100644 index 0000000000..c4fc38fab5 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/equal_method.go @@ -0,0 +1,75 @@ +package assertions + +import "reflect" + +type equalityMethodSpecification struct { + a interface{} + b interface{} + + aType reflect.Type + bType reflect.Type + + equalMethod reflect.Value +} + +func newEqualityMethodSpecification(a, b interface{}) *equalityMethodSpecification { + return &equalityMethodSpecification{ + a: a, + b: b, + } +} + +func (this *equalityMethodSpecification) IsSatisfied() bool { + if !this.bothAreSameType() { + return false + } + if !this.typeHasEqualMethod() { + return false + } + if !this.equalMethodReceivesSameTypeForComparison() { + return false + } + if !this.equalMethodReturnsBool() { + return false + } + return true +} + +func (this *equalityMethodSpecification) bothAreSameType() bool { + this.aType = reflect.TypeOf(this.a) + if this.aType == nil { + return false + } + if this.aType.Kind() == reflect.Ptr { + this.aType = this.aType.Elem() + } + this.bType = reflect.TypeOf(this.b) + return this.aType == this.bType +} +func (this *equalityMethodSpecification) typeHasEqualMethod() bool { + aInstance := reflect.ValueOf(this.a) + this.equalMethod = aInstance.MethodByName("Equal") + return this.equalMethod != reflect.Value{} +} + +func (this *equalityMethodSpecification) equalMethodReceivesSameTypeForComparison() bool { + signature 
:= this.equalMethod.Type() + return signature.NumIn() == 1 && signature.In(0) == this.aType +} + +func (this *equalityMethodSpecification) equalMethodReturnsBool() bool { + signature := this.equalMethod.Type() + return signature.NumOut() == 1 && signature.Out(0) == reflect.TypeOf(true) +} + +func (this *equalityMethodSpecification) AreEqual() bool { + a := reflect.ValueOf(this.a) + b := reflect.ValueOf(this.b) + return areEqual(a, b) && areEqual(b, a) +} +func areEqual(receiver reflect.Value, argument reflect.Value) bool { + equalMethod := receiver.MethodByName("Equal") + argumentList := []reflect.Value{argument} + result := equalMethod.Call(argumentList) + return result[0].Bool() +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/equal_method_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/equal_method_test.go new file mode 100644 index 0000000000..261d1682f4 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/equal_method_test.go @@ -0,0 +1,155 @@ +package assertions + +import ( + "testing" + + "github.com/smartystreets/assertions/internal/unit" +) + +func TestEqualityFixture(t *testing.T) { + unit.Run(new(EqualityFixture), t) +} + +type EqualityFixture struct { + *unit.Fixture +} + +func (this *EqualityFixture) TestNilNil() { + spec := newEqualityMethodSpecification(nil, nil) + this.So(spec.IsSatisfied(), ShouldBeFalse) +} + +func (this *EqualityFixture) TestEligible1() { + a := Eligible1{"hi"} + b := Eligible1{"hi"} + specification := newEqualityMethodSpecification(a, b) + this.So(specification.IsSatisfied(), ShouldBeTrue) + this.So(specification.AreEqual(), ShouldBeTrue) +} + +func (this *EqualityFixture) TestAreEqual() { + a := Eligible1{"hi"} + b := Eligible1{"hi"} + specification := newEqualityMethodSpecification(a, b) + this.So(specification.IsSatisfied(), ShouldBeTrue) + this.So(specification.AreEqual(), ShouldBeTrue) 
+} + +func (this *EqualityFixture) TestAreNotEqual() { + a := Eligible1{"hi"} + b := Eligible1{"bye"} + specification := newEqualityMethodSpecification(a, b) + this.So(specification.IsSatisfied(), ShouldBeTrue) + this.So(specification.AreEqual(), ShouldBeFalse) +} + +func (this *EqualityFixture) TestEligible2() { + a := Eligible2{"hi"} + b := Eligible2{"hi"} + specification := newEqualityMethodSpecification(a, b) + this.So(specification.IsSatisfied(), ShouldBeTrue) +} + +func (this *EqualityFixture) TestEligible1_PointerReceiver() { + a := &Eligible1{"hi"} + b := Eligible1{"hi"} + this.So(a.Equal(b), ShouldBeTrue) + specification := newEqualityMethodSpecification(a, b) + this.So(specification.IsSatisfied(), ShouldBeTrue) +} + +func (this *EqualityFixture) TestIneligible_PrimitiveTypes() { + specification := newEqualityMethodSpecification(1, 1) + this.So(specification.IsSatisfied(), ShouldBeFalse) +} + +func (this *EqualityFixture) TestIneligible_DisparateTypes() { + a := Eligible1{"hi"} + b := Eligible2{"hi"} + specification := newEqualityMethodSpecification(a, b) + this.So(specification.IsSatisfied(), ShouldBeFalse) +} + +func (this *EqualityFixture) TestIneligible_NoEqualMethod() { + a := Ineligible_NoEqualMethod{} + b := Ineligible_NoEqualMethod{} + specification := newEqualityMethodSpecification(a, b) + this.So(specification.IsSatisfied(), ShouldBeFalse) +} + +func (this *EqualityFixture) TestIneligible_EqualMethodReceivesNoInput() { + a := Ineligible_EqualMethodNoInputs{} + b := Ineligible_EqualMethodNoInputs{} + specification := newEqualityMethodSpecification(a, b) + this.So(specification.IsSatisfied(), ShouldBeFalse) +} + +func (this *EqualityFixture) TestIneligible_EqualMethodReceivesTooManyInputs() { + a := Ineligible_EqualMethodTooManyInputs{} + b := Ineligible_EqualMethodTooManyInputs{} + specification := newEqualityMethodSpecification(a, b) + this.So(specification.IsSatisfied(), ShouldBeFalse) +} + +func (this *EqualityFixture) 
TestIneligible_EqualMethodReceivesWrongInput() { + a := Ineligible_EqualMethodWrongInput{} + b := Ineligible_EqualMethodWrongInput{} + specification := newEqualityMethodSpecification(a, b) + this.So(specification.IsSatisfied(), ShouldBeFalse) +} + +func (this *EqualityFixture) TestIneligible_EqualMethodReturnsNoOutputs() { + a := Ineligible_EqualMethodNoOutputs{} + b := Ineligible_EqualMethodNoOutputs{} + specification := newEqualityMethodSpecification(a, b) + this.So(specification.IsSatisfied(), ShouldBeFalse) +} + +func (this *EqualityFixture) TestIneligible_EqualMethodReturnsTooManyOutputs() { + a := Ineligible_EqualMethodTooManyOutputs{} + b := Ineligible_EqualMethodTooManyOutputs{} + specification := newEqualityMethodSpecification(a, b) + this.So(specification.IsSatisfied(), ShouldBeFalse) +} + +func (this *EqualityFixture) TestIneligible_EqualMethodReturnsWrongOutputs() { + a := Ineligible_EqualMethodWrongOutput{} + b := Ineligible_EqualMethodWrongOutput{} + specification := newEqualityMethodSpecification(a, b) + this.So(specification.IsSatisfied(), ShouldBeFalse) +} + +func (this *EqualityFixture) TestEligibleAsymmetric_EqualMethodResultDiffersWhenArgumentsInverted() { + a := EligibleAsymmetric{a: 0} + b := EligibleAsymmetric{a: 1} + specification := newEqualityMethodSpecification(a, b) + this.So(specification.IsSatisfied(), ShouldBeTrue) + this.So(specification.AreEqual(), ShouldBeFalse) +} + +/**************************************************************************/ + +type ( + Eligible1 struct{ a string } + Eligible2 struct{ a string } + EligibleAsymmetric struct{ a int } + Ineligible_NoEqualMethod struct{} + Ineligible_EqualMethodNoInputs struct{} + Ineligible_EqualMethodNoOutputs struct{} + Ineligible_EqualMethodTooManyInputs struct{} + Ineligible_EqualMethodTooManyOutputs struct{} + Ineligible_EqualMethodWrongInput struct{} + Ineligible_EqualMethodWrongOutput struct{} +) + +func (this Eligible1) Equal(that Eligible1) bool { return this.a == that.a } 
+func (this Eligible2) Equal(that Eligible2) bool { return this.a == that.a } +func (this EligibleAsymmetric) Equal(that EligibleAsymmetric) bool { + return this.a == 0 +} +func (this Ineligible_EqualMethodNoInputs) Equal() bool { return true } +func (this Ineligible_EqualMethodNoOutputs) Equal(that Ineligible_EqualMethodNoOutputs) {} +func (this Ineligible_EqualMethodTooManyInputs) Equal(a, b bool) bool { return true } +func (this Ineligible_EqualMethodTooManyOutputs) Equal(bool) (bool, bool) { return true, true } +func (this Ineligible_EqualMethodWrongInput) Equal(a string) bool { return true } +func (this Ineligible_EqualMethodWrongOutput) Equal(Ineligible_EqualMethodWrongOutput) int { return 0 } diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/equality.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/equality.go new file mode 100644 index 0000000000..37a49f4e25 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/equality.go @@ -0,0 +1,331 @@ +package assertions + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "strings" + + "github.com/smartystreets/assertions/internal/go-render/render" + "github.com/smartystreets/assertions/internal/oglematchers" +) + +// ShouldEqual receives exactly two parameters and does an equality check +// using the following semantics: +// 1. If the expected and actual values implement an Equal method in the form +// `func (this T) Equal(that T) bool` then call the method. If true, they are equal. +// 2. The expected and actual values are judged equal or not by oglematchers.Equals. 
+func ShouldEqual(actual interface{}, expected ...interface{}) string { + if message := need(1, expected); message != success { + return message + } + return shouldEqual(actual, expected[0]) +} +func shouldEqual(actual, expected interface{}) (message string) { + defer func() { + if r := recover(); r != nil { + message = serializer.serialize(expected, actual, composeEqualityMismatchMessage(expected, actual)) + } + }() + + if spec := newEqualityMethodSpecification(expected, actual); spec.IsSatisfied() && spec.AreEqual() { + return success + } else if matchError := oglematchers.Equals(expected).Matches(actual); matchError == nil { + return success + } + + return serializer.serialize(expected, actual, composeEqualityMismatchMessage(expected, actual)) +} +func composeEqualityMismatchMessage(expected, actual interface{}) string { + var ( + renderedExpected = fmt.Sprintf("%v", expected) + renderedActual = fmt.Sprintf("%v", actual) + ) + + if renderedExpected != renderedActual { + return fmt.Sprintf(shouldHaveBeenEqual+composePrettyDiff(renderedExpected, renderedActual), expected, actual) + } else if reflect.TypeOf(expected) != reflect.TypeOf(actual) { + return fmt.Sprintf(shouldHaveBeenEqualTypeMismatch, expected, expected, actual, actual) + } else { + return fmt.Sprintf(shouldHaveBeenEqualNoResemblance, renderedExpected) + } +} + +// ShouldNotEqual receives exactly two parameters and does an inequality check. +// See ShouldEqual for details on how equality is determined. +func ShouldNotEqual(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } else if ShouldEqual(actual, expected[0]) == success { + return fmt.Sprintf(shouldNotHaveBeenEqual, actual, expected[0]) + } + return success +} + +// ShouldAlmostEqual makes sure that two parameters are close enough to being equal. +// The acceptable delta may be specified with a third argument, +// or a very small default delta will be used. 
+func ShouldAlmostEqual(actual interface{}, expected ...interface{}) string { + actualFloat, expectedFloat, deltaFloat, err := cleanAlmostEqualInput(actual, expected...) + + if err != "" { + return err + } + + if math.Abs(actualFloat-expectedFloat) <= deltaFloat { + return success + } else { + return fmt.Sprintf(shouldHaveBeenAlmostEqual, actualFloat, expectedFloat) + } +} + +// ShouldNotAlmostEqual is the inverse of ShouldAlmostEqual +func ShouldNotAlmostEqual(actual interface{}, expected ...interface{}) string { + actualFloat, expectedFloat, deltaFloat, err := cleanAlmostEqualInput(actual, expected...) + + if err != "" { + return err + } + + if math.Abs(actualFloat-expectedFloat) > deltaFloat { + return success + } else { + return fmt.Sprintf(shouldHaveNotBeenAlmostEqual, actualFloat, expectedFloat) + } +} + +func cleanAlmostEqualInput(actual interface{}, expected ...interface{}) (float64, float64, float64, string) { + deltaFloat := 0.0000000001 + + if len(expected) == 0 { + return 0.0, 0.0, 0.0, "This assertion requires exactly one comparison value and an optional delta (you provided neither)" + } else if len(expected) == 2 { + delta, err := getFloat(expected[1]) + + if err != nil { + return 0.0, 0.0, 0.0, "The delta value " + err.Error() + } + + deltaFloat = delta + } else if len(expected) > 2 { + return 0.0, 0.0, 0.0, "This assertion requires exactly one comparison value and an optional delta (you provided more values)" + } + + actualFloat, err := getFloat(actual) + if err != nil { + return 0.0, 0.0, 0.0, "The actual value " + err.Error() + } + + expectedFloat, err := getFloat(expected[0]) + if err != nil { + return 0.0, 0.0, 0.0, "The comparison value " + err.Error() + } + + return actualFloat, expectedFloat, deltaFloat, "" +} + +// returns the float value of any real number, or error if it is not a numerical type +func getFloat(num interface{}) (float64, error) { + numValue := reflect.ValueOf(num) + numKind := numValue.Kind() + + if numKind == reflect.Int || 
+ numKind == reflect.Int8 || + numKind == reflect.Int16 || + numKind == reflect.Int32 || + numKind == reflect.Int64 { + return float64(numValue.Int()), nil + } else if numKind == reflect.Uint || + numKind == reflect.Uint8 || + numKind == reflect.Uint16 || + numKind == reflect.Uint32 || + numKind == reflect.Uint64 { + return float64(numValue.Uint()), nil + } else if numKind == reflect.Float32 || + numKind == reflect.Float64 { + return numValue.Float(), nil + } else { + return 0.0, errors.New("must be a numerical type, but was: " + numKind.String()) + } +} + +// ShouldEqualJSON receives exactly two parameters and does an equality check by marshalling to JSON +func ShouldEqualJSON(actual interface{}, expected ...interface{}) string { + if message := need(1, expected); message != success { + return message + } + + expectedString, expectedErr := remarshal(expected[0].(string)) + if expectedErr != nil { + return "Expected value not valid JSON: " + expectedErr.Error() + } + + actualString, actualErr := remarshal(actual.(string)) + if actualErr != nil { + return "Actual value not valid JSON: " + actualErr.Error() + } + + return ShouldEqual(actualString, expectedString) +} +func remarshal(value string) (string, error) { + var structured interface{} + err := json.Unmarshal([]byte(value), &structured) + if err != nil { + return "", err + } + canonical, _ := json.Marshal(structured) + return string(canonical), nil +} + +// ShouldResemble receives exactly two parameters and does a deep equal check (see reflect.DeepEqual) +func ShouldResemble(actual interface{}, expected ...interface{}) string { + if message := need(1, expected); message != success { + return message + } + + if matchError := oglematchers.DeepEquals(expected[0]).Matches(actual); matchError != nil { + renderedExpected, renderedActual := render.Render(expected[0]), render.Render(actual) + message := fmt.Sprintf(shouldHaveResembled, renderedExpected, renderedActual) + + composePrettyDiff(renderedExpected, 
renderedActual) + return serializer.serializeDetailed(expected[0], actual, message) + } + + return success +} + +// ShouldNotResemble receives exactly two parameters and does an inverse deep equal check (see reflect.DeepEqual) +func ShouldNotResemble(actual interface{}, expected ...interface{}) string { + if message := need(1, expected); message != success { + return message + } else if ShouldResemble(actual, expected[0]) == success { + return fmt.Sprintf(shouldNotHaveResembled, render.Render(actual), render.Render(expected[0])) + } + return success +} + +// ShouldPointTo receives exactly two parameters and checks to see that they point to the same address. +func ShouldPointTo(actual interface{}, expected ...interface{}) string { + if message := need(1, expected); message != success { + return message + } + return shouldPointTo(actual, expected[0]) + +} +func shouldPointTo(actual, expected interface{}) string { + actualValue := reflect.ValueOf(actual) + expectedValue := reflect.ValueOf(expected) + + if ShouldNotBeNil(actual) != success { + return fmt.Sprintf(shouldHaveBeenNonNilPointer, "first", "nil") + } else if ShouldNotBeNil(expected) != success { + return fmt.Sprintf(shouldHaveBeenNonNilPointer, "second", "nil") + } else if actualValue.Kind() != reflect.Ptr { + return fmt.Sprintf(shouldHaveBeenNonNilPointer, "first", "not") + } else if expectedValue.Kind() != reflect.Ptr { + return fmt.Sprintf(shouldHaveBeenNonNilPointer, "second", "not") + } else if ShouldEqual(actualValue.Pointer(), expectedValue.Pointer()) != success { + actualAddress := reflect.ValueOf(actual).Pointer() + expectedAddress := reflect.ValueOf(expected).Pointer() + return serializer.serialize(expectedAddress, actualAddress, fmt.Sprintf(shouldHavePointedTo, + actual, actualAddress, + expected, expectedAddress)) + } + return success +} + +// ShouldNotPointTo receives exactly two parameters and checks to see that they point to different addresess. 
+func ShouldNotPointTo(actual interface{}, expected ...interface{}) string { + if message := need(1, expected); message != success { + return message + } + compare := ShouldPointTo(actual, expected[0]) + if strings.HasPrefix(compare, shouldBePointers) { + return compare + } else if compare == success { + return fmt.Sprintf(shouldNotHavePointedTo, actual, expected[0], reflect.ValueOf(actual).Pointer()) + } + return success +} + +// ShouldBeNil receives a single parameter and ensures that it is nil. +func ShouldBeNil(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } else if actual == nil { + return success + } else if interfaceHasNilValue(actual) { + return success + } + return fmt.Sprintf(shouldHaveBeenNil, actual) +} +func interfaceHasNilValue(actual interface{}) bool { + value := reflect.ValueOf(actual) + kind := value.Kind() + nilable := kind == reflect.Slice || + kind == reflect.Chan || + kind == reflect.Func || + kind == reflect.Ptr || + kind == reflect.Map + + // Careful: reflect.Value.IsNil() will panic unless it's an interface, chan, map, func, slice, or ptr + // Reference: http://golang.org/pkg/reflect/#Value.IsNil + return nilable && value.IsNil() +} + +// ShouldNotBeNil receives a single parameter and ensures that it is not nil. +func ShouldNotBeNil(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } else if ShouldBeNil(actual) == success { + return fmt.Sprintf(shouldNotHaveBeenNil, actual) + } + return success +} + +// ShouldBeTrue receives a single parameter and ensures that it is true. +func ShouldBeTrue(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } else if actual != true { + return fmt.Sprintf(shouldHaveBeenTrue, actual) + } + return success +} + +// ShouldBeFalse receives a single parameter and ensures that it is false. 
+func ShouldBeFalse(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } else if actual != false { + return fmt.Sprintf(shouldHaveBeenFalse, actual) + } + return success +} + +// ShouldBeZeroValue receives a single parameter and ensures that it is +// the Go equivalent of the default value, or "zero" value. +func ShouldBeZeroValue(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } + zeroVal := reflect.Zero(reflect.TypeOf(actual)).Interface() + if !reflect.DeepEqual(zeroVal, actual) { + return serializer.serialize(zeroVal, actual, fmt.Sprintf(shouldHaveBeenZeroValue, actual)) + } + return success +} + +// ShouldBeZeroValue receives a single parameter and ensures that it is NOT +// the Go equivalent of the default value, or "zero" value. +func ShouldNotBeZeroValue(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } + zeroVal := reflect.Zero(reflect.TypeOf(actual)).Interface() + if reflect.DeepEqual(zeroVal, actual) { + return serializer.serialize(zeroVal, actual, fmt.Sprintf(shouldNotHaveBeenZeroValue, actual)) + } + return success +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/equality_diff.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/equality_diff.go new file mode 100644 index 0000000000..bd698ff62b --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/equality_diff.go @@ -0,0 +1,37 @@ +package assertions + +import ( + "fmt" + + "github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch" +) + +func composePrettyDiff(expected, actual string) string { + diff := diffmatchpatch.New() + diffs := diff.DiffMain(expected, actual, false) + if prettyDiffIsLikelyToBeHelpful(diffs) { + return fmt.Sprintf("\nDiff: '%s'", 
diff.DiffPrettyText(diffs)) + } + return "" +} + +// prettyDiffIsLikelyToBeHelpful returns true if the diff listing contains +// more 'equal' segments than 'deleted'/'inserted' segments. +func prettyDiffIsLikelyToBeHelpful(diffs []diffmatchpatch.Diff) bool { + equal, deleted, inserted := measureDiffTypeLengths(diffs) + return equal > deleted && equal > inserted +} + +func measureDiffTypeLengths(diffs []diffmatchpatch.Diff) (equal, deleted, inserted int) { + for _, segment := range diffs { + switch segment.Type { + case diffmatchpatch.DiffEqual: + equal += len(segment.Text) + case diffmatchpatch.DiffDelete: + deleted += len(segment.Text) + case diffmatchpatch.DiffInsert: + inserted += len(segment.Text) + } + } + return equal, deleted, inserted +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/equality_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/equality_test.go new file mode 100644 index 0000000000..873dee4598 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/equality_test.go @@ -0,0 +1,353 @@ +package assertions + +import ( + "fmt" + "reflect" + "time" +) + +func (this *AssertionsFixture) TestShouldEqual() { + this.fail(so(1, ShouldEqual), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(1, ShouldEqual, 1, 2), "This assertion requires exactly 1 comparison values (you provided 2).") + this.fail(so(1, ShouldEqual, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + this.pass(so(1, ShouldEqual, 1)) + this.fail(so(1, ShouldEqual, 2), "2|1|Expected: '2' Actual: '1' (Should be equal)") + this.fail(so(1, ShouldEqual, "1"), "1|1|Expected: '1' (string) Actual: '1' (int) (Should be equal, type mismatch)") + + this.pass(so(nil, ShouldEqual, nil)) + + this.pass(so(true, ShouldEqual, true)) + this.fail(so(true, ShouldEqual, false), "false|true|Expected: 'false' 
Actual: 'true' (Should be equal)") + + this.pass(so("hi", ShouldEqual, "hi")) + this.fail(so("hi", ShouldEqual, "bye"), "bye|hi|Expected: 'bye' Actual: 'hi' (Should be equal)") + + this.pass(so(42, ShouldEqual, uint(42))) + + this.fail(so(Thing1{"hi"}, ShouldEqual, Thing1{}), "{}|{hi}|Expected: '{}' Actual: '{hi}' (Should be equal)") + this.fail(so(Thing1{"hi"}, ShouldEqual, Thing1{"hi"}), "{hi}|{hi}|Both the actual and expected values render equally ('{hi}') and their types are the same. Try using ShouldResemble instead.") + this.fail(so(&Thing1{"hi"}, ShouldEqual, &Thing1{"hi"}), "&{hi}|&{hi}|Both the actual and expected values render equally ('&{hi}') and their types are the same. Try using ShouldResemble instead.") + + this.fail(so(Thing1{}, ShouldEqual, Thing2{}), "{}|{}|Expected: '{}' (assertions.Thing2) Actual: '{}' (assertions.Thing1) (Should be equal, type mismatch)") + + this.pass(so(ThingWithEqualMethod{"hi"}, ShouldEqual, ThingWithEqualMethod{"hi"})) + this.fail(so(ThingWithEqualMethod{"hi"}, ShouldEqual, ThingWithEqualMethod{"bye"}), + "{bye}|{hi}|Expected: '{bye}' Actual: '{hi}' (Should be equal)") +} + +func (this *AssertionsFixture) TestTimeEqual() { + var ( + gopherCon, _ = time.LoadLocation("America/Denver") + elsewhere, _ = time.LoadLocation("America/New_York") + + timeNow = time.Now().In(gopherCon) + timeNowElsewhere = timeNow.In(elsewhere) + timeLater = timeNow.Add(time.Nanosecond) + ) + + this.pass(so(timeNow, ShouldNotResemble, timeNowElsewhere)) // Differing *Location field prevents ShouldResemble! + this.pass(so(timeNow, ShouldEqual, timeNowElsewhere)) // Time.Equal method used to determine exact instant. 
+ this.pass(so(timeNow, ShouldNotEqual, timeLater)) +} + +func (this *AssertionsFixture) TestShouldNotEqual() { + this.fail(so(1, ShouldNotEqual), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(1, ShouldNotEqual, 1, 2), "This assertion requires exactly 1 comparison values (you provided 2).") + this.fail(so(1, ShouldNotEqual, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + this.pass(so(1, ShouldNotEqual, 2)) + this.pass(so(1, ShouldNotEqual, "1")) + this.fail(so(1, ShouldNotEqual, 1), "Expected '1' to NOT equal '1' (but it did)!") + + this.pass(so(true, ShouldNotEqual, false)) + this.fail(so(true, ShouldNotEqual, true), "Expected 'true' to NOT equal 'true' (but it did)!") + + this.pass(so("hi", ShouldNotEqual, "bye")) + this.fail(so("hi", ShouldNotEqual, "hi"), "Expected 'hi' to NOT equal 'hi' (but it did)!") + + this.pass(so(&Thing1{"hi"}, ShouldNotEqual, &Thing1{"hi"})) + this.pass(so(Thing1{"hi"}, ShouldNotEqual, Thing1{"hi"})) + this.pass(so(Thing1{}, ShouldNotEqual, Thing1{})) + this.pass(so(Thing1{}, ShouldNotEqual, Thing2{})) +} + +func (this *AssertionsFixture) TestShouldAlmostEqual() { + this.fail(so(1, ShouldAlmostEqual), "This assertion requires exactly one comparison value and an optional delta (you provided neither)") + this.fail(so(1, ShouldAlmostEqual, 1, 2, 3), "This assertion requires exactly one comparison value and an optional delta (you provided more values)") + this.fail(so(1, ShouldAlmostEqual, "1"), "The comparison value must be a numerical type, but was: string") + this.fail(so(1, ShouldAlmostEqual, 1, "1"), "The delta value must be a numerical type, but was: string") + this.fail(so("1", ShouldAlmostEqual, 1), "The actual value must be a numerical type, but was: string") + + // with the default delta + this.pass(so(0.99999999999999, ShouldAlmostEqual, uint(1))) + this.pass(so(1, ShouldAlmostEqual, 0.99999999999999)) + this.pass(so(1.3612499999999996, 
ShouldAlmostEqual, 1.36125)) + this.pass(so(0.7285312499999999, ShouldAlmostEqual, 0.72853125)) + this.fail(so(1, ShouldAlmostEqual, .99), "Expected '1' to almost equal '0.99' (but it didn't)!") + + // with a different delta + this.pass(so(100.0, ShouldAlmostEqual, 110.0, 10.0)) + this.fail(so(100.0, ShouldAlmostEqual, 111.0, 10.5), "Expected '100' to almost equal '111' (but it didn't)!") + + // various ints should work + this.pass(so(100, ShouldAlmostEqual, 100.0)) + this.pass(so(int(100), ShouldAlmostEqual, 100.0)) + this.pass(so(int8(100), ShouldAlmostEqual, 100.0)) + this.pass(so(int16(100), ShouldAlmostEqual, 100.0)) + this.pass(so(int32(100), ShouldAlmostEqual, 100.0)) + this.pass(so(int64(100), ShouldAlmostEqual, 100.0)) + this.pass(so(uint(100), ShouldAlmostEqual, 100.0)) + this.pass(so(uint8(100), ShouldAlmostEqual, 100.0)) + this.pass(so(uint16(100), ShouldAlmostEqual, 100.0)) + this.pass(so(uint32(100), ShouldAlmostEqual, 100.0)) + this.pass(so(uint64(100), ShouldAlmostEqual, 100.0)) + this.pass(so(100, ShouldAlmostEqual, 100.0)) + this.fail(so(100, ShouldAlmostEqual, 99.0), "Expected '100' to almost equal '99' (but it didn't)!") + + // floats should work + this.pass(so(float64(100.0), ShouldAlmostEqual, float32(100.0))) + this.fail(so(float32(100.0), ShouldAlmostEqual, 99.0, float32(0.1)), "Expected '100' to almost equal '99' (but it didn't)!") +} + +func (this *AssertionsFixture) TestShouldNotAlmostEqual() { + this.fail(so(1, ShouldNotAlmostEqual), "This assertion requires exactly one comparison value and an optional delta (you provided neither)") + this.fail(so(1, ShouldNotAlmostEqual, 1, 2, 3), "This assertion requires exactly one comparison value and an optional delta (you provided more values)") + + // with the default delta + this.fail(so(1, ShouldNotAlmostEqual, .99999999999999), "Expected '1' to NOT almost equal '0.99999999999999' (but it did)!") + this.fail(so(1.3612499999999996, ShouldNotAlmostEqual, 1.36125), "Expected '1.3612499999999996' to 
NOT almost equal '1.36125' (but it did)!") + this.pass(so(1, ShouldNotAlmostEqual, .99)) + + // with a different delta + this.fail(so(100.0, ShouldNotAlmostEqual, 110.0, 10.0), "Expected '100' to NOT almost equal '110' (but it did)!") + this.pass(so(100.0, ShouldNotAlmostEqual, 111.0, 10.5)) + + // ints should work + this.fail(so(100, ShouldNotAlmostEqual, 100.0), "Expected '100' to NOT almost equal '100' (but it did)!") + this.pass(so(100, ShouldNotAlmostEqual, 99.0)) + + // float32 should work + this.fail(so(float64(100.0), ShouldNotAlmostEqual, float32(100.0)), "Expected '100' to NOT almost equal '100' (but it did)!") + this.pass(so(float32(100.0), ShouldNotAlmostEqual, 99.0, float32(0.1))) +} + +func (this *AssertionsFixture) TestShouldResemble() { + this.fail(so(Thing1{"hi"}, ShouldResemble), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(Thing1{"hi"}, ShouldResemble, Thing1{"hi"}, Thing1{"hi"}), "This assertion requires exactly 1 comparison values (you provided 2).") + + this.pass(so(Thing1{"hi"}, ShouldResemble, Thing1{"hi"})) + this.fail(so(Thing1{"hi"}, ShouldResemble, Thing1{"bye"}), `{bye}|{hi}|Expected: 'assertions.Thing1{a:"bye"}' Actual: 'assertions.Thing1{a:"hi"}' (Should resemble)! Diff: 'assertions.Thing1{a:"byehi"}'`) + + var ( + a []int + b []int = []int{} + ) + + this.fail(so(a, ShouldResemble, b), `[]|[]|Expected: '[]int{}' Actual: '[]int(nil)' (Should resemble)!`) + this.fail(so(2, ShouldResemble, 1), `1|2|Expected: '1' Actual: '2' (Should resemble)!`) + + this.fail(so(StringStringMapAlias{"hi": "bye"}, ShouldResemble, map[string]string{"hi": "bye"}), + `map[hi:bye]|map[hi:bye]|Expected: 'map[string]string{"hi":"bye"}' Actual: 'assertions.StringStringMapAlias{"hi":"bye"}' (Should resemble)! 
Diff: 'map[ssertions.String]sStringMapAlias{"hi":"bye"}'`) + this.fail(so(StringSliceAlias{"hi", "bye"}, ShouldResemble, []string{"hi", "bye"}), + `[hi bye]|[hi bye]|Expected: '[]string{"hi", "bye"}' Actual: 'assertions.StringSliceAlias{"hi", "bye"}' (Should resemble)!`) + + // some types come out looking the same when represented with "%#v" so we show type mismatch info: + this.fail(so(StringAlias("hi"), ShouldResemble, "hi"), `hi|hi|Expected: '"hi"' Actual: 'assertions.StringAlias("hi")' (Should resemble)!`) + this.fail(so(IntAlias(42), ShouldResemble, 42), `42|42|Expected: '42' Actual: 'assertions.IntAlias(42)' (Should resemble)!`) +} + +func (this *AssertionsFixture) TestShouldNotResemble() { + this.fail(so(Thing1{"hi"}, ShouldNotResemble), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(Thing1{"hi"}, ShouldNotResemble, Thing1{"hi"}, Thing1{"hi"}), "This assertion requires exactly 1 comparison values (you provided 2).") + + this.pass(so(Thing1{"hi"}, ShouldNotResemble, Thing1{"bye"})) + this.fail(so(Thing1{"hi"}, ShouldNotResemble, Thing1{"hi"}), + `Expected '"assertions.Thing1{a:\"hi\"}"' to NOT resemble '"assertions.Thing1{a:\"hi\"}"' (but it did)!`) + + this.pass(so(map[string]string{"hi": "bye"}, ShouldResemble, map[string]string{"hi": "bye"})) + this.pass(so(IntAlias(42), ShouldNotResemble, 42)) + + this.pass(so(StringSliceAlias{"hi", "bye"}, ShouldNotResemble, []string{"hi", "bye"})) +} + +func (this *AssertionsFixture) TestShouldEqualJSON() { + this.fail(so("hi", ShouldEqualJSON), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so("hi", ShouldEqualJSON, "hi", "hi"), "This assertion requires exactly 1 comparison values (you provided 2).") + + // basic identity of keys/values + this.pass(so(`{"my":"val"}`, ShouldEqualJSON, `{"my":"val"}`)) + this.fail(so(`{"my":"val"}`, ShouldEqualJSON, `{"your":"val"}`), + `{"your":"val"}|{"my":"val"}|Expected: '{"your":"val"}' Actual: 
'{"my":"val"}' (Should be equal) Diff: '{"myour":"val"}'`) + + // out of order values causes comparison failure: + this.pass(so(`{"key0":"val0","key1":"val1"}`, ShouldEqualJSON, `{"key1":"val1","key0":"val0"}`)) + this.fail(so(`{"key0":"val0","key1":"val1"}`, ShouldEqualJSON, `{"key1":"val0","key0":"val0"}`), + `{"key0":"val0","key1":"val0"}|{"key0":"val0","key1":"val1"}|Expected: '{"key0":"val0","key1":"val0"}' Actual: '{"key0":"val0","key1":"val1"}' (Should be equal) Diff: '{"key0":"val0","key1":"val01"}'`) + + // missing values causes comparison failure: + this.fail(so( + `{"key0":"val0","key1":"val1"}`, + ShouldEqualJSON, + `{"key1":"val0"}`), + `{"key1":"val0"}|{"key0":"val0","key1":"val1"}|Expected: '{"key1":"val0"}' Actual: '{"key0":"val0","key1":"val1"}' (Should be equal)`) + + // whitespace shouldn't interfere with comparison: + this.pass(so("\n{ \"my\" : \"val\"\n}", ShouldEqualJSON, `{"my":"val"}`)) + + // Invalid JSON for either actual or expected value is invalid: + this.fail(so("{}", ShouldEqualJSON, ""), "Expected value not valid JSON: unexpected end of JSON input") + this.fail(so("", ShouldEqualJSON, "{}"), "Actual value not valid JSON: unexpected end of JSON input") + + // Support JSON array: + this.pass(so("[]", ShouldEqualJSON, "[]")) + + // Support any JSON value: + this.pass(so(`"hi"`, ShouldEqualJSON, `"hi"`)) + this.pass(so(`42`, ShouldEqualJSON, `42`)) + this.pass(so(`true`, ShouldEqualJSON, `true`)) + this.pass(so(`false`, ShouldEqualJSON, `false`)) + this.pass(so(`null`, ShouldEqualJSON, `null`)) +} + +func (this *AssertionsFixture) TestShouldPointTo() { + t1 := &Thing1{} + t2 := t1 + t3 := &Thing1{} + + pointer1 := reflect.ValueOf(t1).Pointer() + pointer3 := reflect.ValueOf(t3).Pointer() + + this.fail(so(t1, ShouldPointTo), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(t1, ShouldPointTo, t2, t3), "This assertion requires exactly 1 comparison values (you provided 2).") + + this.pass(so(t1, 
ShouldPointTo, t2)) + this.fail(so(t1, ShouldPointTo, t3), fmt.Sprintf( + "%v|%v|Expected '&{a:}' (address: '%v') and '&{a:}' (address: '%v') to be the same address (but their weren't)!", + pointer3, pointer1, pointer1, pointer3)) + + t4 := Thing1{} + t5 := t4 + + this.fail(so(t4, ShouldPointTo, t5), "Both arguments should be pointers (the first was not)!") + this.fail(so(&t4, ShouldPointTo, t5), "Both arguments should be pointers (the second was not)!") + this.fail(so(nil, ShouldPointTo, nil), "Both arguments should be pointers (the first was nil)!") + this.fail(so(&t4, ShouldPointTo, nil), "Both arguments should be pointers (the second was nil)!") +} + +func (this *AssertionsFixture) TestShouldNotPointTo() { + t1 := &Thing1{} + t2 := t1 + t3 := &Thing1{} + + pointer1 := reflect.ValueOf(t1).Pointer() + + this.fail(so(t1, ShouldNotPointTo), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(t1, ShouldNotPointTo, t2, t3), "This assertion requires exactly 1 comparison values (you provided 2).") + + this.pass(so(t1, ShouldNotPointTo, t3)) + this.fail(so(t1, ShouldNotPointTo, t2), fmt.Sprintf("Expected '&{a:}' and '&{a:}' to be different references (but they matched: '%v')!", pointer1)) + + t4 := Thing1{} + t5 := t4 + + this.fail(so(t4, ShouldNotPointTo, t5), "Both arguments should be pointers (the first was not)!") + this.fail(so(&t4, ShouldNotPointTo, t5), "Both arguments should be pointers (the second was not)!") + this.fail(so(nil, ShouldNotPointTo, nil), "Both arguments should be pointers (the first was nil)!") + this.fail(so(&t4, ShouldNotPointTo, nil), "Both arguments should be pointers (the second was nil)!") +} + +func (this *AssertionsFixture) TestShouldBeNil() { + this.fail(so(nil, ShouldBeNil, nil, nil, nil), "This assertion requires exactly 0 comparison values (you provided 3).") + this.fail(so(nil, ShouldBeNil, nil), "This assertion requires exactly 0 comparison values (you provided 1).") + + this.pass(so(nil, 
ShouldBeNil)) + this.fail(so(1, ShouldBeNil), "Expected: nil Actual: '1'") + + var thing ThingInterface + this.pass(so(thing, ShouldBeNil)) + thing = &ThingImplementation{} + this.fail(so(thing, ShouldBeNil), "Expected: nil Actual: '&{}'") + + var thingOne *Thing1 + this.pass(so(thingOne, ShouldBeNil)) + + var nilSlice []int = nil + this.pass(so(nilSlice, ShouldBeNil)) + + var nilMap map[string]string = nil + this.pass(so(nilMap, ShouldBeNil)) + + var nilChannel chan int = nil + this.pass(so(nilChannel, ShouldBeNil)) + + var nilFunc func() = nil + this.pass(so(nilFunc, ShouldBeNil)) + + var nilInterface interface{} = nil + this.pass(so(nilInterface, ShouldBeNil)) +} + +func (this *AssertionsFixture) TestShouldNotBeNil() { + this.fail(so(nil, ShouldNotBeNil, nil, nil, nil), "This assertion requires exactly 0 comparison values (you provided 3).") + this.fail(so(nil, ShouldNotBeNil, nil), "This assertion requires exactly 0 comparison values (you provided 1).") + + this.fail(so(nil, ShouldNotBeNil), "Expected '' to NOT be nil (but it was)!") + this.pass(so(1, ShouldNotBeNil)) + + var thing ThingInterface + this.fail(so(thing, ShouldNotBeNil), "Expected '' to NOT be nil (but it was)!") + thing = &ThingImplementation{} + this.pass(so(thing, ShouldNotBeNil)) +} + +func (this *AssertionsFixture) TestShouldBeTrue() { + this.fail(so(true, ShouldBeTrue, 1, 2, 3), "This assertion requires exactly 0 comparison values (you provided 3).") + this.fail(so(true, ShouldBeTrue, 1), "This assertion requires exactly 0 comparison values (you provided 1).") + + this.fail(so(false, ShouldBeTrue), "Expected: true Actual: false") + this.fail(so(1, ShouldBeTrue), "Expected: true Actual: 1") + this.pass(so(true, ShouldBeTrue)) +} + +func (this *AssertionsFixture) TestShouldBeFalse() { + this.fail(so(false, ShouldBeFalse, 1, 2, 3), "This assertion requires exactly 0 comparison values (you provided 3).") + this.fail(so(false, ShouldBeFalse, 1), "This assertion requires exactly 0 comparison 
values (you provided 1).") + + this.fail(so(true, ShouldBeFalse), "Expected: false Actual: true") + this.fail(so(1, ShouldBeFalse), "Expected: false Actual: 1") + this.pass(so(false, ShouldBeFalse)) +} + +func (this *AssertionsFixture) TestShouldBeZeroValue() { + this.fail(so(0, ShouldBeZeroValue, 1, 2, 3), "This assertion requires exactly 0 comparison values (you provided 3).") + this.fail(so(false, ShouldBeZeroValue, true), "This assertion requires exactly 0 comparison values (you provided 1).") + + this.fail(so(1, ShouldBeZeroValue), "0|1|'1' should have been the zero value") //"Expected: (zero value) Actual: 1") + this.fail(so(true, ShouldBeZeroValue), "false|true|'true' should have been the zero value") //"Expected: (zero value) Actual: true") + this.fail(so("123", ShouldBeZeroValue), "|123|'123' should have been the zero value") //"Expected: (zero value) Actual: 123") + this.fail(so(" ", ShouldBeZeroValue), "| |' ' should have been the zero value") //"Expected: (zero value) Actual: ") + this.fail(so([]string{"Nonempty"}, ShouldBeZeroValue), "[]|[Nonempty]|'[Nonempty]' should have been the zero value") //"Expected: (zero value) Actual: [Nonempty]") + this.fail(so(struct{ a string }{a: "asdf"}, ShouldBeZeroValue), "{}|{asdf}|'{a:asdf}' should have been the zero value") + this.pass(so(0, ShouldBeZeroValue)) + this.pass(so(false, ShouldBeZeroValue)) + this.pass(so("", ShouldBeZeroValue)) + this.pass(so(struct{}{}, ShouldBeZeroValue)) +} + +func (this *AssertionsFixture) TestShouldNotBeZeroValue() { + this.fail(so(0, ShouldNotBeZeroValue, 1, 2, 3), "This assertion requires exactly 0 comparison values (you provided 3).") + this.fail(so(false, ShouldNotBeZeroValue, true), "This assertion requires exactly 0 comparison values (you provided 1).") + + this.fail(so(0, ShouldNotBeZeroValue), "0|0|'0' should NOT have been the zero value") + this.fail(so(false, ShouldNotBeZeroValue), "false|false|'false' should NOT have been the zero value") + this.fail(so("", 
ShouldNotBeZeroValue), "||'' should NOT have been the zero value") + this.fail(so(struct{}{}, ShouldNotBeZeroValue), "{}|{}|'{}' should NOT have been the zero value") + + this.pass(so(1, ShouldNotBeZeroValue)) + this.pass(so(true, ShouldNotBeZeroValue)) + this.pass(so("123", ShouldNotBeZeroValue)) + this.pass(so(" ", ShouldNotBeZeroValue)) + this.pass(so([]string{"Nonempty"}, ShouldNotBeZeroValue)) + this.pass(so(struct{ a string }{a: "asdf"}, ShouldNotBeZeroValue)) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/filter.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/filter.go new file mode 100644 index 0000000000..cbf7566725 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/filter.go @@ -0,0 +1,31 @@ +package assertions + +import "fmt" + +const ( + success = "" + needExactValues = "This assertion requires exactly %d comparison values (you provided %d)." + needNonEmptyCollection = "This assertion requires at least 1 comparison value (you provided 0)." + needFewerValues = "This assertion allows %d or fewer comparison values (you provided %d)." 
+) + +func need(needed int, expected []interface{}) string { + if len(expected) != needed { + return fmt.Sprintf(needExactValues, needed, len(expected)) + } + return success +} + +func atLeast(minimum int, expected []interface{}) string { + if len(expected) < minimum { + return needNonEmptyCollection + } + return success +} + +func atMost(max int, expected []interface{}) string { + if len(expected) > max { + return fmt.Sprintf(needFewerValues, max, len(expected)) + } + return success +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/go.mod b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/go.mod new file mode 100644 index 0000000000..c0daaa3d3c --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/go.mod @@ -0,0 +1,3 @@ +module github.com/smartystreets/assertions + +go 1.12 diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/.gitignore b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/.gitignore new file mode 100644 index 0000000000..00268614f0 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/.travis.yml b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/.travis.yml new file mode 100644 index 0000000000..85868de60d --- /dev/null +++ 
b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/.travis.yml @@ -0,0 +1,27 @@ +language: go + +os: + - linux + - osx + +go: + - 1.8.x + - 1.9.x + +sudo: false + +env: + global: + # Coveralls.io + - secure: OGYOsFNXNarEZ5yA4/M6ZdVguD0jL8vXgXrbLzjcpkKcq8ObHSCtNINoUlnNf6l6Z92kPnuV+LSm7jKTojBlov4IwgiY1ACbvg921SdjxYkg1AiwHTRTLR1g/esX8RdaBpJ0TOcXOFFsYMRVvl5sxxtb0tXSuUrT+Ch4SUCY7X8= + +install: + - make install-dependencies + - make install-tools + - make install + +script: + - make lint + - make test-with-coverage + - gover + - if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then goveralls -coverprofile=gover.coverprofile -service=travis-ci -repotoken $COVERALLS_TOKEN; fi diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/APACHE-LICENSE-2.0 b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/APACHE-LICENSE-2.0 new file mode 100644 index 0000000000..f433b1a53f --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/APACHE-LICENSE-2.0 @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/AUTHORS b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/AUTHORS new file mode 100644 index 0000000000..2d7bb2bf57 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/AUTHORS @@ -0,0 +1,25 @@ +# This is the official list of go-diff authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. 
+# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Danny Yoo +James Kolb +Jonathan Amsterdam +Markus Zimmermann +Matt Kovars +Örjan Persson +Osman Masood +Robert Carlsen +Rory Flynn +Sergi Mansilla +Shatrugna Sadhu +Shawn Smith +Stas Maksimov +Tor Arvid Lund +Zac Bergquist diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/CONTRIBUTORS b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/CONTRIBUTORS new file mode 100644 index 0000000000..369e3d5519 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/CONTRIBUTORS @@ -0,0 +1,32 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the go-diff +# repository. +# +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, ACME Inc. employees would be listed here +# but not in AUTHORS, because ACME Inc. would hold the copyright. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file. +# +# Names should be added to this file like so: +# Name +# +# Please keep the list sorted. 
+ +Danny Yoo +James Kolb +Jonathan Amsterdam +Markus Zimmermann +Matt Kovars +Örjan Persson +Osman Masood +Robert Carlsen +Rory Flynn +Sergi Mansilla +Shatrugna Sadhu +Shawn Smith +Stas Maksimov +Tor Arvid Lund +Zac Bergquist diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/LICENSE b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/LICENSE new file mode 100644 index 0000000000..937942c2b2 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2012-2016 The go-diff Authors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
+ diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/Makefile b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/Makefile new file mode 100644 index 0000000000..e013f0b31c --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/Makefile @@ -0,0 +1,44 @@ +.PHONY: all clean clean-coverage install install-dependencies install-tools lint test test-verbose test-with-coverage + +export ARGS := $(wordlist 2,$(words $(MAKECMDGOALS)),$(MAKECMDGOALS)) +export PKG := github.com/sergi/go-diff +export ROOT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) + +$(eval $(ARGS):;@:) # turn arguments into do-nothing targets +export ARGS + +ifdef ARGS + PKG_TEST := $(ARGS) +else + PKG_TEST := $(PKG)/... +endif + +all: install-tools install-dependencies install lint test + +clean: + go clean -i $(PKG)/... + go clean -i -race $(PKG)/... +clean-coverage: + find $(ROOT_DIR) | grep .coverprofile | xargs rm +install: + go install -v $(PKG)/... +install-dependencies: + go get -t -v $(PKG)/... + go build -v $(PKG)/... +install-tools: + # Install linting tools + go get -u -v github.com/golang/lint/... + go get -u -v github.com/kisielk/errcheck/... + + # Install code coverage tools + go get -u -v github.com/onsi/ginkgo/ginkgo/... + go get -u -v github.com/modocache/gover/... + go get -u -v github.com/mattn/goveralls/... 
+lint: + $(ROOT_DIR)/scripts/lint.sh +test: + go test -race -test.timeout 120s $(PKG_TEST) +test-verbose: + go test -race -test.timeout 120s -v $(PKG_TEST) +test-with-coverage: + ginkgo -r -cover -race -skipPackage="testdata" diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/README.md b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/README.md new file mode 100644 index 0000000000..597437bc75 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/README.md @@ -0,0 +1,84 @@ +# go-diff [![GoDoc](https://godoc.org/github.com/sergi/go-diff?status.png)](https://godoc.org/github.com/sergi/go-diff/diffmatchpatch) [![Build Status](https://travis-ci.org/sergi/go-diff.svg?branch=master)](https://travis-ci.org/sergi/go-diff) [![Coverage Status](https://coveralls.io/repos/sergi/go-diff/badge.png?branch=master)](https://coveralls.io/r/sergi/go-diff?branch=master) + +go-diff offers algorithms to perform operations required for synchronizing plain text: + +- Compare two texts and return their differences. +- Perform fuzzy matching of text. +- Apply patches onto text. + +## Installation + +```bash +go get -u github.com/sergi/go-diff/... +``` + +## Usage + +The following example compares two texts and writes out the differences to standard output. + +```go +package main + +import ( + "fmt" + + "github.com/sergi/go-diff/diffmatchpatch" +) + +const ( + text1 = "Lorem ipsum dolor." + text2 = "Lorem dolor sit amet." +) + +func main() { + dmp := diffmatchpatch.New() + + diffs := dmp.DiffMain(text1, text2, false) + + fmt.Println(dmp.DiffPrettyText(diffs)) +} +``` + +## Found a bug or are you missing a feature in go-diff? + +Please make sure to have the latest version of go-diff. If the problem still persists go through the [open issues](https://github.com/sergi/go-diff/issues) in the tracker first. 
If you cannot find your request just open up a [new issue](https://github.com/sergi/go-diff/issues/new). + +## How to contribute? + +You want to contribute to go-diff? GREAT! If you are here because of a bug you want to fix or a feature you want to add, you can just read on. Otherwise we have a list of [open issues in the tracker](https://github.com/sergi/go-diff/issues). Just choose something you think you can work on and discuss your plans in the issue by commenting on it. + +Please make sure that every behavioral change is accompanied by test cases. Additionally, every contribution must pass the `lint` and `test` Makefile targets which can be run using the following commands in the repository root directory. + +```bash +make lint +make test +``` + +After your contribution passes these commands, [create a PR](https://help.github.com/articles/creating-a-pull-request/) and we will review your contribution. + +## Origins + +go-diff is a Go language port of Neil Fraser's google-diff-match-patch code. His original code is available at [http://code.google.com/p/google-diff-match-patch/](http://code.google.com/p/google-diff-match-patch/). + +## Copyright and License + +The original Google Diff, Match and Patch Library is licensed under the [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0). The full terms of that license are included here in the [APACHE-LICENSE-2.0](/APACHE-LICENSE-2.0) file. + +Diff, Match and Patch Library + +> Written by Neil Fraser +> Copyright (c) 2006 Google Inc. +> + +This Go version of Diff, Match and Patch Library is licensed under the [MIT License](http://www.opensource.org/licenses/MIT) (a.k.a. the Expat License) which is included here in the [LICENSE](/LICENSE) file. + +Go version of Diff, Match and Patch Library + +> Copyright (c) 2012-2016 The go-diff authors. All rights reserved. 
+> + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/diff.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/diff.go new file mode 100644 index 0000000000..cb25b43757 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/diff.go @@ -0,0 +1,1345 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. 
+// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "bytes" + "errors" + "fmt" + "html" + "math" + "net/url" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Operation defines the operation of a diff item. +type Operation int8 + +//go:generate stringer -type=Operation -trimprefix=Diff + +const ( + // DiffDelete item represents a delete diff. + DiffDelete Operation = -1 + // DiffInsert item represents an insert diff. + DiffInsert Operation = 1 + // DiffEqual item represents an equal diff. + DiffEqual Operation = 0 +) + +// Diff represents one diff operation +type Diff struct { + Type Operation + Text string +} + +// splice removes amount elements from slice at index index, replacing them with elements. +func splice(slice []Diff, index int, amount int, elements ...Diff) []Diff { + if len(elements) == amount { + // Easy case: overwrite the relevant items. + copy(slice[index:], elements) + return slice + } + if len(elements) < amount { + // Fewer new items than old. + // Copy in the new items. + copy(slice[index:], elements) + // Shift the remaining items left. + copy(slice[index+len(elements):], slice[index+amount:]) + // Calculate the new end of the slice. + end := len(slice) - amount + len(elements) + // Zero stranded elements at end so that they can be garbage collected. + tail := slice[end:] + for i := range tail { + tail[i] = Diff{} + } + return slice[:end] + } + // More new items than old. + // Make room in slice for new elements. + // There's probably an even more efficient way to do this, + // but this is simple and clear. + need := len(slice) - amount + len(elements) + for len(slice) < need { + slice = append(slice, Diff{}) + } + // Shift slice elements right to make room for new elements. + copy(slice[index+len(elements):], slice[index+amount:]) + // Copy in new elements. + copy(slice[index:], elements) + return slice +} + +// DiffMain finds the differences between two texts. 
+// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMain(text1, text2 string, checklines bool) []Diff { + return dmp.DiffMainRunes([]rune(text1), []rune(text2), checklines) +} + +// DiffMainRunes finds the differences between two rune sequences. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMainRunes(text1, text2 []rune, checklines bool) []Diff { + var deadline time.Time + if dmp.DiffTimeout > 0 { + deadline = time.Now().Add(dmp.DiffTimeout) + } + return dmp.diffMainRunes(text1, text2, checklines, deadline) +} + +func (dmp *DiffMatchPatch) diffMainRunes(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + if runesEqual(text1, text2) { + var diffs []Diff + if len(text1) > 0 { + diffs = append(diffs, Diff{DiffEqual, string(text1)}) + } + return diffs + } + // Trim off common prefix (speedup). + commonlength := commonPrefixLength(text1, text2) + commonprefix := text1[:commonlength] + text1 = text1[commonlength:] + text2 = text2[commonlength:] + + // Trim off common suffix (speedup). + commonlength = commonSuffixLength(text1, text2) + commonsuffix := text1[len(text1)-commonlength:] + text1 = text1[:len(text1)-commonlength] + text2 = text2[:len(text2)-commonlength] + + // Compute the diff on the middle block. + diffs := dmp.diffCompute(text1, text2, checklines, deadline) + + // Restore the prefix and suffix. + if len(commonprefix) != 0 { + diffs = append([]Diff{Diff{DiffEqual, string(commonprefix)}}, diffs...) + } + if len(commonsuffix) != 0 { + diffs = append(diffs, Diff{DiffEqual, string(commonsuffix)}) + } + + return dmp.DiffCleanupMerge(diffs) +} + +// diffCompute finds the differences between two rune slices. Assumes that the texts do not have any common prefix or suffix. 
+func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + diffs := []Diff{} + if len(text1) == 0 { + // Just add some text (speedup). + return append(diffs, Diff{DiffInsert, string(text2)}) + } else if len(text2) == 0 { + // Just delete some text (speedup). + return append(diffs, Diff{DiffDelete, string(text1)}) + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if i := runesIndex(longtext, shorttext); i != -1 { + op := DiffInsert + // Swap insertions for deletions if diff is reversed. + if len(text1) > len(text2) { + op = DiffDelete + } + // Shorter text is inside the longer text (speedup). + return []Diff{ + Diff{op, string(longtext[:i])}, + Diff{DiffEqual, string(shorttext)}, + Diff{op, string(longtext[i+len(shorttext):])}, + } + } else if len(shorttext) == 1 { + // Single character string. + // After the previous speedup, the character can't be an equality. + return []Diff{ + Diff{DiffDelete, string(text1)}, + Diff{DiffInsert, string(text2)}, + } + // Check to see if the problem can be split in two. + } else if hm := dmp.diffHalfMatch(text1, text2); hm != nil { + // A half-match was found, sort out the return data. + text1A := hm[0] + text1B := hm[1] + text2A := hm[2] + text2B := hm[3] + midCommon := hm[4] + // Send both pairs off for separate processing. + diffsA := dmp.diffMainRunes(text1A, text2A, checklines, deadline) + diffsB := dmp.diffMainRunes(text1B, text2B, checklines, deadline) + // Merge the results. + diffs := diffsA + diffs = append(diffs, Diff{DiffEqual, string(midCommon)}) + diffs = append(diffs, diffsB...) 
+ return diffs + } else if checklines && len(text1) > 100 && len(text2) > 100 { + return dmp.diffLineMode(text1, text2, deadline) + } + return dmp.diffBisect(text1, text2, deadline) +} + +// diffLineMode does a quick line-level diff on both []runes, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) diffLineMode(text1, text2 []rune, deadline time.Time) []Diff { + // Scan the text on a line-by-line basis first. + text1, text2, linearray := dmp.diffLinesToRunes(text1, text2) + + diffs := dmp.diffMainRunes(text1, text2, false, deadline) + + // Convert the diff back to original text. + diffs = dmp.DiffCharsToLines(diffs, linearray) + // Eliminate freak matches (e.g. blank lines) + diffs = dmp.DiffCleanupSemantic(diffs) + + // Rediff any replacement blocks, this time character-by-character. + // Add a dummy entry at the end. + diffs = append(diffs, Diff{DiffEqual, ""}) + + pointer := 0 + countDelete := 0 + countInsert := 0 + + // NOTE: Rune slices are slower than using strings in this case. + textDelete := "" + textInsert := "" + + for pointer < len(diffs) { + switch diffs[pointer].Type { + case DiffInsert: + countInsert++ + textInsert += diffs[pointer].Text + case DiffDelete: + countDelete++ + textDelete += diffs[pointer].Text + case DiffEqual: + // Upon reaching an equality, check for prior redundancies. + if countDelete >= 1 && countInsert >= 1 { + // Delete the offending records and add the merged ones. + diffs = splice(diffs, pointer-countDelete-countInsert, + countDelete+countInsert) + + pointer = pointer - countDelete - countInsert + a := dmp.diffMainRunes([]rune(textDelete), []rune(textInsert), false, deadline) + for j := len(a) - 1; j >= 0; j-- { + diffs = splice(diffs, pointer, 0, a[j]) + } + pointer = pointer + len(a) + } + + countInsert = 0 + countDelete = 0 + textDelete = "" + textInsert = "" + } + pointer++ + } + + return diffs[:len(diffs)-1] // Remove the dummy entry at the end. 
+} + +// DiffBisect finds the 'middle snake' of a diff, split the problem in two and return the recursively constructed diff. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +// See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) DiffBisect(text1, text2 string, deadline time.Time) []Diff { + // Unused in this code, but retained for interface compatibility. + return dmp.diffBisect([]rune(text1), []rune(text2), deadline) +} + +// diffBisect finds the 'middle snake' of a diff, splits the problem in two and returns the recursively constructed diff. +// See Myers's 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) diffBisect(runes1, runes2 []rune, deadline time.Time) []Diff { + // Cache the text lengths to prevent multiple calls. + runes1Len, runes2Len := len(runes1), len(runes2) + + maxD := (runes1Len + runes2Len + 1) / 2 + vOffset := maxD + vLength := 2 * maxD + + v1 := make([]int, vLength) + v2 := make([]int, vLength) + for i := range v1 { + v1[i] = -1 + v2[i] = -1 + } + v1[vOffset+1] = 0 + v2[vOffset+1] = 0 + + delta := runes1Len - runes2Len + // If the total number of characters is odd, then the front path will collide with the reverse path. + front := (delta%2 != 0) + // Offsets for start and end of k loop. Prevents mapping of space beyond the grid. + k1start := 0 + k1end := 0 + k2start := 0 + k2end := 0 + for d := 0; d < maxD; d++ { + // Bail out if deadline is reached. + if !deadline.IsZero() && d%16 == 0 && time.Now().After(deadline) { + break + } + + // Walk the front path one step. 
+ for k1 := -d + k1start; k1 <= d-k1end; k1 += 2 { + k1Offset := vOffset + k1 + var x1 int + + if k1 == -d || (k1 != d && v1[k1Offset-1] < v1[k1Offset+1]) { + x1 = v1[k1Offset+1] + } else { + x1 = v1[k1Offset-1] + 1 + } + + y1 := x1 - k1 + for x1 < runes1Len && y1 < runes2Len { + if runes1[x1] != runes2[y1] { + break + } + x1++ + y1++ + } + v1[k1Offset] = x1 + if x1 > runes1Len { + // Ran off the right of the graph. + k1end += 2 + } else if y1 > runes2Len { + // Ran off the bottom of the graph. + k1start += 2 + } else if front { + k2Offset := vOffset + delta - k1 + if k2Offset >= 0 && k2Offset < vLength && v2[k2Offset] != -1 { + // Mirror x2 onto top-left coordinate system. + x2 := runes1Len - v2[k2Offset] + if x1 >= x2 { + // Overlap detected. + return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) + } + } + } + } + // Walk the reverse path one step. + for k2 := -d + k2start; k2 <= d-k2end; k2 += 2 { + k2Offset := vOffset + k2 + var x2 int + if k2 == -d || (k2 != d && v2[k2Offset-1] < v2[k2Offset+1]) { + x2 = v2[k2Offset+1] + } else { + x2 = v2[k2Offset-1] + 1 + } + var y2 = x2 - k2 + for x2 < runes1Len && y2 < runes2Len { + if runes1[runes1Len-x2-1] != runes2[runes2Len-y2-1] { + break + } + x2++ + y2++ + } + v2[k2Offset] = x2 + if x2 > runes1Len { + // Ran off the left of the graph. + k2end += 2 + } else if y2 > runes2Len { + // Ran off the top of the graph. + k2start += 2 + } else if !front { + k1Offset := vOffset + delta - k2 + if k1Offset >= 0 && k1Offset < vLength && v1[k1Offset] != -1 { + x1 := v1[k1Offset] + y1 := vOffset + x1 - k1Offset + // Mirror x2 onto top-left coordinate system. + x2 = runes1Len - x2 + if x1 >= x2 { + // Overlap detected. + return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) + } + } + } + } + } + // Diff took too long and hit the deadline or number of diffs equals number of characters, no commonality at all. 
+ return []Diff{ + Diff{DiffDelete, string(runes1)}, + Diff{DiffInsert, string(runes2)}, + } +} + +func (dmp *DiffMatchPatch) diffBisectSplit(runes1, runes2 []rune, x, y int, + deadline time.Time) []Diff { + runes1a := runes1[:x] + runes2a := runes2[:y] + runes1b := runes1[x:] + runes2b := runes2[y:] + + // Compute both diffs serially. + diffs := dmp.diffMainRunes(runes1a, runes2a, false, deadline) + diffsb := dmp.diffMainRunes(runes1b, runes2b, false, deadline) + + return append(diffs, diffsb...) +} + +// DiffLinesToChars splits two texts into a list of strings, and educes the texts to a string of hashes where each Unicode character represents one line. +// It's slightly faster to call DiffLinesToRunes first, followed by DiffMainRunes. +func (dmp *DiffMatchPatch) DiffLinesToChars(text1, text2 string) (string, string, []string) { + chars1, chars2, lineArray := dmp.DiffLinesToRunes(text1, text2) + return string(chars1), string(chars2), lineArray +} + +// DiffLinesToRunes splits two texts into a list of runes. Each rune represents one line. +func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) { + // '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character. + lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n' + lineHash := map[string]int{} // e.g. lineHash['Hello\n'] == 4 + + chars1 := dmp.diffLinesToRunesMunge(text1, &lineArray, lineHash) + chars2 := dmp.diffLinesToRunesMunge(text2, &lineArray, lineHash) + + return chars1, chars2, lineArray +} + +func (dmp *DiffMatchPatch) diffLinesToRunes(text1, text2 []rune) ([]rune, []rune, []string) { + return dmp.DiffLinesToRunes(string(text1), string(text2)) +} + +// diffLinesToRunesMunge splits a text into an array of strings, and reduces the texts to a []rune where each Unicode character represents one line. +// We use strings instead of []runes as input mainly because you can't use []rune as a map key. 
+func (dmp *DiffMatchPatch) diffLinesToRunesMunge(text string, lineArray *[]string, lineHash map[string]int) []rune { + // Walk the text, pulling out a substring for each line. text.split('\n') would would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect. + lineStart := 0 + lineEnd := -1 + runes := []rune{} + + for lineEnd < len(text)-1 { + lineEnd = indexOf(text, "\n", lineStart) + + if lineEnd == -1 { + lineEnd = len(text) - 1 + } + + line := text[lineStart : lineEnd+1] + lineStart = lineEnd + 1 + lineValue, ok := lineHash[line] + + if ok { + runes = append(runes, rune(lineValue)) + } else { + *lineArray = append(*lineArray, line) + lineHash[line] = len(*lineArray) - 1 + runes = append(runes, rune(len(*lineArray)-1)) + } + } + + return runes +} + +// DiffCharsToLines rehydrates the text in a diff from a string of line hashes to real lines of text. +func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff { + hydrated := make([]Diff, 0, len(diffs)) + for _, aDiff := range diffs { + chars := aDiff.Text + text := make([]string, len(chars)) + + for i, r := range chars { + text[i] = lineArray[r] + } + + aDiff.Text = strings.Join(text, "") + hydrated = append(hydrated, aDiff) + } + return hydrated +} + +// DiffCommonPrefix determines the common prefix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonPrefix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. + return commonPrefixLength([]rune(text1), []rune(text2)) +} + +// DiffCommonSuffix determines the common suffix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonSuffix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. + return commonSuffixLength([]rune(text1), []rune(text2)) +} + +// commonPrefixLength returns the length of the common prefix of two rune slices. 
+func commonPrefixLength(text1, text2 []rune) int { + // Linear search. See comment in commonSuffixLength. + n := 0 + for ; n < len(text1) && n < len(text2); n++ { + if text1[n] != text2[n] { + return n + } + } + return n +} + +// commonSuffixLength returns the length of the common suffix of two rune slices. +func commonSuffixLength(text1, text2 []rune) int { + // Use linear search rather than the binary search discussed at https://neil.fraser.name/news/2007/10/09/. + // See discussion at https://github.com/sergi/go-diff/issues/54. + i1 := len(text1) + i2 := len(text2) + for n := 0; ; n++ { + i1-- + i2-- + if i1 < 0 || i2 < 0 || text1[i1] != text2[i2] { + return n + } + } +} + +// DiffCommonOverlap determines if the suffix of one string is the prefix of another. +func (dmp *DiffMatchPatch) DiffCommonOverlap(text1 string, text2 string) int { + // Cache the text lengths to prevent multiple calls. + text1Length := len(text1) + text2Length := len(text2) + // Eliminate the null case. + if text1Length == 0 || text2Length == 0 { + return 0 + } + // Truncate the longer string. + if text1Length > text2Length { + text1 = text1[text1Length-text2Length:] + } else if text1Length < text2Length { + text2 = text2[0:text1Length] + } + textLength := int(math.Min(float64(text1Length), float64(text2Length))) + // Quick check for the worst case. + if text1 == text2 { + return textLength + } + + // Start by looking for a single character match and increase length until no match is found. Performance analysis: http://neil.fraser.name/news/2010/11/04/ + best := 0 + length := 1 + for { + pattern := text1[textLength-length:] + found := strings.Index(text2, pattern) + if found == -1 { + break + } + length += found + if found == 0 || text1[textLength-length:] == text2[0:length] { + best = length + length++ + } + } + + return best +} + +// DiffHalfMatch checks whether the two texts share a substring which is at least half the length of the longer text. 
This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) DiffHalfMatch(text1, text2 string) []string { + // Unused in this code, but retained for interface compatibility. + runeSlices := dmp.diffHalfMatch([]rune(text1), []rune(text2)) + if runeSlices == nil { + return nil + } + + result := make([]string, len(runeSlices)) + for i, r := range runeSlices { + result[i] = string(r) + } + return result +} + +func (dmp *DiffMatchPatch) diffHalfMatch(text1, text2 []rune) [][]rune { + if dmp.DiffTimeout <= 0 { + // Don't risk returning a non-optimal diff if we have unlimited time. + return nil + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if len(longtext) < 4 || len(shorttext)*2 < len(longtext) { + return nil // Pointless. + } + + // First check if the second quarter is the seed for a half-match. + hm1 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+3)/4)) + + // Check again based on the third quarter. + hm2 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+1)/2)) + + hm := [][]rune{} + if hm1 == nil && hm2 == nil { + return nil + } else if hm2 == nil { + hm = hm1 + } else if hm1 == nil { + hm = hm2 + } else { + // Both matched. Select the longest. + if len(hm1[4]) > len(hm2[4]) { + hm = hm1 + } else { + hm = hm2 + } + } + + // A half-match was found, sort out the return data. + if len(text1) > len(text2) { + return hm + } + + return [][]rune{hm[2], hm[3], hm[0], hm[1], hm[4]} +} + +// diffHalfMatchI checks if a substring of shorttext exist within longtext such that the substring is at least half the length of longtext? +// Returns a slice containing the prefix of longtext, the suffix of longtext, the prefix of shorttext, the suffix of shorttext and the common middle, or null if there was no match. 
+func (dmp *DiffMatchPatch) diffHalfMatchI(l, s []rune, i int) [][]rune { + var bestCommonA []rune + var bestCommonB []rune + var bestCommonLen int + var bestLongtextA []rune + var bestLongtextB []rune + var bestShorttextA []rune + var bestShorttextB []rune + + // Start with a 1/4 length substring at position i as a seed. + seed := l[i : i+len(l)/4] + + for j := runesIndexOf(s, seed, 0); j != -1; j = runesIndexOf(s, seed, j+1) { + prefixLength := commonPrefixLength(l[i:], s[j:]) + suffixLength := commonSuffixLength(l[:i], s[:j]) + + if bestCommonLen < suffixLength+prefixLength { + bestCommonA = s[j-suffixLength : j] + bestCommonB = s[j : j+prefixLength] + bestCommonLen = len(bestCommonA) + len(bestCommonB) + bestLongtextA = l[:i-suffixLength] + bestLongtextB = l[i+prefixLength:] + bestShorttextA = s[:j-suffixLength] + bestShorttextB = s[j+prefixLength:] + } + } + + if bestCommonLen*2 < len(l) { + return nil + } + + return [][]rune{ + bestLongtextA, + bestLongtextB, + bestShorttextA, + bestShorttextB, + append(bestCommonA, bestCommonB...), + } +} + +// DiffCleanupSemantic reduces the number of edits by eliminating semantically trivial equalities. +func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff { + changes := false + // Stack of indices where equalities are found. + equalities := make([]int, 0, len(diffs)) + + var lastequality string + // Always equal to diffs[equalities[equalitiesLength - 1]][1] + var pointer int // Index of current position. + // Number of characters that changed prior to the equality. + var lengthInsertions1, lengthDeletions1 int + // Number of characters that changed after the equality. + var lengthInsertions2, lengthDeletions2 int + + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { + // Equality found. 
+ equalities = append(equalities, pointer) + lengthInsertions1 = lengthInsertions2 + lengthDeletions1 = lengthDeletions2 + lengthInsertions2 = 0 + lengthDeletions2 = 0 + lastequality = diffs[pointer].Text + } else { + // An insertion or deletion. + + if diffs[pointer].Type == DiffInsert { + lengthInsertions2 += len(diffs[pointer].Text) + } else { + lengthDeletions2 += len(diffs[pointer].Text) + } + // Eliminate an equality that is smaller or equal to the edits on both sides of it. + difference1 := int(math.Max(float64(lengthInsertions1), float64(lengthDeletions1))) + difference2 := int(math.Max(float64(lengthInsertions2), float64(lengthDeletions2))) + if len(lastequality) > 0 && + (len(lastequality) <= difference1) && + (len(lastequality) <= difference2) { + // Duplicate record. + insPoint := equalities[len(equalities)-1] + diffs = splice(diffs, insPoint, 0, Diff{DiffDelete, lastequality}) + + // Change second copy to insert. + diffs[insPoint+1].Type = DiffInsert + // Throw away the equality we just deleted. + equalities = equalities[:len(equalities)-1] + + if len(equalities) > 0 { + equalities = equalities[:len(equalities)-1] + } + pointer = -1 + if len(equalities) > 0 { + pointer = equalities[len(equalities)-1] + } + + lengthInsertions1 = 0 // Reset the counters. + lengthDeletions1 = 0 + lengthInsertions2 = 0 + lengthDeletions2 = 0 + lastequality = "" + changes = true + } + } + pointer++ + } + + // Normalize the diff. + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + diffs = dmp.DiffCleanupSemanticLossless(diffs) + // Find any overlaps between deletions and insertions. + // e.g: abcxxxxxxdef + // -> abcxxxdef + // e.g: xxxabcdefxxx + // -> defxxxabc + // Only extract an overlap if it is as big as the edit ahead or behind it. 
+ pointer = 1 + for pointer < len(diffs) { + if diffs[pointer-1].Type == DiffDelete && + diffs[pointer].Type == DiffInsert { + deletion := diffs[pointer-1].Text + insertion := diffs[pointer].Text + overlapLength1 := dmp.DiffCommonOverlap(deletion, insertion) + overlapLength2 := dmp.DiffCommonOverlap(insertion, deletion) + if overlapLength1 >= overlapLength2 { + if float64(overlapLength1) >= float64(len(deletion))/2 || + float64(overlapLength1) >= float64(len(insertion))/2 { + + // Overlap found. Insert an equality and trim the surrounding edits. + diffs = splice(diffs, pointer, 0, Diff{DiffEqual, insertion[:overlapLength1]}) + diffs[pointer-1].Text = + deletion[0 : len(deletion)-overlapLength1] + diffs[pointer+1].Text = insertion[overlapLength1:] + pointer++ + } + } else { + if float64(overlapLength2) >= float64(len(deletion))/2 || + float64(overlapLength2) >= float64(len(insertion))/2 { + // Reverse overlap found. Insert an equality and swap and trim the surrounding edits. + overlap := Diff{DiffEqual, deletion[:overlapLength2]} + diffs = splice(diffs, pointer, 0, overlap) + diffs[pointer-1].Type = DiffInsert + diffs[pointer-1].Text = insertion[0 : len(insertion)-overlapLength2] + diffs[pointer+1].Type = DiffDelete + diffs[pointer+1].Text = deletion[overlapLength2:] + pointer++ + } + } + pointer++ + } + pointer++ + } + + return diffs +} + +// Define some regex patterns for matching boundaries. +var ( + nonAlphaNumericRegex = regexp.MustCompile(`[^a-zA-Z0-9]`) + whitespaceRegex = regexp.MustCompile(`\s`) + linebreakRegex = regexp.MustCompile(`[\r\n]`) + blanklineEndRegex = regexp.MustCompile(`\n\r?\n$`) + blanklineStartRegex = regexp.MustCompile(`^\r?\n\r?\n`) +) + +// diffCleanupSemanticScore computes a score representing whether the internal boundary falls on logical boundaries. +// Scores range from 6 (best) to 0 (worst). Closure, but does not reference any external variables. 
+func diffCleanupSemanticScore(one, two string) int { + if len(one) == 0 || len(two) == 0 { + // Edges are the best. + return 6 + } + + // Each port of this function behaves slightly differently due to subtle differences in each language's definition of things like 'whitespace'. Since this function's purpose is largely cosmetic, the choice has been made to use each language's native features rather than force total conformity. + rune1, _ := utf8.DecodeLastRuneInString(one) + rune2, _ := utf8.DecodeRuneInString(two) + char1 := string(rune1) + char2 := string(rune2) + + nonAlphaNumeric1 := nonAlphaNumericRegex.MatchString(char1) + nonAlphaNumeric2 := nonAlphaNumericRegex.MatchString(char2) + whitespace1 := nonAlphaNumeric1 && whitespaceRegex.MatchString(char1) + whitespace2 := nonAlphaNumeric2 && whitespaceRegex.MatchString(char2) + lineBreak1 := whitespace1 && linebreakRegex.MatchString(char1) + lineBreak2 := whitespace2 && linebreakRegex.MatchString(char2) + blankLine1 := lineBreak1 && blanklineEndRegex.MatchString(one) + blankLine2 := lineBreak2 && blanklineEndRegex.MatchString(two) + + if blankLine1 || blankLine2 { + // Five points for blank lines. + return 5 + } else if lineBreak1 || lineBreak2 { + // Four points for line breaks. + return 4 + } else if nonAlphaNumeric1 && !whitespace1 && whitespace2 { + // Three points for end of sentences. + return 3 + } else if whitespace1 || whitespace2 { + // Two points for whitespace. + return 2 + } else if nonAlphaNumeric1 || nonAlphaNumeric2 { + // One point for non-alphanumeric. + return 1 + } + return 0 +} + +// DiffCleanupSemanticLossless looks for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary. +// E.g: The cat came. -> The cat came. +func (dmp *DiffMatchPatch) DiffCleanupSemanticLossless(diffs []Diff) []Diff { + pointer := 1 + + // Intentionally ignore the first and last element (don't need checking). 
+ for pointer < len(diffs)-1 { + if diffs[pointer-1].Type == DiffEqual && + diffs[pointer+1].Type == DiffEqual { + + // This is a single edit surrounded by equalities. + equality1 := diffs[pointer-1].Text + edit := diffs[pointer].Text + equality2 := diffs[pointer+1].Text + + // First, shift the edit as far left as possible. + commonOffset := dmp.DiffCommonSuffix(equality1, edit) + if commonOffset > 0 { + commonString := edit[len(edit)-commonOffset:] + equality1 = equality1[0 : len(equality1)-commonOffset] + edit = commonString + edit[:len(edit)-commonOffset] + equality2 = commonString + equality2 + } + + // Second, step character by character right, looking for the best fit. + bestEquality1 := equality1 + bestEdit := edit + bestEquality2 := equality2 + bestScore := diffCleanupSemanticScore(equality1, edit) + + diffCleanupSemanticScore(edit, equality2) + + for len(edit) != 0 && len(equality2) != 0 { + _, sz := utf8.DecodeRuneInString(edit) + if len(equality2) < sz || edit[:sz] != equality2[:sz] { + break + } + equality1 += edit[:sz] + edit = edit[sz:] + equality2[:sz] + equality2 = equality2[sz:] + score := diffCleanupSemanticScore(equality1, edit) + + diffCleanupSemanticScore(edit, equality2) + // The >= encourages trailing rather than leading whitespace on edits. + if score >= bestScore { + bestScore = score + bestEquality1 = equality1 + bestEdit = edit + bestEquality2 = equality2 + } + } + + if diffs[pointer-1].Text != bestEquality1 { + // We have an improvement, save it back to the diff. + if len(bestEquality1) != 0 { + diffs[pointer-1].Text = bestEquality1 + } else { + diffs = splice(diffs, pointer-1, 1) + pointer-- + } + + diffs[pointer].Text = bestEdit + if len(bestEquality2) != 0 { + diffs[pointer+1].Text = bestEquality2 + } else { + diffs = append(diffs[:pointer+1], diffs[pointer+2:]...) + pointer-- + } + } + } + pointer++ + } + + return diffs +} + +// DiffCleanupEfficiency reduces the number of edits by eliminating operationally trivial equalities. 
+func (dmp *DiffMatchPatch) DiffCleanupEfficiency(diffs []Diff) []Diff { + changes := false + // Stack of indices where equalities are found. + type equality struct { + data int + next *equality + } + var equalities *equality + // Always equal to equalities[equalitiesLength-1][1] + lastequality := "" + pointer := 0 // Index of current position. + // Is there an insertion operation before the last equality. + preIns := false + // Is there a deletion operation before the last equality. + preDel := false + // Is there an insertion operation after the last equality. + postIns := false + // Is there a deletion operation after the last equality. + postDel := false + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { // Equality found. + if len(diffs[pointer].Text) < dmp.DiffEditCost && + (postIns || postDel) { + // Candidate found. + equalities = &equality{ + data: pointer, + next: equalities, + } + preIns = postIns + preDel = postDel + lastequality = diffs[pointer].Text + } else { + // Not a candidate, and can never become one. + equalities = nil + lastequality = "" + } + postIns = false + postDel = false + } else { // An insertion or deletion. + if diffs[pointer].Type == DiffDelete { + postDel = true + } else { + postIns = true + } + + // Five types to be split: + // ABXYCD + // AXCD + // ABXC + // AXCD + // ABXC + var sumPres int + if preIns { + sumPres++ + } + if preDel { + sumPres++ + } + if postIns { + sumPres++ + } + if postDel { + sumPres++ + } + if len(lastequality) > 0 && + ((preIns && preDel && postIns && postDel) || + ((len(lastequality) < dmp.DiffEditCost/2) && sumPres == 3)) { + + insPoint := equalities.data + + // Duplicate record. + diffs = splice(diffs, insPoint, 0, Diff{DiffDelete, lastequality}) + + // Change second copy to insert. + diffs[insPoint+1].Type = DiffInsert + // Throw away the equality we just deleted. 
+ equalities = equalities.next + lastequality = "" + + if preIns && preDel { + // No changes made which could affect previous entry, keep going. + postIns = true + postDel = true + equalities = nil + } else { + if equalities != nil { + equalities = equalities.next + } + if equalities != nil { + pointer = equalities.data + } else { + pointer = -1 + } + postIns = false + postDel = false + } + changes = true + } + } + pointer++ + } + + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + + return diffs +} + +// DiffCleanupMerge reorders and merges like edit sections. Merge equalities. +// Any edit section can move as long as it doesn't cross an equality. +func (dmp *DiffMatchPatch) DiffCleanupMerge(diffs []Diff) []Diff { + // Add a dummy entry at the end. + diffs = append(diffs, Diff{DiffEqual, ""}) + pointer := 0 + countDelete := 0 + countInsert := 0 + commonlength := 0 + textDelete := []rune(nil) + textInsert := []rune(nil) + + for pointer < len(diffs) { + switch diffs[pointer].Type { + case DiffInsert: + countInsert++ + textInsert = append(textInsert, []rune(diffs[pointer].Text)...) + pointer++ + break + case DiffDelete: + countDelete++ + textDelete = append(textDelete, []rune(diffs[pointer].Text)...) + pointer++ + break + case DiffEqual: + // Upon reaching an equality, check for prior redundancies. + if countDelete+countInsert > 1 { + if countDelete != 0 && countInsert != 0 { + // Factor out any common prefixies. + commonlength = commonPrefixLength(textInsert, textDelete) + if commonlength != 0 { + x := pointer - countDelete - countInsert + if x > 0 && diffs[x-1].Type == DiffEqual { + diffs[x-1].Text += string(textInsert[:commonlength]) + } else { + diffs = append([]Diff{Diff{DiffEqual, string(textInsert[:commonlength])}}, diffs...) + pointer++ + } + textInsert = textInsert[commonlength:] + textDelete = textDelete[commonlength:] + } + // Factor out any common suffixies. 
+ commonlength = commonSuffixLength(textInsert, textDelete) + if commonlength != 0 { + insertIndex := len(textInsert) - commonlength + deleteIndex := len(textDelete) - commonlength + diffs[pointer].Text = string(textInsert[insertIndex:]) + diffs[pointer].Text + textInsert = textInsert[:insertIndex] + textDelete = textDelete[:deleteIndex] + } + } + // Delete the offending records and add the merged ones. + if countDelete == 0 { + diffs = splice(diffs, pointer-countInsert, + countDelete+countInsert, + Diff{DiffInsert, string(textInsert)}) + } else if countInsert == 0 { + diffs = splice(diffs, pointer-countDelete, + countDelete+countInsert, + Diff{DiffDelete, string(textDelete)}) + } else { + diffs = splice(diffs, pointer-countDelete-countInsert, + countDelete+countInsert, + Diff{DiffDelete, string(textDelete)}, + Diff{DiffInsert, string(textInsert)}) + } + + pointer = pointer - countDelete - countInsert + 1 + if countDelete != 0 { + pointer++ + } + if countInsert != 0 { + pointer++ + } + } else if pointer != 0 && diffs[pointer-1].Type == DiffEqual { + // Merge this equality with the previous one. + diffs[pointer-1].Text += diffs[pointer].Text + diffs = append(diffs[:pointer], diffs[pointer+1:]...) + } else { + pointer++ + } + countInsert = 0 + countDelete = 0 + textDelete = nil + textInsert = nil + break + } + } + + if len(diffs[len(diffs)-1].Text) == 0 { + diffs = diffs[0 : len(diffs)-1] // Remove the dummy entry at the end. + } + + // Second pass: look for single edits surrounded on both sides by equalities which can be shifted sideways to eliminate an equality. E.g: ABAC -> ABAC + changes := false + pointer = 1 + // Intentionally ignore the first and last element (don't need checking). + for pointer < (len(diffs) - 1) { + if diffs[pointer-1].Type == DiffEqual && + diffs[pointer+1].Type == DiffEqual { + // This is a single edit surrounded by equalities. 
+ if strings.HasSuffix(diffs[pointer].Text, diffs[pointer-1].Text) { + // Shift the edit over the previous equality. + diffs[pointer].Text = diffs[pointer-1].Text + + diffs[pointer].Text[:len(diffs[pointer].Text)-len(diffs[pointer-1].Text)] + diffs[pointer+1].Text = diffs[pointer-1].Text + diffs[pointer+1].Text + diffs = splice(diffs, pointer-1, 1) + changes = true + } else if strings.HasPrefix(diffs[pointer].Text, diffs[pointer+1].Text) { + // Shift the edit over the next equality. + diffs[pointer-1].Text += diffs[pointer+1].Text + diffs[pointer].Text = + diffs[pointer].Text[len(diffs[pointer+1].Text):] + diffs[pointer+1].Text + diffs = splice(diffs, pointer+1, 1) + changes = true + } + } + pointer++ + } + + // If shifts were made, the diff needs reordering and another shift sweep. + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + + return diffs +} + +// DiffXIndex returns the equivalent location in s2. +func (dmp *DiffMatchPatch) DiffXIndex(diffs []Diff, loc int) int { + chars1 := 0 + chars2 := 0 + lastChars1 := 0 + lastChars2 := 0 + lastDiff := Diff{} + for i := 0; i < len(diffs); i++ { + aDiff := diffs[i] + if aDiff.Type != DiffInsert { + // Equality or deletion. + chars1 += len(aDiff.Text) + } + if aDiff.Type != DiffDelete { + // Equality or insertion. + chars2 += len(aDiff.Text) + } + if chars1 > loc { + // Overshot the location. + lastDiff = aDiff + break + } + lastChars1 = chars1 + lastChars2 = chars2 + } + if lastDiff.Type == DiffDelete { + // The location was deleted. + return lastChars2 + } + // Add the remaining character length. + return lastChars2 + (loc - lastChars1) +} + +// DiffPrettyHtml converts a []Diff into a pretty HTML report. +// It is intended as an example from which to write one's own display functions. +func (dmp *DiffMatchPatch) DiffPrettyHtml(diffs []Diff) string { + var buff bytes.Buffer + for _, diff := range diffs { + text := strings.Replace(html.EscapeString(diff.Text), "\n", "¶
", -1) + switch diff.Type { + case DiffInsert: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + case DiffDelete: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + case DiffEqual: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + } + } + return buff.String() +} + +// DiffPrettyText converts a []Diff into a colored text report. +func (dmp *DiffMatchPatch) DiffPrettyText(diffs []Diff) string { + var buff bytes.Buffer + for _, diff := range diffs { + text := diff.Text + + switch diff.Type { + case DiffInsert: + _, _ = buff.WriteString("\x1b[32m") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("\x1b[0m") + case DiffDelete: + _, _ = buff.WriteString("\x1b[31m") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("\x1b[0m") + case DiffEqual: + _, _ = buff.WriteString(text) + } + } + + return buff.String() +} + +// DiffText1 computes and returns the source text (all equalities and deletions). +func (dmp *DiffMatchPatch) DiffText1(diffs []Diff) string { + //StringBuilder text = new StringBuilder() + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffInsert { + _, _ = text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// DiffText2 computes and returns the destination text (all equalities and insertions). +func (dmp *DiffMatchPatch) DiffText2(diffs []Diff) string { + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffDelete { + _, _ = text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// DiffLevenshtein computes the Levenshtein distance that is the number of inserted, deleted or substituted characters. 
+func (dmp *DiffMatchPatch) DiffLevenshtein(diffs []Diff) int { + levenshtein := 0 + insertions := 0 + deletions := 0 + + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + insertions += utf8.RuneCountInString(aDiff.Text) + case DiffDelete: + deletions += utf8.RuneCountInString(aDiff.Text) + case DiffEqual: + // A deletion and an insertion is one substitution. + levenshtein += max(insertions, deletions) + insertions = 0 + deletions = 0 + } + } + + levenshtein += max(insertions, deletions) + return levenshtein +} + +// DiffToDelta crushes the diff into an encoded string which describes the operations required to transform text1 into text2. +// E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. Operations are tab-separated. Inserted text is escaped using %xx notation. +func (dmp *DiffMatchPatch) DiffToDelta(diffs []Diff) string { + var text bytes.Buffer + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + _, _ = text.WriteString("+") + _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + _, _ = text.WriteString("\t") + break + case DiffDelete: + _, _ = text.WriteString("-") + _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + _, _ = text.WriteString("\t") + break + case DiffEqual: + _, _ = text.WriteString("=") + _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + _, _ = text.WriteString("\t") + break + } + } + delta := text.String() + if len(delta) != 0 { + // Strip off trailing tab character. + delta = delta[0 : utf8.RuneCountInString(delta)-1] + delta = unescaper.Replace(delta) + } + return delta +} + +// DiffFromDelta given the original text1, and an encoded string which describes the operations required to transform text1 into text2, comAdde the full diff. 
+func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Diff, err error) { + i := 0 + runes := []rune(text1) + + for _, token := range strings.Split(delta, "\t") { + if len(token) == 0 { + // Blank tokens are ok (from a trailing \t). + continue + } + + // Each token begins with a one character parameter which specifies the operation of this token (delete, insert, equality). + param := token[1:] + + switch op := token[0]; op { + case '+': + // Decode would Diff all "+" to " " + param = strings.Replace(param, "+", "%2b", -1) + param, err = url.QueryUnescape(param) + if err != nil { + return nil, err + } + if !utf8.ValidString(param) { + return nil, fmt.Errorf("invalid UTF-8 token: %q", param) + } + + diffs = append(diffs, Diff{DiffInsert, param}) + case '=', '-': + n, err := strconv.ParseInt(param, 10, 0) + if err != nil { + return nil, err + } else if n < 0 { + return nil, errors.New("Negative number in DiffFromDelta: " + param) + } + + i += int(n) + // Break out if we are out of bounds, go1.6 can't handle this very well + if i > len(runes) { + break + } + // Remember that string slicing is by byte - we want by rune here. + text := string(runes[i-int(n) : i]) + + if op == '=' { + diffs = append(diffs, Diff{DiffEqual, text}) + } else { + diffs = append(diffs, Diff{DiffDelete, text}) + } + default: + // Anything else is an error. 
+ return nil, errors.New("Invalid diff operation in DiffFromDelta: " + string(token[0])) + } + } + + if i != len(runes) { + return nil, fmt.Errorf("Delta length (%v) is different from source text length (%v)", i, len(text1)) + } + + return diffs, nil +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/diffmatchpatch.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/diffmatchpatch.go new file mode 100644 index 0000000000..d3acc32ce1 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/diffmatchpatch.go @@ -0,0 +1,46 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +// Package diffmatchpatch offers robust algorithms to perform the operations required for synchronizing plain text. +package diffmatchpatch + +import ( + "time" +) + +// DiffMatchPatch holds the configuration for diff-match-patch operations. +type DiffMatchPatch struct { + // Number of seconds to map a diff before giving up (0 for infinity). + DiffTimeout time.Duration + // Cost of an empty edit operation in terms of edit characters. + DiffEditCost int + // How far to search for a match (0 = exact location, 1000+ = broad match). A match this many characters away from the expected location will add 1.0 to the score (0.0 is a perfect match). + MatchDistance int + // When deleting a large block of text (over ~64 characters), how close do the contents have to be to match the expected contents. (0.0 = perfection, 1.0 = very loose). 
Note that MatchThreshold controls how closely the end points of a delete need to match. + PatchDeleteThreshold float64 + // Chunk size for context length. + PatchMargin int + // The number of bits in an int. + MatchMaxBits int + // At what point is no match declared (0.0 = perfection, 1.0 = very loose). + MatchThreshold float64 +} + +// New creates a new DiffMatchPatch object with default parameters. +func New() *DiffMatchPatch { + // Defaults. + return &DiffMatchPatch{ + DiffTimeout: time.Second, + DiffEditCost: 4, + MatchThreshold: 0.5, + MatchDistance: 1000, + PatchDeleteThreshold: 0.5, + PatchMargin: 4, + MatchMaxBits: 32, + } +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/match.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/match.go new file mode 100644 index 0000000000..17374e109f --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/match.go @@ -0,0 +1,160 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "math" +) + +// MatchMain locates the best instance of 'pattern' in 'text' near 'loc'. +// Returns -1 if no match found. +func (dmp *DiffMatchPatch) MatchMain(text, pattern string, loc int) int { + // Check for null inputs not needed since null can't be passed in C#. + + loc = int(math.Max(0, math.Min(float64(loc), float64(len(text))))) + if text == pattern { + // Shortcut (potentially not guaranteed by the algorithm) + return 0 + } else if len(text) == 0 { + // Nothing to match. 
+ return -1 + } else if loc+len(pattern) <= len(text) && text[loc:loc+len(pattern)] == pattern { + // Perfect match at the perfect spot! (Includes case of null pattern) + return loc + } + // Do a fuzzy compare. + return dmp.MatchBitap(text, pattern, loc) +} + +// MatchBitap locates the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm. +// Returns -1 if no match was found. +func (dmp *DiffMatchPatch) MatchBitap(text, pattern string, loc int) int { + // Initialise the alphabet. + s := dmp.MatchAlphabet(pattern) + + // Highest score beyond which we give up. + scoreThreshold := dmp.MatchThreshold + // Is there a nearby exact match? (speedup) + bestLoc := indexOf(text, pattern, loc) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + // What about in the other direction? (speedup) + bestLoc = lastIndexOf(text, pattern, loc+len(pattern)) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + } + } + + // Initialise the bit arrays. + matchmask := 1 << uint((len(pattern) - 1)) + bestLoc = -1 + + var binMin, binMid int + binMax := len(pattern) + len(text) + lastRd := []int{} + for d := 0; d < len(pattern); d++ { + // Scan for the best match; each iteration allows for one more error. Run a binary search to determine how far from 'loc' we can stray at this error level. + binMin = 0 + binMid = binMax + for binMin < binMid { + if dmp.matchBitapScore(d, loc+binMid, loc, pattern) <= scoreThreshold { + binMin = binMid + } else { + binMax = binMid + } + binMid = (binMax-binMin)/2 + binMin + } + // Use the result from this iteration as the maximum for the next. 
+ binMax = binMid + start := int(math.Max(1, float64(loc-binMid+1))) + finish := int(math.Min(float64(loc+binMid), float64(len(text))) + float64(len(pattern))) + + rd := make([]int, finish+2) + rd[finish+1] = (1 << uint(d)) - 1 + + for j := finish; j >= start; j-- { + var charMatch int + if len(text) <= j-1 { + // Out of range. + charMatch = 0 + } else if _, ok := s[text[j-1]]; !ok { + charMatch = 0 + } else { + charMatch = s[text[j-1]] + } + + if d == 0 { + // First pass: exact match. + rd[j] = ((rd[j+1] << 1) | 1) & charMatch + } else { + // Subsequent passes: fuzzy match. + rd[j] = ((rd[j+1]<<1)|1)&charMatch | (((lastRd[j+1] | lastRd[j]) << 1) | 1) | lastRd[j+1] + } + if (rd[j] & matchmask) != 0 { + score := dmp.matchBitapScore(d, j-1, loc, pattern) + // This match will almost certainly be better than any existing match. But check anyway. + if score <= scoreThreshold { + // Told you so. + scoreThreshold = score + bestLoc = j - 1 + if bestLoc > loc { + // When passing loc, don't exceed our current distance from loc. + start = int(math.Max(1, float64(2*loc-bestLoc))) + } else { + // Already passed loc, downhill from here on in. + break + } + } + } + } + if dmp.matchBitapScore(d+1, loc, loc, pattern) > scoreThreshold { + // No hope for a (better) match at greater error levels. + break + } + lastRd = rd + } + return bestLoc +} + +// matchBitapScore computes and returns the score for a match with e errors and x location. +func (dmp *DiffMatchPatch) matchBitapScore(e, x, loc int, pattern string) float64 { + accuracy := float64(e) / float64(len(pattern)) + proximity := math.Abs(float64(loc - x)) + if dmp.MatchDistance == 0 { + // Dodge divide by zero error. + if proximity == 0 { + return accuracy + } + + return 1.0 + } + return accuracy + (proximity / float64(dmp.MatchDistance)) +} + +// MatchAlphabet initialises the alphabet for the Bitap algorithm. 
+func (dmp *DiffMatchPatch) MatchAlphabet(pattern string) map[byte]int { + s := map[byte]int{} + charPattern := []byte(pattern) + for _, c := range charPattern { + _, ok := s[c] + if !ok { + s[c] = 0 + } + } + i := 0 + + for _, c := range charPattern { + value := s[c] | int(uint(1)< y { + return x + } + return y +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/operation_string.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/operation_string.go new file mode 100644 index 0000000000..533ec0da7b --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/operation_string.go @@ -0,0 +1,17 @@ +// Code generated by "stringer -type=Operation -trimprefix=Diff"; DO NOT EDIT. + +package diffmatchpatch + +import "fmt" + +const _Operation_name = "DeleteEqualInsert" + +var _Operation_index = [...]uint8{0, 6, 11, 17} + +func (i Operation) String() string { + i -= -1 + if i < 0 || i >= Operation(len(_Operation_index)-1) { + return fmt.Sprintf("Operation(%d)", i+-1) + } + return _Operation_name[_Operation_index[i]:_Operation_index[i+1]] +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/patch.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/patch.go new file mode 100644 index 0000000000..223c43c426 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/patch.go @@ -0,0 +1,556 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. 
+// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "bytes" + "errors" + "math" + "net/url" + "regexp" + "strconv" + "strings" +) + +// Patch represents one patch operation. +type Patch struct { + diffs []Diff + Start1 int + Start2 int + Length1 int + Length2 int +} + +// String emulates GNU diff's format. +// Header: @@ -382,8 +481,9 @@ +// Indices are printed as 1-based, not 0-based. +func (p *Patch) String() string { + var coords1, coords2 string + + if p.Length1 == 0 { + coords1 = strconv.Itoa(p.Start1) + ",0" + } else if p.Length1 == 1 { + coords1 = strconv.Itoa(p.Start1 + 1) + } else { + coords1 = strconv.Itoa(p.Start1+1) + "," + strconv.Itoa(p.Length1) + } + + if p.Length2 == 0 { + coords2 = strconv.Itoa(p.Start2) + ",0" + } else if p.Length2 == 1 { + coords2 = strconv.Itoa(p.Start2 + 1) + } else { + coords2 = strconv.Itoa(p.Start2+1) + "," + strconv.Itoa(p.Length2) + } + + var text bytes.Buffer + _, _ = text.WriteString("@@ -" + coords1 + " +" + coords2 + " @@\n") + + // Escape the body of the patch with %xx notation. + for _, aDiff := range p.diffs { + switch aDiff.Type { + case DiffInsert: + _, _ = text.WriteString("+") + case DiffDelete: + _, _ = text.WriteString("-") + case DiffEqual: + _, _ = text.WriteString(" ") + } + + _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + _, _ = text.WriteString("\n") + } + + return unescaper.Replace(text.String()) +} + +// PatchAddContext increases the context until it is unique, but doesn't let the pattern expand beyond MatchMaxBits. +func (dmp *DiffMatchPatch) PatchAddContext(patch Patch, text string) Patch { + if len(text) == 0 { + return patch + } + + pattern := text[patch.Start2 : patch.Start2+patch.Length1] + padding := 0 + + // Look for the first and last matches of pattern in text. 
If two different matches are found, increase the pattern length. + for strings.Index(text, pattern) != strings.LastIndex(text, pattern) && + len(pattern) < dmp.MatchMaxBits-2*dmp.PatchMargin { + padding += dmp.PatchMargin + maxStart := max(0, patch.Start2-padding) + minEnd := min(len(text), patch.Start2+patch.Length1+padding) + pattern = text[maxStart:minEnd] + } + // Add one chunk for good luck. + padding += dmp.PatchMargin + + // Add the prefix. + prefix := text[max(0, patch.Start2-padding):patch.Start2] + if len(prefix) != 0 { + patch.diffs = append([]Diff{Diff{DiffEqual, prefix}}, patch.diffs...) + } + // Add the suffix. + suffix := text[patch.Start2+patch.Length1 : min(len(text), patch.Start2+patch.Length1+padding)] + if len(suffix) != 0 { + patch.diffs = append(patch.diffs, Diff{DiffEqual, suffix}) + } + + // Roll back the start points. + patch.Start1 -= len(prefix) + patch.Start2 -= len(prefix) + // Extend the lengths. + patch.Length1 += len(prefix) + len(suffix) + patch.Length2 += len(prefix) + len(suffix) + + return patch +} + +// PatchMake computes a list of patches. +func (dmp *DiffMatchPatch) PatchMake(opt ...interface{}) []Patch { + if len(opt) == 1 { + diffs, _ := opt[0].([]Diff) + text1 := dmp.DiffText1(diffs) + return dmp.PatchMake(text1, diffs) + } else if len(opt) == 2 { + text1 := opt[0].(string) + switch t := opt[1].(type) { + case string: + diffs := dmp.DiffMain(text1, t, true) + if len(diffs) > 2 { + diffs = dmp.DiffCleanupSemantic(diffs) + diffs = dmp.DiffCleanupEfficiency(diffs) + } + return dmp.PatchMake(text1, diffs) + case []Diff: + return dmp.patchMake2(text1, t) + } + } else if len(opt) == 3 { + return dmp.PatchMake(opt[0], opt[2]) + } + return []Patch{} +} + +// patchMake2 computes a list of patches to turn text1 into text2. +// text2 is not provided, diffs are the delta between text1 and text2. 
+func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch { + // Check for null inputs not needed since null can't be passed in C#. + patches := []Patch{} + if len(diffs) == 0 { + return patches // Get rid of the null case. + } + + patch := Patch{} + charCount1 := 0 // Number of characters into the text1 string. + charCount2 := 0 // Number of characters into the text2 string. + // Start with text1 (prepatchText) and apply the diffs until we arrive at text2 (postpatchText). We recreate the patches one by one to determine context info. + prepatchText := text1 + postpatchText := text1 + + for i, aDiff := range diffs { + if len(patch.diffs) == 0 && aDiff.Type != DiffEqual { + // A new patch starts here. + patch.Start1 = charCount1 + patch.Start2 = charCount2 + } + + switch aDiff.Type { + case DiffInsert: + patch.diffs = append(patch.diffs, aDiff) + patch.Length2 += len(aDiff.Text) + postpatchText = postpatchText[:charCount2] + + aDiff.Text + postpatchText[charCount2:] + case DiffDelete: + patch.Length1 += len(aDiff.Text) + patch.diffs = append(patch.diffs, aDiff) + postpatchText = postpatchText[:charCount2] + postpatchText[charCount2+len(aDiff.Text):] + case DiffEqual: + if len(aDiff.Text) <= 2*dmp.PatchMargin && + len(patch.diffs) != 0 && i != len(diffs)-1 { + // Small equality inside a patch. + patch.diffs = append(patch.diffs, aDiff) + patch.Length1 += len(aDiff.Text) + patch.Length2 += len(aDiff.Text) + } + if len(aDiff.Text) >= 2*dmp.PatchMargin { + // Time for a new patch. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + patch = Patch{} + // Unlike Unidiff, our patch lists have a rolling context. http://code.google.com/p/google-diff-match-patch/wiki/Unidiff Update prepatch text & pos to reflect the application of the just completed patch. + prepatchText = postpatchText + charCount1 = charCount2 + } + } + } + + // Update the current character count. 
+ if aDiff.Type != DiffInsert { + charCount1 += len(aDiff.Text) + } + if aDiff.Type != DiffDelete { + charCount2 += len(aDiff.Text) + } + } + + // Pick up the leftover patch if not empty. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + } + + return patches +} + +// PatchDeepCopy returns an array that is identical to a given an array of patches. +func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch { + patchesCopy := []Patch{} + for _, aPatch := range patches { + patchCopy := Patch{} + for _, aDiff := range aPatch.diffs { + patchCopy.diffs = append(patchCopy.diffs, Diff{ + aDiff.Type, + aDiff.Text, + }) + } + patchCopy.Start1 = aPatch.Start1 + patchCopy.Start2 = aPatch.Start2 + patchCopy.Length1 = aPatch.Length1 + patchCopy.Length2 = aPatch.Length2 + patchesCopy = append(patchesCopy, patchCopy) + } + return patchesCopy +} + +// PatchApply merges a set of patches onto the text. Returns a patched text, as well as an array of true/false values indicating which patches were applied. +func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []bool) { + if len(patches) == 0 { + return text, []bool{} + } + + // Deep copy the patches so that no changes are made to originals. + patches = dmp.PatchDeepCopy(patches) + + nullPadding := dmp.PatchAddPadding(patches) + text = nullPadding + text + nullPadding + patches = dmp.PatchSplitMax(patches) + + x := 0 + // delta keeps track of the offset between the expected and actual location of the previous patch. If there are patches expected at positions 10 and 20, but the first patch was found at 12, delta is 2 and the second patch has an effective expected position of 22. 
+ delta := 0 + results := make([]bool, len(patches)) + for _, aPatch := range patches { + expectedLoc := aPatch.Start2 + delta + text1 := dmp.DiffText1(aPatch.diffs) + var startLoc int + endLoc := -1 + if len(text1) > dmp.MatchMaxBits { + // PatchSplitMax will only provide an oversized pattern in the case of a monster delete. + startLoc = dmp.MatchMain(text, text1[:dmp.MatchMaxBits], expectedLoc) + if startLoc != -1 { + endLoc = dmp.MatchMain(text, + text1[len(text1)-dmp.MatchMaxBits:], expectedLoc+len(text1)-dmp.MatchMaxBits) + if endLoc == -1 || startLoc >= endLoc { + // Can't find valid trailing context. Drop this patch. + startLoc = -1 + } + } + } else { + startLoc = dmp.MatchMain(text, text1, expectedLoc) + } + if startLoc == -1 { + // No match found. :( + results[x] = false + // Subtract the delta for this failed patch from subsequent patches. + delta -= aPatch.Length2 - aPatch.Length1 + } else { + // Found a match. :) + results[x] = true + delta = startLoc - expectedLoc + var text2 string + if endLoc == -1 { + text2 = text[startLoc:int(math.Min(float64(startLoc+len(text1)), float64(len(text))))] + } else { + text2 = text[startLoc:int(math.Min(float64(endLoc+dmp.MatchMaxBits), float64(len(text))))] + } + if text1 == text2 { + // Perfect match, just shove the Replacement text in. + text = text[:startLoc] + dmp.DiffText2(aPatch.diffs) + text[startLoc+len(text1):] + } else { + // Imperfect match. Run a diff to get a framework of equivalent indices. + diffs := dmp.DiffMain(text1, text2, false) + if len(text1) > dmp.MatchMaxBits && float64(dmp.DiffLevenshtein(diffs))/float64(len(text1)) > dmp.PatchDeleteThreshold { + // The end points match, but the content is unacceptably bad. 
+ results[x] = false + } else { + diffs = dmp.DiffCleanupSemanticLossless(diffs) + index1 := 0 + for _, aDiff := range aPatch.diffs { + if aDiff.Type != DiffEqual { + index2 := dmp.DiffXIndex(diffs, index1) + if aDiff.Type == DiffInsert { + // Insertion + text = text[:startLoc+index2] + aDiff.Text + text[startLoc+index2:] + } else if aDiff.Type == DiffDelete { + // Deletion + startIndex := startLoc + index2 + text = text[:startIndex] + + text[startIndex+dmp.DiffXIndex(diffs, index1+len(aDiff.Text))-index2:] + } + } + if aDiff.Type != DiffDelete { + index1 += len(aDiff.Text) + } + } + } + } + } + x++ + } + // Strip the padding off. + text = text[len(nullPadding) : len(nullPadding)+(len(text)-2*len(nullPadding))] + return text, results +} + +// PatchAddPadding adds some padding on text start and end so that edges can match something. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string { + paddingLength := dmp.PatchMargin + nullPadding := "" + for x := 1; x <= paddingLength; x++ { + nullPadding += string(x) + } + + // Bump all the patches forward. + for i := range patches { + patches[i].Start1 += paddingLength + patches[i].Start2 += paddingLength + } + + // Add some padding on start of first diff. + if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual { + // Add nullPadding equality. + patches[0].diffs = append([]Diff{Diff{DiffEqual, nullPadding}}, patches[0].diffs...) + patches[0].Start1 -= paddingLength // Should be 0. + patches[0].Start2 -= paddingLength // Should be 0. + patches[0].Length1 += paddingLength + patches[0].Length2 += paddingLength + } else if paddingLength > len(patches[0].diffs[0].Text) { + // Grow first equality. 
+ extraLength := paddingLength - len(patches[0].diffs[0].Text) + patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text + patches[0].Start1 -= extraLength + patches[0].Start2 -= extraLength + patches[0].Length1 += extraLength + patches[0].Length2 += extraLength + } + + // Add some padding on end of last diff. + last := len(patches) - 1 + if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual { + // Add nullPadding equality. + patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding}) + patches[last].Length1 += paddingLength + patches[last].Length2 += paddingLength + } else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) { + // Grow last equality. + lastDiff := patches[last].diffs[len(patches[last].diffs)-1] + extraLength := paddingLength - len(lastDiff.Text) + patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength] + patches[last].Length1 += extraLength + patches[last].Length2 += extraLength + } + + return nullPadding +} + +// PatchSplitMax looks through the patches and breaks up any which are longer than the maximum limit of the match algorithm. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch { + patchSize := dmp.MatchMaxBits + for x := 0; x < len(patches); x++ { + if patches[x].Length1 <= patchSize { + continue + } + bigpatch := patches[x] + // Remove the big old patch. + patches = append(patches[:x], patches[x+1:]...) + x-- + + Start1 := bigpatch.Start1 + Start2 := bigpatch.Start2 + precontext := "" + for len(bigpatch.diffs) != 0 { + // Create one of several smaller patches. 
+ patch := Patch{} + empty := true + patch.Start1 = Start1 - len(precontext) + patch.Start2 = Start2 - len(precontext) + if len(precontext) != 0 { + patch.Length1 = len(precontext) + patch.Length2 = len(precontext) + patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext}) + } + for len(bigpatch.diffs) != 0 && patch.Length1 < patchSize-dmp.PatchMargin { + diffType := bigpatch.diffs[0].Type + diffText := bigpatch.diffs[0].Text + if diffType == DiffInsert { + // Insertions are harmless. + patch.Length2 += len(diffText) + Start2 += len(diffText) + patch.diffs = append(patch.diffs, bigpatch.diffs[0]) + bigpatch.diffs = bigpatch.diffs[1:] + empty = false + } else if diffType == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diffText) > 2*patchSize { + // This is a large deletion. Let it pass in one chunk. + patch.Length1 += len(diffText) + Start1 += len(diffText) + empty = false + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + bigpatch.diffs = bigpatch.diffs[1:] + } else { + // Deletion or equality. Only take as much as we can stomach. + diffText = diffText[:min(len(diffText), patchSize-patch.Length1-dmp.PatchMargin)] + + patch.Length1 += len(diffText) + Start1 += len(diffText) + if diffType == DiffEqual { + patch.Length2 += len(diffText) + Start2 += len(diffText) + } else { + empty = false + } + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + if diffText == bigpatch.diffs[0].Text { + bigpatch.diffs = bigpatch.diffs[1:] + } else { + bigpatch.diffs[0].Text = + bigpatch.diffs[0].Text[len(diffText):] + } + } + } + // Compute the head context for the next patch. + precontext = dmp.DiffText2(patch.diffs) + precontext = precontext[max(0, len(precontext)-dmp.PatchMargin):] + + postcontext := "" + // Append the end context for this patch. 
+ if len(dmp.DiffText1(bigpatch.diffs)) > dmp.PatchMargin { + postcontext = dmp.DiffText1(bigpatch.diffs)[:dmp.PatchMargin] + } else { + postcontext = dmp.DiffText1(bigpatch.diffs) + } + + if len(postcontext) != 0 { + patch.Length1 += len(postcontext) + patch.Length2 += len(postcontext) + if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual { + patch.diffs[len(patch.diffs)-1].Text += postcontext + } else { + patch.diffs = append(patch.diffs, Diff{DiffEqual, postcontext}) + } + } + if !empty { + x++ + patches = append(patches[:x], append([]Patch{patch}, patches[x:]...)...) + } + } + } + return patches +} + +// PatchToText takes a list of patches and returns a textual representation. +func (dmp *DiffMatchPatch) PatchToText(patches []Patch) string { + var text bytes.Buffer + for _, aPatch := range patches { + _, _ = text.WriteString(aPatch.String()) + } + return text.String() +} + +// PatchFromText parses a textual representation of patches and returns a List of Patch objects. 
+func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) { + patches := []Patch{} + if len(textline) == 0 { + return patches, nil + } + text := strings.Split(textline, "\n") + textPointer := 0 + patchHeader := regexp.MustCompile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$") + + var patch Patch + var sign uint8 + var line string + for textPointer < len(text) { + + if !patchHeader.MatchString(text[textPointer]) { + return patches, errors.New("Invalid patch string: " + text[textPointer]) + } + + patch = Patch{} + m := patchHeader.FindStringSubmatch(text[textPointer]) + + patch.Start1, _ = strconv.Atoi(m[1]) + if len(m[2]) == 0 { + patch.Start1-- + patch.Length1 = 1 + } else if m[2] == "0" { + patch.Length1 = 0 + } else { + patch.Start1-- + patch.Length1, _ = strconv.Atoi(m[2]) + } + + patch.Start2, _ = strconv.Atoi(m[3]) + + if len(m[4]) == 0 { + patch.Start2-- + patch.Length2 = 1 + } else if m[4] == "0" { + patch.Length2 = 0 + } else { + patch.Start2-- + patch.Length2, _ = strconv.Atoi(m[4]) + } + textPointer++ + + for textPointer < len(text) { + if len(text[textPointer]) > 0 { + sign = text[textPointer][0] + } else { + textPointer++ + continue + } + + line = text[textPointer][1:] + line = strings.Replace(line, "+", "%2b", -1) + line, _ = url.QueryUnescape(line) + if sign == '-' { + // Deletion. + patch.diffs = append(patch.diffs, Diff{DiffDelete, line}) + } else if sign == '+' { + // Insertion. + patch.diffs = append(patch.diffs, Diff{DiffInsert, line}) + } else if sign == ' ' { + // Minor equality. + patch.diffs = append(patch.diffs, Diff{DiffEqual, line}) + } else if sign == '@' { + // Start of next patch. + break + } else { + // WTF? 
+ return patches, errors.New("Invalid patch mode '" + string(sign) + "' in: " + string(line)) + } + textPointer++ + } + + patches = append(patches, patch) + } + return patches, nil +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/stringutil.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/stringutil.go new file mode 100644 index 0000000000..265f29cc7e --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/stringutil.go @@ -0,0 +1,88 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "strings" + "unicode/utf8" +) + +// unescaper unescapes selected chars for compatibility with JavaScript's encodeURI. +// In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc. +var unescaper = strings.NewReplacer( + "%21", "!", "%7E", "~", "%27", "'", + "%28", "(", "%29", ")", "%3B", ";", + "%2F", "/", "%3F", "?", "%3A", ":", + "%40", "@", "%26", "&", "%3D", "=", + "%2B", "+", "%24", "$", "%2C", ",", "%23", "#", "%2A", "*") + +// indexOf returns the first index of pattern in str, starting at str[i]. 
+func indexOf(str string, pattern string, i int) int { + if i > len(str)-1 { + return -1 + } + if i <= 0 { + return strings.Index(str, pattern) + } + ind := strings.Index(str[i:], pattern) + if ind == -1 { + return -1 + } + return ind + i +} + +// lastIndexOf returns the last index of pattern in str, starting at str[i]. +func lastIndexOf(str string, pattern string, i int) int { + if i < 0 { + return -1 + } + if i >= len(str) { + return strings.LastIndex(str, pattern) + } + _, size := utf8.DecodeRuneInString(str[i:]) + return strings.LastIndex(str[:i+size], pattern) +} + +// runesIndexOf returns the index of pattern in target, starting at target[i]. +func runesIndexOf(target, pattern []rune, i int) int { + if i > len(target)-1 { + return -1 + } + if i <= 0 { + return runesIndex(target, pattern) + } + ind := runesIndex(target[i:], pattern) + if ind == -1 { + return -1 + } + return ind + i +} + +func runesEqual(r1, r2 []rune) bool { + if len(r1) != len(r2) { + return false + } + for i, c := range r1 { + if c != r2[i] { + return false + } + } + return true +} + +// runesIndex is the equivalent of strings.Index for rune slices. 
+func runesIndex(r1, r2 []rune) int { + last := len(r1) - len(r2) + for i := 0; i <= last; i++ { + if runesEqual(r1[i:i+len(r2)], r2) { + return i + } + } + return -1 +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/scripts/lint.sh b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/scripts/lint.sh new file mode 100755 index 0000000000..3dad05f5e5 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/scripts/lint.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +if [ -z ${PKG+x} ]; then echo "PKG is not set"; exit 1; fi +if [ -z ${ROOT_DIR+x} ]; then echo "ROOT_DIR is not set"; exit 1; fi + +echo "gofmt:" +OUT=$(gofmt -l $ROOT_DIR) +if [ $(echo "$OUT\c" | wc -l) -ne 0 ]; then echo "$OUT"; PROBLEM=1; fi + +echo "errcheck:" +OUT=$(errcheck $PKG/...) +if [ $(echo "$OUT\c" | wc -l) -ne 0 ]; then echo "$OUT"; PROBLEM=1; fi + +echo "go vet:" +OUT=$(go tool vet -all=true -v=true $ROOT_DIR 2>&1 | grep --invert-match -E "(Checking file|\%p of wrong type|can't check non-constant format)") +if [ $(echo "$OUT\c" | wc -l) -ne 0 ]; then echo "$OUT"; PROBLEM=1; fi + +echo "golint:" +OUT=$(golint $PKG/... 
| grep --invert-match -E "(method DiffPrettyHtml should be DiffPrettyHTML)") +if [ $(echo "$OUT\c" | wc -l) -ne 0 ]; then echo "$OUT"; PROBLEM=1; fi + +if [ -n "$PROBLEM" ]; then exit 1; fi diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/testdata/speedtest1.txt b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/testdata/speedtest1.txt new file mode 100644 index 0000000000..54b438fd79 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/testdata/speedtest1.txt @@ -0,0 +1,230 @@ +This is a '''list of newspapers published by [[Journal Register Company]]'''. + +The company owns daily and weekly newspapers, other print media properties and newspaper-affiliated local Websites in the [[U.S.]] states of [[Connecticut]], [[Michigan]], [[New York]], [[Ohio]] and [[Pennsylvania]], organized in six geographic "clusters":[http://www.journalregister.com/newspapers.html Journal Register Company: Our Newspapers], accessed February 10, 2008. + +== Capital-Saratoga == +Three dailies, associated weeklies and [[pennysaver]]s in greater [[Albany, New York]]; also [http://www.capitalcentral.com capitalcentral.com] and [http://www.jobsinnewyork.com JobsInNewYork.com]. 
+ +* ''The Oneida Daily Dispatch'' {{WS|oneidadispatch.com}} of [[Oneida, New York]] +* ''[[The Record (Troy)|The Record]]'' {{WS|troyrecord.com}} of [[Troy, New York]] +* ''[[The Saratogian]]'' {{WS|saratogian.com}} of [[Saratoga Springs, New York]] +* Weeklies: +** ''Community News'' {{WS|cnweekly.com}} weekly of [[Clifton Park, New York]] +** ''Rome Observer'' of [[Rome, New York]] +** ''Life & Times of Utica'' of [[Utica, New York]] + +== Connecticut == +Five dailies, associated weeklies and [[pennysaver]]s in the state of [[Connecticut]]; also [http://www.ctcentral.com CTcentral.com], [http://www.ctcarsandtrucks.com CTCarsAndTrucks.com] and [http://www.jobsinct.com JobsInCT.com]. + +* ''The Middletown Press'' {{WS|middletownpress.com}} of [[Middletown, Connecticut|Middletown]] +* ''[[New Haven Register]]'' {{WS|newhavenregister.com}} of [[New Haven, Connecticut|New Haven]] +* ''The Register Citizen'' {{WS|registercitizen.com}} of [[Torrington, Connecticut|Torrington]] + +* [[New Haven Register#Competitors|Elm City Newspapers]] {{WS|ctcentral.com}} +** ''The Advertiser'' of [[East Haven, Connecticut|East Haven]] +** ''Hamden Chronicle'' of [[Hamden, Connecticut|Hamden]] +** ''Milford Weekly'' of [[Milford, Connecticut|Milford]] +** ''The Orange Bulletin'' of [[Orange, Connecticut|Orange]] +** ''The Post'' of [[North Haven, Connecticut|North Haven]] +** ''Shelton Weekly'' of [[Shelton, Connecticut|Shelton]] +** ''The Stratford Bard'' of [[Stratford, Connecticut|Stratford]] +** ''Wallingford Voice'' of [[Wallingford, Connecticut|Wallingford]] +** ''West Haven News'' of [[West Haven, Connecticut|West Haven]] +* Housatonic Publications +** ''The New Milford Times'' {{WS|newmilfordtimes.com}} of [[New Milford, Connecticut|New Milford]] +** ''The Brookfield Journal'' of [[Brookfield, Connecticut|Brookfield]] +** ''The Kent Good Times Dispatch'' of [[Kent, Connecticut|Kent]] +** ''The Bethel Beacon'' of [[Bethel, Connecticut|Bethel]] +** ''The Litchfield Enquirer'' of 
[[Litchfield, Connecticut|Litchfield]] +** ''Litchfield County Times'' of [[Litchfield, Connecticut|Litchfield]] +* Imprint Newspapers {{WS|imprintnewspapers.com}} +** ''West Hartford News'' of [[West Hartford, Connecticut|West Hartford]] +** ''Windsor Journal'' of [[Windsor, Connecticut|Windsor]] +** ''Windsor Locks Journal'' of [[Windsor Locks, Connecticut|Windsor Locks]] +** ''Avon Post'' of [[Avon, Connecticut|Avon]] +** ''Farmington Post'' of [[Farmington, Connecticut|Farmington]] +** ''Simsbury Post'' of [[Simsbury, Connecticut|Simsbury]] +** ''Tri-Town Post'' of [[Burlington, Connecticut|Burlington]], [[Canton, Connecticut|Canton]] and [[Harwinton, Connecticut|Harwinton]] +* Minuteman Publications +** ''[[Fairfield Minuteman]]'' of [[Fairfield, Connecticut|Fairfield]] +** ''The Westport Minuteman'' {{WS|westportminuteman.com}} of [[Westport, Connecticut|Westport]] +* Shoreline Newspapers weeklies: +** ''Branford Review'' of [[Branford, Connecticut|Branford]] +** ''Clinton Recorder'' of [[Clinton, Connecticut|Clinton]] +** ''The Dolphin'' of [[Naval Submarine Base New London]] in [[New London, Connecticut|New London]] +** ''Main Street News'' {{WS|ctmainstreetnews.com}} of [[Essex, Connecticut|Essex]] +** ''Pictorial Gazette'' of [[Old Saybrook, Connecticut|Old Saybrook]] +** ''Regional Express'' of [[Colchester, Connecticut|Colchester]] +** ''Regional Standard'' of [[Colchester, Connecticut|Colchester]] +** ''Shoreline Times'' {{WS|shorelinetimes.com}} of [[Guilford, Connecticut|Guilford]] +** ''Shore View East'' of [[Madison, Connecticut|Madison]] +** ''Shore View West'' of [[Guilford, Connecticut|Guilford]] +* Other weeklies: +** ''Registro'' {{WS|registroct.com}} of [[New Haven, Connecticut|New Haven]] +** ''Thomaston Express'' {{WS|thomastownexpress.com}} of [[Thomaston, Connecticut|Thomaston]] +** ''Foothills Traders'' {{WS|foothillstrader.com}} of Torrington, Bristol, Canton + +== Michigan == +Four dailies, associated weeklies and [[pennysaver]]s in 
the state of [[Michigan]]; also [http://www.micentralhomes.com MIcentralhomes.com] and [http://www.micentralautos.com MIcentralautos.com] +* ''[[Oakland Press]]'' {{WS|theoaklandpress.com}} of [[Oakland, Michigan|Oakland]] +* ''Daily Tribune'' {{WS|dailytribune.com}} of [[Royal Oak, Michigan|Royal Oak]] +* ''Macomb Daily'' {{WS|macombdaily.com}} of [[Mt. Clemens, Michigan|Mt. Clemens]] +* ''[[Morning Sun]]'' {{WS|themorningsun.com}} of [[Mount Pleasant, Michigan|Mount Pleasant]] +* Heritage Newspapers {{WS|heritage.com}} +** ''Belleville View'' +** ''Ile Camera'' +** ''Monroe Guardian'' +** ''Ypsilanti Courier'' +** ''News-Herald'' +** ''Press & Guide'' +** ''Chelsea Standard & Dexter Leader'' +** ''Manchester Enterprise'' +** ''Milan News-Leader'' +** ''Saline Reporter'' +* Independent Newspapers {{WS|sourcenewspapers.com}} +** ''Advisor'' +** ''Source'' +* Morning Star {{WS|morningstarpublishing.com}} +** ''Alma Reminder'' +** ''Alpena Star'' +** ''Antrim County News'' +** ''Carson City Reminder'' +** ''The Leader & Kalkaskian'' +** ''Ogemaw/Oscoda County Star'' +** ''Petoskey/Charlevoix Star'' +** ''Presque Isle Star'' +** ''Preview Community Weekly'' +** ''Roscommon County Star'' +** ''St. Johns Reminder'' +** ''Straits Area Star'' +** ''The (Edmore) Advertiser'' +* Voice Newspapers {{WS|voicenews.com}} +** ''Armada Times'' +** ''Bay Voice'' +** ''Blue Water Voice'' +** ''Downriver Voice'' +** ''Macomb Township Voice'' +** ''North Macomb Voice'' +** ''Weekend Voice'' +** ''Suburban Lifestyles'' {{WS|suburbanlifestyles.com}} + +== Mid-Hudson == +One daily, associated magazines in the [[Hudson River Valley]] of [[New York]]; also [http://www.midhudsoncentral.com MidHudsonCentral.com] and [http://www.jobsinnewyork.com JobsInNewYork.com]. 
+ +* ''[[Daily Freeman]]'' {{WS|dailyfreeman.com}} of [[Kingston, New York]] + +== Ohio == +Two dailies, associated magazines and three shared Websites, all in the state of [[Ohio]]: [http://www.allaroundcleveland.com AllAroundCleveland.com], [http://www.allaroundclevelandcars.com AllAroundClevelandCars.com] and [http://www.allaroundclevelandjobs.com AllAroundClevelandJobs.com]. + +* ''[[The News-Herald (Ohio)|The News-Herald]]'' {{WS|news-herald.com}} of [[Willoughby, Ohio|Willoughby]] +* ''[[The Morning Journal]]'' {{WS|morningjournal.com}} of [[Lorain, Ohio|Lorain]] + +== Philadelphia area == +Seven dailies and associated weeklies and magazines in [[Pennsylvania]] and [[New Jersey]], and associated Websites: [http://www.allaroundphilly.com AllAroundPhilly.com], [http://www.jobsinnj.com JobsInNJ.com], [http://www.jobsinpa.com JobsInPA.com], and [http://www.phillycarsearch.com PhillyCarSearch.com]. + +* ''The Daily Local'' {{WS|dailylocal.com}} of [[West Chester, Pennsylvania|West Chester]] +* ''[[Delaware County Daily and Sunday Times]] {{WS|delcotimes.com}} of Primos +* ''[[The Mercury (Pennsylvania)|The Mercury]]'' {{WS|pottstownmercury.com}} of [[Pottstown, Pennsylvania|Pottstown]] +* ''The Phoenix'' {{WS|phoenixvillenews.com}} of [[Phoenixville, Pennsylvania|Phoenixville]] +* ''[[The Reporter (Lansdale)|The Reporter]]'' {{WS|thereporteronline.com}} of [[Lansdale, Pennsylvania|Lansdale]] +* ''The Times Herald'' {{WS|timesherald.com}} of [[Norristown, Pennsylvania|Norristown]] +* ''[[The Trentonian]]'' {{WS|trentonian.com}} of [[Trenton, New Jersey]] + +* Weeklies +** ''El Latino Expreso'' of [[Trenton, New Jersey]] +** ''La Voz'' of [[Norristown, Pennsylvania]] +** ''The Village News'' of [[Downingtown, Pennsylvania]] +** ''The Times Record'' of [[Kennett Square, Pennsylvania]] +** ''The Tri-County Record'' {{WS|tricountyrecord.com}} of [[Morgantown, Pennsylvania]] +** ''News of Delaware County'' {{WS|newsofdelawarecounty.com}}of [[Havertown, Pennsylvania]] 
+** ''Main Line Times'' {{WS|mainlinetimes.com}}of [[Ardmore, Pennsylvania]] +** ''Penny Pincher'' of [[Pottstown, Pennsylvania]] +** ''Town Talk'' {{WS|towntalknews.com}} of [[Ridley, Pennsylvania]] +* Chesapeake Publishing {{WS|pa8newsgroup.com}} +** ''Solanco Sun Ledger'' of [[Quarryville, Pennsylvania]] +** ''Columbia Ledger'' of [[Columbia, Pennsylvania]] +** ''Coatesville Ledger'' of [[Downingtown, Pennsylvania]] +** ''Parkesburg Post Ledger'' of [[Quarryville, Pennsylvania]] +** ''Downingtown Ledger'' of [[Downingtown, Pennsylvania]] +** ''The Kennett Paper'' of [[Kennett Square, Pennsylvania]] +** ''Avon Grove Sun'' of [[West Grove, Pennsylvania]] +** ''Oxford Tribune'' of [[Oxford, Pennsylvania]] +** ''Elizabethtown Chronicle'' of [[Elizabethtown, Pennsylvania]] +** ''Donegal Ledger'' of [[Donegal, Pennsylvania]] +** ''Chadds Ford Post'' of [[Chadds Ford, Pennsylvania]] +** ''The Central Record'' of [[Medford, New Jersey]] +** ''Maple Shade Progress'' of [[Maple Shade, New Jersey]] +* Intercounty Newspapers {{WS|buckslocalnews.com}} +** ''The Review'' of Roxborough, Pennsylvania +** ''The Recorder'' of [[Conshohocken, Pennsylvania]] +** ''The Leader'' of [[Mount Airy, Pennsylvania|Mount Airy]] and West Oak Lake, Pennsylvania +** ''The Pennington Post'' of [[Pennington, New Jersey]] +** ''The Bristol Pilot'' of [[Bristol, Pennsylvania]] +** ''Yardley News'' of [[Yardley, Pennsylvania]] +** ''New Hope Gazette'' of [[New Hope, Pennsylvania]] +** ''Doylestown Patriot'' of [[Doylestown, Pennsylvania]] +** ''Newtown Advance'' of [[Newtown, Pennsylvania]] +** ''The Plain Dealer'' of [[Williamstown, New Jersey]] +** ''News Report'' of [[Sewell, New Jersey]] +** ''Record Breeze'' of [[Berlin, New Jersey]] +** ''Newsweekly'' of [[Moorestown, New Jersey]] +** ''Haddon Herald'' of [[Haddonfield, New Jersey]] +** ''New Egypt Press'' of [[New Egypt, New Jersey]] +** ''Community News'' of [[Pemberton, New Jersey]] +** ''Plymouth Meeting Journal'' of [[Plymouth Meeting, 
Pennsylvania]] +** ''Lafayette Hill Journal'' of [[Lafayette Hill, Pennsylvania]] +* Montgomery Newspapers {{WS|montgomerynews.com}} +** ''Ambler Gazette'' of [[Ambler, Pennsylvania]] +** ''Central Bucks Life'' of [[Bucks County, Pennsylvania]] +** ''The Colonial'' of [[Plymouth Meeting, Pennsylvania]] +** ''Glenside News'' of [[Glenside, Pennsylvania]] +** ''The Globe'' of [[Lower Moreland Township, Pennsylvania]] +** ''Main Line Life'' of [[Ardmore, Pennsylvania]] +** ''Montgomery Life'' of [[Fort Washington, Pennsylvania]] +** ''North Penn Life'' of [[Lansdale, Pennsylvania]] +** ''Perkasie News Herald'' of [[Perkasie, Pennsylvania]] +** ''Public Spirit'' of [[Hatboro, Pennsylvania]] +** ''Souderton Independent'' of [[Souderton, Pennsylvania]] +** ''Springfield Sun'' of [[Springfield, Pennsylvania]] +** ''Spring-Ford Reporter'' of [[Royersford, Pennsylvania]] +** ''Times Chronicle'' of [[Jenkintown, Pennsylvania]] +** ''Valley Item'' of [[Perkiomenville, Pennsylvania]] +** ''Willow Grove Guide'' of [[Willow Grove, Pennsylvania]] +* News Gleaner Publications (closed December 2008) {{WS|newsgleaner.com}} +** ''Life Newspapers'' of [[Philadelphia, Pennsylvania]] +* Suburban Publications +** ''The Suburban & Wayne Times'' {{WS|waynesuburban.com}} of [[Wayne, Pennsylvania]] +** ''The Suburban Advertiser'' of [[Exton, Pennsylvania]] +** ''The King of Prussia Courier'' of [[King of Prussia, Pennsylvania]] +* Press Newspapers {{WS|countypressonline.com}} +** ''County Press'' of [[Newtown Square, Pennsylvania]] +** ''Garnet Valley Press'' of [[Glen Mills, Pennsylvania]] +** ''Haverford Press'' of [[Newtown Square, Pennsylvania]] (closed January 2009) +** ''Hometown Press'' of [[Glen Mills, Pennsylvania]] (closed January 2009) +** ''Media Press'' of [[Newtown Square, Pennsylvania]] (closed January 2009) +** ''Springfield Press'' of [[Springfield, Pennsylvania]] +* Berks-Mont Newspapers {{WS|berksmontnews.com}} +** ''The Boyertown Area Times'' of [[Boyertown, 
Pennsylvania]] +** ''The Kutztown Area Patriot'' of [[Kutztown, Pennsylvania]] +** ''The Hamburg Area Item'' of [[Hamburg, Pennsylvania]] +** ''The Southern Berks News'' of [[Exeter Township, Berks County, Pennsylvania]] +** ''The Free Press'' of [[Quakertown, Pennsylvania]] +** ''The Saucon News'' of [[Quakertown, Pennsylvania]] +** ''Westside Weekly'' of [[Reading, Pennsylvania]] + +* Magazines +** ''Bucks Co. Town & Country Living'' +** ''Chester Co. Town & Country Living'' +** ''Montomgery Co. Town & Country Living'' +** ''Garden State Town & Country Living'' +** ''Montgomery Homes'' +** ''Philadelphia Golfer'' +** ''Parents Express'' +** ''Art Matters'' + +{{JRC}} + +==References== + + +[[Category:Journal Register publications|*]] diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/testdata/speedtest2.txt b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/testdata/speedtest2.txt new file mode 100644 index 0000000000..8f25a80fff --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-diff/testdata/speedtest2.txt @@ -0,0 +1,188 @@ +This is a '''list of newspapers published by [[Journal Register Company]]'''. + +The company owns daily and weekly newspapers, other print media properties and newspaper-affiliated local Websites in the [[U.S.]] states of [[Connecticut]], [[Michigan]], [[New York]], [[Ohio]], [[Pennsylvania]] and [[New Jersey]], organized in six geographic "clusters":[http://www.journalregister.com/publications.html Journal Register Company: Our Publications], accessed April 21, 2010. + +== Capital-Saratoga == +Three dailies, associated weeklies and [[pennysaver]]s in greater [[Albany, New York]]; also [http://www.capitalcentral.com capitalcentral.com] and [http://www.jobsinnewyork.com JobsInNewYork.com]. 
+ +* ''The Oneida Daily Dispatch'' {{WS|oneidadispatch.com}} of [[Oneida, New York]] +* ''[[The Record (Troy)|The Record]]'' {{WS|troyrecord.com}} of [[Troy, New York]] +* ''[[The Saratogian]]'' {{WS|saratogian.com}} of [[Saratoga Springs, New York]] +* Weeklies: +** ''Community News'' {{WS|cnweekly.com}} weekly of [[Clifton Park, New York]] +** ''Rome Observer'' {{WS|romeobserver.com}} of [[Rome, New York]] +** ''WG Life '' {{WS|saratogian.com/wglife/}} of [[Wilton, New York]] +** ''Ballston Spa Life '' {{WS|saratogian.com/bspalife}} of [[Ballston Spa, New York]] +** ''Greenbush Life'' {{WS|troyrecord.com/greenbush}} of [[Troy, New York]] +** ''Latham Life'' {{WS|troyrecord.com/latham}} of [[Latham, New York]] +** ''River Life'' {{WS|troyrecord.com/river}} of [[Troy, New York]] + +== Connecticut == +Three dailies, associated weeklies and [[pennysaver]]s in the state of [[Connecticut]]; also [http://www.ctcentral.com CTcentral.com], [http://www.ctcarsandtrucks.com CTCarsAndTrucks.com] and [http://www.jobsinct.com JobsInCT.com]. 
+ +* ''The Middletown Press'' {{WS|middletownpress.com}} of [[Middletown, Connecticut|Middletown]] +* ''[[New Haven Register]]'' {{WS|newhavenregister.com}} of [[New Haven, Connecticut|New Haven]] +* ''The Register Citizen'' {{WS|registercitizen.com}} of [[Torrington, Connecticut|Torrington]] + +* Housatonic Publications +** ''The Housatonic Times'' {{WS|housatonictimes.com}} of [[New Milford, Connecticut|New Milford]] +** ''Litchfield County Times'' {{WS|countytimes.com}} of [[Litchfield, Connecticut|Litchfield]] + +* Minuteman Publications +** ''[[Fairfield Minuteman]]'' {{WS|fairfieldminuteman.com}}of [[Fairfield, Connecticut|Fairfield]] +** ''The Westport Minuteman'' {{WS|westportminuteman.com}} of [[Westport, Connecticut|Westport]] + +* Shoreline Newspapers +** ''The Dolphin'' {{WS|dolphin-news.com}} of [[Naval Submarine Base New London]] in [[New London, Connecticut|New London]] +** ''Shoreline Times'' {{WS|shorelinetimes.com}} of [[Guilford, Connecticut|Guilford]] + +* Foothills Media Group {{WS|foothillsmediagroup.com}} +** ''Thomaston Express'' {{WS|thomastonexpress.com}} of [[Thomaston, Connecticut|Thomaston]] +** ''Good News About Torrington'' {{WS|goodnewsabouttorrington.com}} of [[Torrington, Connecticut|Torrington]] +** ''Granby News'' {{WS|foothillsmediagroup.com/granby}} of [[Granby, Connecticut|Granby]] +** ''Canton News'' {{WS|foothillsmediagroup.com/canton}} of [[Canton, Connecticut|Canton]] +** ''Avon News'' {{WS|foothillsmediagroup.com/avon}} of [[Avon, Connecticut|Avon]] +** ''Simsbury News'' {{WS|foothillsmediagroup.com/simsbury}} of [[Simsbury, Connecticut|Simsbury]] +** ''Litchfield News'' {{WS|foothillsmediagroup.com/litchfield}} of [[Litchfield, Connecticut|Litchfield]] +** ''Foothills Trader'' {{WS|foothillstrader.com}} of Torrington, Bristol, Canton + +* Other weeklies +** ''The Milford-Orange Bulletin'' {{WS|ctbulletin.com}} of [[Orange, Connecticut|Orange]] +** ''The Post-Chronicle'' {{WS|ctpostchronicle.com}} of [[North Haven, 
Connecticut|North Haven]] +** ''West Hartford News'' {{WS|westhartfordnews.com}} of [[West Hartford, Connecticut|West Hartford]] + +* Magazines +** ''The Connecticut Bride'' {{WS|connecticutmag.com}} +** ''Connecticut Magazine'' {{WS|theconnecticutbride.com}} +** ''Passport Magazine'' {{WS|passport-mag.com}} + +== Michigan == +Four dailies, associated weeklies and [[pennysaver]]s in the state of [[Michigan]]; also [http://www.micentralhomes.com MIcentralhomes.com] and [http://www.micentralautos.com MIcentralautos.com] +* ''[[Oakland Press]]'' {{WS|theoaklandpress.com}} of [[Oakland, Michigan|Oakland]] +* ''Daily Tribune'' {{WS|dailytribune.com}} of [[Royal Oak, Michigan|Royal Oak]] +* ''Macomb Daily'' {{WS|macombdaily.com}} of [[Mt. Clemens, Michigan|Mt. Clemens]] +* ''[[Morning Sun]]'' {{WS|themorningsun.com}} of [[Mount Pleasant, Michigan|Mount Pleasant]] + +* Heritage Newspapers {{WS|heritage.com}} +** ''Belleville View'' {{WS|bellevilleview.com}} +** ''Ile Camera'' {{WS|thenewsherald.com/ile_camera}} +** ''Monroe Guardian'' {{WS|monreguardian.com}} +** ''Ypsilanti Courier'' {{WS|ypsilanticourier.com}} +** ''News-Herald'' {{WS|thenewsherald.com}} +** ''Press & Guide'' {{WS|pressandguide.com}} +** ''Chelsea Standard & Dexter Leader'' {{WS|chelseastandard.com}} +** ''Manchester Enterprise'' {{WS|manchesterguardian.com}} +** ''Milan News-Leader'' {{WS|milannews.com}} +** ''Saline Reporter'' {{WS|salinereporter.com}} +* Independent Newspapers +** ''Advisor'' {{WS|sourcenewspapers.com}} +** ''Source'' {{WS|sourcenewspapers.com}} +* Morning Star {{WS|morningstarpublishing.com}} +** ''The Leader & Kalkaskian'' {{WS|leaderandkalkaskian.com}} +** ''Grand Traverse Insider'' {{WS|grandtraverseinsider.com}} +** ''Alma Reminder'' +** ''Alpena Star'' +** ''Ogemaw/Oscoda County Star'' +** ''Presque Isle Star'' +** ''St. 
Johns Reminder'' + +* Voice Newspapers {{WS|voicenews.com}} +** ''Armada Times'' +** ''Bay Voice'' +** ''Blue Water Voice'' +** ''Downriver Voice'' +** ''Macomb Township Voice'' +** ''North Macomb Voice'' +** ''Weekend Voice'' + +== Mid-Hudson == +One daily, associated magazines in the [[Hudson River Valley]] of [[New York]]; also [http://www.midhudsoncentral.com MidHudsonCentral.com] and [http://www.jobsinnewyork.com JobsInNewYork.com]. + +* ''[[Daily Freeman]]'' {{WS|dailyfreeman.com}} of [[Kingston, New York]] +* ''Las Noticias'' {{WS|lasnoticiasny.com}} of [[Kingston, New York]] + +== Ohio == +Two dailies, associated magazines and three shared Websites, all in the state of [[Ohio]]: [http://www.allaroundcleveland.com AllAroundCleveland.com], [http://www.allaroundclevelandcars.com AllAroundClevelandCars.com] and [http://www.allaroundclevelandjobs.com AllAroundClevelandJobs.com]. + +* ''[[The News-Herald (Ohio)|The News-Herald]]'' {{WS|news-herald.com}} of [[Willoughby, Ohio|Willoughby]] +* ''[[The Morning Journal]]'' {{WS|morningjournal.com}} of [[Lorain, Ohio|Lorain]] +* ''El Latino Expreso'' {{WS|lorainlatino.com}} of [[Lorain, Ohio|Lorain]] + +== Philadelphia area == +Seven dailies and associated weeklies and magazines in [[Pennsylvania]] and [[New Jersey]], and associated Websites: [http://www.allaroundphilly.com AllAroundPhilly.com], [http://www.jobsinnj.com JobsInNJ.com], [http://www.jobsinpa.com JobsInPA.com], and [http://www.phillycarsearch.com PhillyCarSearch.com]. 
+ +* ''[[The Daily Local News]]'' {{WS|dailylocal.com}} of [[West Chester, Pennsylvania|West Chester]] +* ''[[Delaware County Daily and Sunday Times]] {{WS|delcotimes.com}} of Primos [[Upper Darby Township, Pennsylvania]] +* ''[[The Mercury (Pennsylvania)|The Mercury]]'' {{WS|pottstownmercury.com}} of [[Pottstown, Pennsylvania|Pottstown]] +* ''[[The Reporter (Lansdale)|The Reporter]]'' {{WS|thereporteronline.com}} of [[Lansdale, Pennsylvania|Lansdale]] +* ''The Times Herald'' {{WS|timesherald.com}} of [[Norristown, Pennsylvania|Norristown]] +* ''[[The Trentonian]]'' {{WS|trentonian.com}} of [[Trenton, New Jersey]] + +* Weeklies +* ''The Phoenix'' {{WS|phoenixvillenews.com}} of [[Phoenixville, Pennsylvania]] +** ''El Latino Expreso'' {{WS|njexpreso.com}} of [[Trenton, New Jersey]] +** ''La Voz'' {{WS|lavozpa.com}} of [[Norristown, Pennsylvania]] +** ''The Tri County Record'' {{WS|tricountyrecord.com}} of [[Morgantown, Pennsylvania]] +** ''Penny Pincher'' {{WS|pennypincherpa.com}}of [[Pottstown, Pennsylvania]] + +* Chesapeake Publishing {{WS|southernchestercountyweeklies.com}} +** ''The Kennett Paper'' {{WS|kennettpaper.com}} of [[Kennett Square, Pennsylvania]] +** ''Avon Grove Sun'' {{WS|avongrovesun.com}} of [[West Grove, Pennsylvania]] +** ''The Central Record'' {{WS|medfordcentralrecord.com}} of [[Medford, New Jersey]] +** ''Maple Shade Progress'' {{WS|mapleshadeprogress.com}} of [[Maple Shade, New Jersey]] + +* Intercounty Newspapers {{WS|buckslocalnews.com}} {{WS|southjerseylocalnews.com}} +** ''The Pennington Post'' {{WS|penningtonpost.com}} of [[Pennington, New Jersey]] +** ''The Bristol Pilot'' {{WS|bristolpilot.com}} of [[Bristol, Pennsylvania]] +** ''Yardley News'' {{WS|yardleynews.com}} of [[Yardley, Pennsylvania]] +** ''Advance of Bucks County'' {{WS|advanceofbucks.com}} of [[Newtown, Pennsylvania]] +** ''Record Breeze'' {{WS|recordbreeze.com}} of [[Berlin, New Jersey]] +** ''Community News'' {{WS|sjcommunitynews.com}} of [[Pemberton, New Jersey]] + +* 
Montgomery Newspapers {{WS|montgomerynews.com}} +** ''Ambler Gazette'' {{WS|amblergazette.com}} of [[Ambler, Pennsylvania]] +** ''The Colonial'' {{WS|colonialnews.com}} of [[Plymouth Meeting, Pennsylvania]] +** ''Glenside News'' {{WS|glensidenews.com}} of [[Glenside, Pennsylvania]] +** ''The Globe'' {{WS|globenewspaper.com}} of [[Lower Moreland Township, Pennsylvania]] +** ''Montgomery Life'' {{WS|montgomerylife.com}} of [[Fort Washington, Pennsylvania]] +** ''North Penn Life'' {{WS|northpennlife.com}} of [[Lansdale, Pennsylvania]] +** ''Perkasie News Herald'' {{WS|perkasienewsherald.com}} of [[Perkasie, Pennsylvania]] +** ''Public Spirit'' {{WS|thepublicspirit.com}} of [[Hatboro, Pennsylvania]] +** ''Souderton Independent'' {{WS|soudertonindependent.com}} of [[Souderton, Pennsylvania]] +** ''Springfield Sun'' {{WS|springfieldsun.com}} of [[Springfield, Pennsylvania]] +** ''Spring-Ford Reporter'' {{WS|springfordreporter.com}} of [[Royersford, Pennsylvania]] +** ''Times Chronicle'' {{WS|thetimeschronicle.com}} of [[Jenkintown, Pennsylvania]] +** ''Valley Item'' {{WS|valleyitem.com}} of [[Perkiomenville, Pennsylvania]] +** ''Willow Grove Guide'' {{WS|willowgroveguide.com}} of [[Willow Grove, Pennsylvania]] +** ''The Review'' {{WS|roxreview.com}} of [[Roxborough, Philadelphia, Pennsylvania]] + +* Main Line Media News {{WS|mainlinemedianews.com}} +** ''Main Line Times'' {{WS|mainlinetimes.com}} of [[Ardmore, Pennsylvania]] +** ''Main Line Life'' {{WS|mainlinelife.com}} of [[Ardmore, Pennsylvania]] +** ''The King of Prussia Courier'' {{WS|kingofprussiacourier.com}} of [[King of Prussia, Pennsylvania]] + +* Delaware County News Network {{WS|delconewsnetwork.com}} +** ''News of Delaware County'' {{WS|newsofdelawarecounty.com}} of [[Havertown, Pennsylvania]] +** ''County Press'' {{WS|countypressonline.com}} of [[Newtown Square, Pennsylvania]] +** ''Garnet Valley Press'' {{WS|countypressonline.com}} of [[Glen Mills, Pennsylvania]] +** ''Springfield Press'' 
{{WS|countypressonline.com}} of [[Springfield, Pennsylvania]] +** ''Town Talk'' {{WS|towntalknews.com}} of [[Ridley, Pennsylvania]] + +* Berks-Mont Newspapers {{WS|berksmontnews.com}} +** ''The Boyertown Area Times'' {{WS|berksmontnews.com/boyertown_area_times}} of [[Boyertown, Pennsylvania]] +** ''The Kutztown Area Patriot'' {{WS|berksmontnews.com/kutztown_area_patriot}} of [[Kutztown, Pennsylvania]] +** ''The Hamburg Area Item'' {{WS|berksmontnews.com/hamburg_area_item}} of [[Hamburg, Pennsylvania]] +** ''The Southern Berks News'' {{WS|berksmontnews.com/southern_berks_news}} of [[Exeter Township, Berks County, Pennsylvania]] +** ''Community Connection'' {{WS|berksmontnews.com/community_connection}} of [[Boyertown, Pennsylvania]] + +* Magazines +** ''Bucks Co. Town & Country Living'' {{WS|buckscountymagazine.com}} +** ''Parents Express'' {{WS|parents-express.com}} +** ''Real Men, Rednecks'' {{WS|realmenredneck.com}} + +{{JRC}} + +==References== + + +[[Category:Journal Register publications|*]] diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/.travis.yml b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/.travis.yml new file mode 100644 index 0000000000..5a19a5faf3 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/.travis.yml @@ -0,0 +1,21 @@ +# Copyright (c) 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# {sudo: required, dist: trusty} is the magic incantation to pick the trusty +# beta environment, which is the only environment we can get that has >4GB +# memory. Currently the `go test -race` tests that we run will peak at just +# over 4GB, which results in everything getting OOM-killed. 
+sudo: required +dist: trusty + +language: go + +go: +- 1.4.2 + +before_install: + - go get github.com/maruel/pre-commit-go/cmd/pcg + +script: + - pcg diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/LICENSE b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/LICENSE new file mode 100644 index 0000000000..6280ff0e06 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/LICENSE @@ -0,0 +1,27 @@ +// Copyright (c) 2015 The Chromium Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/PRESUBMIT.py b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/PRESUBMIT.py new file mode 100644 index 0000000000..d05f0cd873 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/PRESUBMIT.py @@ -0,0 +1,109 @@ +# Copyright 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Top-level presubmit script. + +See https://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for +details on the presubmit API built into depot_tools. +""" + +import os +import sys + + +def PreCommitGo(input_api, output_api, pcg_mode): + """Run go-specific checks via pre-commit-go (pcg) if it's in PATH.""" + if input_api.is_committing: + error_type = output_api.PresubmitError + else: + error_type = output_api.PresubmitPromptWarning + + exe = 'pcg.exe' if sys.platform == 'win32' else 'pcg' + pcg = None + for p in os.environ['PATH'].split(os.pathsep): + pcg = os.path.join(p, exe) + if os.access(pcg, os.X_OK): + break + else: + return [ + error_type( + 'pre-commit-go executable (pcg) could not be found in PATH. All Go ' + 'checks are skipped. 
See https://github.com/maruel/pre-commit-go.') + ] + + cmd = [pcg, 'run', '-m', ','.join(pcg_mode)] + if input_api.verbose: + cmd.append('-v') + # pcg can figure out what files to check on its own based on upstream ref, + # but on PRESUBMIT try builder upsteram isn't set, and it's just 1 commit. + if os.getenv('PRESUBMIT_BUILDER', ''): + cmd.extend(['-r', 'HEAD~1']) + return input_api.RunTests([ + input_api.Command( + name='pre-commit-go: %s' % ', '.join(pcg_mode), + cmd=cmd, + kwargs={}, + message=error_type), + ]) + + +def header(input_api): + """Returns the expected license header regexp for this project.""" + current_year = int(input_api.time.strftime('%Y')) + allowed_years = (str(s) for s in reversed(xrange(2011, current_year + 1))) + years_re = '(' + '|'.join(allowed_years) + ')' + license_header = ( + r'.*? Copyright %(year)s The Chromium Authors\. ' + r'All rights reserved\.\n' + r'.*? Use of this source code is governed by a BSD-style license ' + r'that can be\n' + r'.*? found in the LICENSE file\.(?: \*/)?\n' + ) % { + 'year': years_re, + } + return license_header + + +def source_file_filter(input_api): + """Returns filter that selects source code files only.""" + bl = list(input_api.DEFAULT_BLACK_LIST) + [ + r'.+\.pb\.go$', + r'.+_string\.go$', + ] + wl = list(input_api.DEFAULT_WHITE_LIST) + [ + r'.+\.go$', + ] + return lambda x: input_api.FilterSourceFile(x, white_list=wl, black_list=bl) + + +def CommonChecks(input_api, output_api): + results = [] + results.extend( + input_api.canned_checks.CheckChangeHasNoStrayWhitespace( + input_api, output_api, + source_file_filter=source_file_filter(input_api))) + results.extend( + input_api.canned_checks.CheckLicense( + input_api, output_api, header(input_api), + source_file_filter=source_file_filter(input_api))) + return results + + +def CheckChangeOnUpload(input_api, output_api): + results = CommonChecks(input_api, output_api) + results.extend(PreCommitGo(input_api, output_api, ['lint', 'pre-commit'])) + return 
results + + +def CheckChangeOnCommit(input_api, output_api): + results = CommonChecks(input_api, output_api) + results.extend(input_api.canned_checks.CheckChangeHasDescription( + input_api, output_api)) + results.extend(input_api.canned_checks.CheckDoNotSubmitInDescription( + input_api, output_api)) + results.extend(input_api.canned_checks.CheckDoNotSubmitInFiles( + input_api, output_api)) + results.extend(PreCommitGo( + input_api, output_api, ['continuous-integration'])) + return results diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/README.md b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/README.md new file mode 100644 index 0000000000..a85380c421 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/README.md @@ -0,0 +1,78 @@ +go-render: A verbose recursive Go type-to-string conversion library. +==================================================================== + +[![GoDoc](https://godoc.org/github.com/luci/go-render?status.svg)](https://godoc.org/github.com/luci/go-render) +[![Build Status](https://travis-ci.org/luci/go-render.svg)](https://travis-ci.org/luci/go-render) + +This is not an official Google product. + +## Overview + +The *render* package implements a more verbose form of the standard Go string +formatter, `fmt.Sprintf("%#v", value)`, adding: + - Pointer recursion. Normally, Go stops at the first pointer and prints its + address. The *render* package will recurse and continue to render pointer + values. + - Recursion loop detection. Recursion is nice, but if a recursion path detects + a loop, *render* will note this and move on. + - Custom type name rendering. + - Deterministic key sorting for `string`- and `int`-keyed maps. + - Testing! + +Call `render.Render` and pass it an `interface{}`. 
+ +For example: + +```Go +type customType int +type testStruct struct { + S string + V *map[string]int + I interface{} +} + +a := testStruct{ + S: "hello", + V: &map[string]int{"foo": 0, "bar": 1}, + I: customType(42), +} + +fmt.Println("Render test:") +fmt.Printf("fmt.Printf: %#v\n", a))) +fmt.Printf("render.Render: %s\n", Render(a)) +``` + +Yields: +``` +fmt.Printf: render.testStruct{S:"hello", V:(*map[string]int)(0x600dd065), I:42} +render.Render: render.testStruct{S:"hello", V:(*map[string]int){"bar":1, "foo":0}, I:render.customType(42)} +``` + +This is not intended to be a high-performance library, but it's not terrible +either. + +Contributing +------------ + + * Sign the [Google CLA](https://cla.developers.google.com/clas). + * Make sure your `user.email` and `user.name` are configured in `git config`. + * Install the [pcg](https://github.com/maruel/pre-commit-go) git hook: + `go get -u github.com/maruel/pre-commit-go/cmd/... && pcg` + +Run the following to setup the code review tool and create your first review: + + git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git $HOME/src/depot_tools + export PATH="$PATH:$HOME/src/depot_tools" + cd $GOROOT/github.com/luci/go-render + git checkout -b work origin/master + + # hack hack + + git commit -a -m "This is awesome\nR=joe@example.com" + # This will ask for your Google Account credentials. + git cl upload -s + # Wait for LGTM over email. + # Check the commit queue box in codereview website. + # Wait for the change to be tested and landed automatically. + +Use `git cl help` and `git cl help ` for more details. 
diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/WATCHLISTS b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/WATCHLISTS new file mode 100644 index 0000000000..e4172088dd --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/WATCHLISTS @@ -0,0 +1,26 @@ +# Copyright 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# Watchlist Rules +# Refer: http://dev.chromium.org/developers/contributing-code/watchlists + +{ + + 'WATCHLIST_DEFINITIONS': { + 'all': { + 'filepath': '.+', + }, + }, + + 'WATCHLISTS': { + 'all': [ + # Add yourself here to get explicitly spammed. + 'maruel@chromium.org', + 'tandrii+luci-go@chromium.org', + 'todd@cloudera.com', + 'andrew.wang@cloudera.com', + ], + }, + +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/pre-commit-go.yml b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/pre-commit-go.yml new file mode 100644 index 0000000000..074ee1f84d --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/pre-commit-go.yml @@ -0,0 +1,78 @@ +# https://github.com/maruel/pre-commit-go configuration file to run checks +# automatically on commit, on push and on continuous integration service after +# a push or on merge of a pull request. +# +# See https://godoc.org/github.com/maruel/pre-commit-go/checks for more +# information. 
+ +min_version: 0.4.7 +modes: + continuous-integration: + checks: + build: + - build_all: false + extra_args: [] + coverage: + - use_global_inference: false + use_coveralls: true + global: + min_coverage: 50 + max_coverage: 100 + per_dir_default: + min_coverage: 1 + max_coverage: 100 + per_dir: {} + gofmt: + - {} + goimports: + - {} + test: + - extra_args: + - -v + - -race + max_duration: 600 + lint: + checks: + golint: + - blacklist: [] + govet: + - blacklist: + - ' composite literal uses unkeyed fields' + max_duration: 15 + pre-commit: + checks: + build: + - build_all: false + extra_args: [] + gofmt: + - {} + test: + - extra_args: + - -short + max_duration: 35 + pre-push: + checks: + coverage: + - use_global_inference: false + use_coveralls: false + global: + min_coverage: 50 + max_coverage: 100 + per_dir_default: + min_coverage: 1 + max_coverage: 100 + per_dir: {} + goimports: + - {} + test: + - extra_args: + - -v + - -race + max_duration: 35 + +ignore_patterns: +- .* +- _* +- '*.pb.go' +- '*_string.go' +- '*-gen.go' diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/render/render.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/render/render.go new file mode 100644 index 0000000000..313611ef0c --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/render/render.go @@ -0,0 +1,481 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package render + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strconv" +) + +var builtinTypeMap = map[reflect.Kind]string{ + reflect.Bool: "bool", + reflect.Complex128: "complex128", + reflect.Complex64: "complex64", + reflect.Float32: "float32", + reflect.Float64: "float64", + reflect.Int16: "int16", + reflect.Int32: "int32", + reflect.Int64: "int64", + reflect.Int8: "int8", + reflect.Int: "int", + reflect.String: "string", + reflect.Uint16: "uint16", + reflect.Uint32: "uint32", + reflect.Uint64: "uint64", + reflect.Uint8: "uint8", + reflect.Uint: "uint", + reflect.Uintptr: "uintptr", +} + +var builtinTypeSet = map[string]struct{}{} + +func init() { + for _, v := range builtinTypeMap { + builtinTypeSet[v] = struct{}{} + } +} + +var typeOfString = reflect.TypeOf("") +var typeOfInt = reflect.TypeOf(int(1)) +var typeOfUint = reflect.TypeOf(uint(1)) +var typeOfFloat = reflect.TypeOf(10.1) + +// Render converts a structure to a string representation. Unline the "%#v" +// format string, this resolves pointer types' contents in structs, maps, and +// slices/arrays and prints their field values. +func Render(v interface{}) string { + buf := bytes.Buffer{} + s := (*traverseState)(nil) + s.render(&buf, 0, reflect.ValueOf(v), false) + return buf.String() +} + +// renderPointer is called to render a pointer value. +// +// This is overridable so that the test suite can have deterministic pointer +// values in its expectations. +var renderPointer = func(buf *bytes.Buffer, p uintptr) { + fmt.Fprintf(buf, "0x%016x", p) +} + +// traverseState is used to note and avoid recursion as struct members are being +// traversed. +// +// traverseState is allowed to be nil. Specifically, the root state is nil. 
+type traverseState struct { + parent *traverseState + ptr uintptr +} + +func (s *traverseState) forkFor(ptr uintptr) *traverseState { + for cur := s; cur != nil; cur = cur.parent { + if ptr == cur.ptr { + return nil + } + } + + fs := &traverseState{ + parent: s, + ptr: ptr, + } + return fs +} + +func (s *traverseState) render(buf *bytes.Buffer, ptrs int, v reflect.Value, implicit bool) { + if v.Kind() == reflect.Invalid { + buf.WriteString("nil") + return + } + vt := v.Type() + + // If the type being rendered is a potentially recursive type (a type that + // can contain itself as a member), we need to avoid recursion. + // + // If we've already seen this type before, mark that this is the case and + // write a recursion placeholder instead of actually rendering it. + // + // If we haven't seen it before, fork our `seen` tracking so any higher-up + // renderers will also render it at least once, then mark that we've seen it + // to avoid recursing on lower layers. + pe := uintptr(0) + vk := vt.Kind() + switch vk { + case reflect.Ptr: + // Since structs and arrays aren't pointers, they can't directly be + // recursed, but they can contain pointers to themselves. Record their + // pointer to avoid this. 
+ switch v.Elem().Kind() { + case reflect.Struct, reflect.Array: + pe = v.Pointer() + } + + case reflect.Slice, reflect.Map: + pe = v.Pointer() + } + if pe != 0 { + s = s.forkFor(pe) + if s == nil { + buf.WriteString("") + return + } + } + + isAnon := func(t reflect.Type) bool { + if t.Name() != "" { + if _, ok := builtinTypeSet[t.Name()]; !ok { + return false + } + } + return t.Kind() != reflect.Interface + } + + switch vk { + case reflect.Struct: + if !implicit { + writeType(buf, ptrs, vt) + } + buf.WriteRune('{') + if rendered, ok := renderTime(v); ok { + buf.WriteString(rendered) + } else { + structAnon := vt.Name() == "" + for i := 0; i < vt.NumField(); i++ { + if i > 0 { + buf.WriteString(", ") + } + anon := structAnon && isAnon(vt.Field(i).Type) + + if !anon { + buf.WriteString(vt.Field(i).Name) + buf.WriteRune(':') + } + + s.render(buf, 0, v.Field(i), anon) + } + } + buf.WriteRune('}') + + case reflect.Slice: + if v.IsNil() { + if !implicit { + writeType(buf, ptrs, vt) + buf.WriteString("(nil)") + } else { + buf.WriteString("nil") + } + return + } + fallthrough + + case reflect.Array: + if !implicit { + writeType(buf, ptrs, vt) + } + anon := vt.Name() == "" && isAnon(vt.Elem()) + buf.WriteString("{") + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteString(", ") + } + + s.render(buf, 0, v.Index(i), anon) + } + buf.WriteRune('}') + + case reflect.Map: + if !implicit { + writeType(buf, ptrs, vt) + } + if v.IsNil() { + buf.WriteString("(nil)") + } else { + buf.WriteString("{") + + mkeys := v.MapKeys() + tryAndSortMapKeys(vt, mkeys) + + kt := vt.Key() + keyAnon := typeOfString.ConvertibleTo(kt) || typeOfInt.ConvertibleTo(kt) || typeOfUint.ConvertibleTo(kt) || typeOfFloat.ConvertibleTo(kt) + valAnon := vt.Name() == "" && isAnon(vt.Elem()) + for i, mk := range mkeys { + if i > 0 { + buf.WriteString(", ") + } + + s.render(buf, 0, mk, keyAnon) + buf.WriteString(":") + s.render(buf, 0, v.MapIndex(mk), valAnon) + } + buf.WriteRune('}') + } + + case 
reflect.Ptr: + ptrs++ + fallthrough + case reflect.Interface: + if v.IsNil() { + writeType(buf, ptrs, v.Type()) + buf.WriteString("(nil)") + } else { + s.render(buf, ptrs, v.Elem(), false) + } + + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + writeType(buf, ptrs, vt) + buf.WriteRune('(') + renderPointer(buf, v.Pointer()) + buf.WriteRune(')') + + default: + tstr := vt.String() + implicit = implicit || (ptrs == 0 && builtinTypeMap[vk] == tstr) + if !implicit { + writeType(buf, ptrs, vt) + buf.WriteRune('(') + } + + switch vk { + case reflect.String: + fmt.Fprintf(buf, "%q", v.String()) + case reflect.Bool: + fmt.Fprintf(buf, "%v", v.Bool()) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + fmt.Fprintf(buf, "%d", v.Int()) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + fmt.Fprintf(buf, "%d", v.Uint()) + + case reflect.Float32, reflect.Float64: + fmt.Fprintf(buf, "%g", v.Float()) + + case reflect.Complex64, reflect.Complex128: + fmt.Fprintf(buf, "%g", v.Complex()) + } + + if !implicit { + buf.WriteRune(')') + } + } +} + +func writeType(buf *bytes.Buffer, ptrs int, t reflect.Type) { + parens := ptrs > 0 + switch t.Kind() { + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + parens = true + } + + if parens { + buf.WriteRune('(') + for i := 0; i < ptrs; i++ { + buf.WriteRune('*') + } + } + + switch t.Kind() { + case reflect.Ptr: + if ptrs == 0 { + // This pointer was referenced from within writeType (e.g., as part of + // rendering a list), and so hasn't had its pointer asterisk accounted + // for. 
+ buf.WriteRune('*') + } + writeType(buf, 0, t.Elem()) + + case reflect.Interface: + if n := t.Name(); n != "" { + buf.WriteString(t.String()) + } else { + buf.WriteString("interface{}") + } + + case reflect.Array: + buf.WriteRune('[') + buf.WriteString(strconv.FormatInt(int64(t.Len()), 10)) + buf.WriteRune(']') + writeType(buf, 0, t.Elem()) + + case reflect.Slice: + if t == reflect.SliceOf(t.Elem()) { + buf.WriteString("[]") + writeType(buf, 0, t.Elem()) + } else { + // Custom slice type, use type name. + buf.WriteString(t.String()) + } + + case reflect.Map: + if t == reflect.MapOf(t.Key(), t.Elem()) { + buf.WriteString("map[") + writeType(buf, 0, t.Key()) + buf.WriteRune(']') + writeType(buf, 0, t.Elem()) + } else { + // Custom map type, use type name. + buf.WriteString(t.String()) + } + + default: + buf.WriteString(t.String()) + } + + if parens { + buf.WriteRune(')') + } +} + +type cmpFn func(a, b reflect.Value) int + +type sortableValueSlice struct { + cmp cmpFn + elements []reflect.Value +} + +func (s sortableValueSlice) Len() int { + return len(s.elements) +} + +func (s sortableValueSlice) Less(i, j int) bool { + return s.cmp(s.elements[i], s.elements[j]) < 0 +} + +func (s sortableValueSlice) Swap(i, j int) { + s.elements[i], s.elements[j] = s.elements[j], s.elements[i] +} + +// cmpForType returns a cmpFn which sorts the data for some type t in the same +// order that a go-native map key is compared for equality. 
+func cmpForType(t reflect.Type) cmpFn { + switch t.Kind() { + case reflect.String: + return func(av, bv reflect.Value) int { + a, b := av.String(), bv.String() + if a < b { + return -1 + } else if a > b { + return 1 + } + return 0 + } + + case reflect.Bool: + return func(av, bv reflect.Value) int { + a, b := av.Bool(), bv.Bool() + if !a && b { + return -1 + } else if a && !b { + return 1 + } + return 0 + } + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return func(av, bv reflect.Value) int { + a, b := av.Int(), bv.Int() + if a < b { + return -1 + } else if a > b { + return 1 + } + return 0 + } + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, reflect.Uintptr, reflect.UnsafePointer: + return func(av, bv reflect.Value) int { + a, b := av.Uint(), bv.Uint() + if a < b { + return -1 + } else if a > b { + return 1 + } + return 0 + } + + case reflect.Float32, reflect.Float64: + return func(av, bv reflect.Value) int { + a, b := av.Float(), bv.Float() + if a < b { + return -1 + } else if a > b { + return 1 + } + return 0 + } + + case reflect.Interface: + return func(av, bv reflect.Value) int { + a, b := av.InterfaceData(), bv.InterfaceData() + if a[0] < b[0] { + return -1 + } else if a[0] > b[0] { + return 1 + } + if a[1] < b[1] { + return -1 + } else if a[1] > b[1] { + return 1 + } + return 0 + } + + case reflect.Complex64, reflect.Complex128: + return func(av, bv reflect.Value) int { + a, b := av.Complex(), bv.Complex() + if real(a) < real(b) { + return -1 + } else if real(a) > real(b) { + return 1 + } + if imag(a) < imag(b) { + return -1 + } else if imag(a) > imag(b) { + return 1 + } + return 0 + } + + case reflect.Ptr, reflect.Chan: + return func(av, bv reflect.Value) int { + a, b := av.Pointer(), bv.Pointer() + if a < b { + return -1 + } else if a > b { + return 1 + } + return 0 + } + + case reflect.Struct: + cmpLst := make([]cmpFn, t.NumField()) + for i := range cmpLst { + cmpLst[i] = 
cmpForType(t.Field(i).Type) + } + return func(a, b reflect.Value) int { + for i, cmp := range cmpLst { + if rslt := cmp(a.Field(i), b.Field(i)); rslt != 0 { + return rslt + } + } + return 0 + } + } + + return nil +} + +func tryAndSortMapKeys(mt reflect.Type, k []reflect.Value) { + if cmp := cmpForType(mt.Key()); cmp != nil { + sort.Sort(sortableValueSlice{cmp, k}) + } +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/render/render_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/render/render_test.go new file mode 100644 index 0000000000..5ca21b0dec --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/render/render_test.go @@ -0,0 +1,281 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package render + +import ( + "bytes" + "fmt" + "reflect" + "regexp" + "runtime" + "testing" + "time" +) + +func init() { + // For testing purposes, pointers will render as "PTR" so that they are + // deterministic. + renderPointer = func(buf *bytes.Buffer, p uintptr) { + buf.WriteString("PTR") + } +} + +func assertRendersLike(t *testing.T, name string, v interface{}, exp string) { + act := Render(v) + if act != exp { + _, _, line, _ := runtime.Caller(1) + t.Errorf("On line #%d, [%s] did not match expectations:\nExpected: %s\nActual : %s\n", line, name, exp, act) + } +} + +func TestRenderList(t *testing.T) { + t.Parallel() + + // Note that we make some of the fields exportable. This is to avoid a fun case + // where the first reflect.Value has a read-only bit set, but follow-on values + // do not, so recursion tests are off by one. 
+ type testStruct struct { + Name string + I interface{} + + m string + } + + type myStringSlice []string + type myStringMap map[string]string + type myIntType int + type myStringType string + type myTypeWithTime struct{ Public, private time.Time } + + var date = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + populatedTimes := myTypeWithTime{date, date} + zeroTimes := myTypeWithTime{} + + s0 := "string0" + s0P := &s0 + mit := myIntType(42) + stringer := fmt.Stringer(nil) + + for i, tc := range []struct { + a interface{} + s string + }{ + {nil, `nil`}, + {make(chan int), `(chan int)(PTR)`}, + {&stringer, `(*fmt.Stringer)(nil)`}, + {123, `123`}, + {"hello", `"hello"`}, + {(*testStruct)(nil), `(*render.testStruct)(nil)`}, + {(**testStruct)(nil), `(**render.testStruct)(nil)`}, + {[]***testStruct(nil), `[]***render.testStruct(nil)`}, + {testStruct{Name: "foo", I: &testStruct{Name: "baz"}}, + `render.testStruct{Name:"foo", I:(*render.testStruct){Name:"baz", I:interface{}(nil), m:""}, m:""}`}, + {[]byte(nil), `[]uint8(nil)`}, + {[]byte{}, `[]uint8{}`}, + {map[string]string(nil), `map[string]string(nil)`}, + {[]*testStruct{ + {Name: "foo"}, + {Name: "bar"}, + }, `[]*render.testStruct{(*render.testStruct){Name:"foo", I:interface{}(nil), m:""}, ` + + `(*render.testStruct){Name:"bar", I:interface{}(nil), m:""}}`}, + {myStringSlice{"foo", "bar"}, `render.myStringSlice{"foo", "bar"}`}, + {myStringMap{"foo": "bar"}, `render.myStringMap{"foo":"bar"}`}, + {myIntType(12), `render.myIntType(12)`}, + {&mit, `(*render.myIntType)(42)`}, + {myStringType("foo"), `render.myStringType("foo")`}, + {zeroTimes, `render.myTypeWithTime{Public:time.Time{0}, private:time.Time{wall:0, ext:0, loc:(*time.Location)(nil)}}`}, + {populatedTimes, `render.myTypeWithTime{Public:time.Time{2000-01-01 00:00:00 +0000 UTC}, private:time.Time{wall:0, ext:63082281600, loc:(*time.Location)(nil)}}`}, + {struct { + a int + b string + }{123, "foo"}, `struct { a int; b string }{123, "foo"}`}, + {[]string{"foo", "foo", 
"bar", "baz", "qux", "qux"}, + `[]string{"foo", "foo", "bar", "baz", "qux", "qux"}`}, + {[...]int{1, 2, 3}, `[3]int{1, 2, 3}`}, + {map[string]bool{ + "foo": true, + "bar": false, + }, `map[string]bool{"bar":false, "foo":true}`}, + {map[int]string{1: "foo", 2: "bar"}, `map[int]string{1:"foo", 2:"bar"}`}, + {uint32(1337), `1337`}, + {3.14, `3.14`}, + {complex(3, 0.14), `(3+0.14i)`}, + {&s0, `(*string)("string0")`}, + {&s0P, `(**string)("string0")`}, + {[]interface{}{nil, 1, 2, nil}, `[]interface{}{interface{}(nil), 1, 2, interface{}(nil)}`}, + } { + assertRendersLike(t, fmt.Sprintf("Input #%d", i), tc.a, tc.s) + } +} + +func TestRenderRecursiveStruct(t *testing.T) { + type testStruct struct { + Name string + I interface{} + } + + s := &testStruct{ + Name: "recursive", + } + s.I = s + + assertRendersLike(t, "Recursive struct", s, + `(*render.testStruct){Name:"recursive", I:}`) +} + +func TestRenderRecursiveArray(t *testing.T) { + a := [2]interface{}{} + a[0] = &a + a[1] = &a + + assertRendersLike(t, "Recursive array", &a, + `(*[2]interface{}){, }`) +} + +func TestRenderRecursiveMap(t *testing.T) { + m := map[string]interface{}{} + foo := "foo" + m["foo"] = m + m["bar"] = [](*string){&foo, &foo} + v := []map[string]interface{}{m, m} + + assertRendersLike(t, "Recursive map", v, + `[]map[string]interface{}{{`+ + `"bar":[]*string{(*string)("foo"), (*string)("foo")}, `+ + `"foo":}, {`+ + `"bar":[]*string{(*string)("foo"), (*string)("foo")}, `+ + `"foo":}}`) +} + +func TestRenderImplicitType(t *testing.T) { + type namedStruct struct{ a, b int } + type namedInt int + + tcs := []struct { + in interface{} + expect string + }{ + { + []struct{ a, b int }{{1, 2}}, + "[]struct { a int; b int }{{1, 2}}", + }, + { + map[string]struct{ a, b int }{"hi": {1, 2}}, + `map[string]struct { a int; b int }{"hi":{1, 2}}`, + }, + { + map[namedInt]struct{}{10: {}}, + `map[render.namedInt]struct {}{10:{}}`, + }, + { + struct{ a, b int }{1, 2}, + `struct { a int; b int }{1, 2}`, + }, + { + 
namedStruct{1, 2}, + "render.namedStruct{a:1, b:2}", + }, + } + + for _, tc := range tcs { + assertRendersLike(t, reflect.TypeOf(tc.in).String(), tc.in, tc.expect) + } +} + +func ExampleInReadme() { + type customType int + type testStruct struct { + S string + V *map[string]int + I interface{} + } + + a := testStruct{ + S: "hello", + V: &map[string]int{"foo": 0, "bar": 1}, + I: customType(42), + } + + fmt.Println("Render test:") + fmt.Printf("fmt.Printf: %s\n", sanitizePointer(fmt.Sprintf("%#v", a))) + fmt.Printf("render.Render: %s\n", Render(a)) + // Output: Render test: + // fmt.Printf: render.testStruct{S:"hello", V:(*map[string]int)(0x600dd065), I:42} + // render.Render: render.testStruct{S:"hello", V:(*map[string]int){"bar":1, "foo":0}, I:render.customType(42)} +} + +var pointerRE = regexp.MustCompile(`\(0x[a-f0-9]+\)`) + +func sanitizePointer(s string) string { + return pointerRE.ReplaceAllString(s, "(0x600dd065)") +} + +type chanList []chan int + +func (c chanList) Len() int { return len(c) } +func (c chanList) Swap(i, j int) { c[i], c[j] = c[j], c[i] } +func (c chanList) Less(i, j int) bool { + return reflect.ValueOf(c[i]).Pointer() < reflect.ValueOf(c[j]).Pointer() +} + +func TestMapSortRendering(t *testing.T) { + type namedMapType map[int]struct{ a int } + type mapKey struct{ a, b int } + + chans := make(chanList, 5) + for i := range chans { + chans[i] = make(chan int) + } + + tcs := []struct { + in interface{} + expect string + }{ + { + map[uint32]struct{}{1: {}, 2: {}, 3: {}, 4: {}, 5: {}, 6: {}, 7: {}, 8: {}}, + "map[uint32]struct {}{1:{}, 2:{}, 3:{}, 4:{}, 5:{}, 6:{}, 7:{}, 8:{}}", + }, + { + map[int8]struct{}{1: {}, 2: {}, 3: {}, 4: {}, 5: {}, 6: {}, 7: {}, 8: {}}, + "map[int8]struct {}{1:{}, 2:{}, 3:{}, 4:{}, 5:{}, 6:{}, 7:{}, 8:{}}", + }, + { + map[uintptr]struct{}{1: {}, 2: {}, 3: {}, 4: {}, 5: {}, 6: {}, 7: {}, 8: {}}, + "map[uintptr]struct {}{1:{}, 2:{}, 3:{}, 4:{}, 5:{}, 6:{}, 7:{}, 8:{}}", + }, + { + namedMapType{10: struct{ a int }{20}}, + 
"render.namedMapType{10:struct { a int }{20}}", + }, + { + map[mapKey]struct{}{mapKey{3, 1}: {}, mapKey{1, 3}: {}, mapKey{1, 2}: {}, mapKey{2, 1}: {}}, + "map[render.mapKey]struct {}{render.mapKey{a:1, b:2}:{}, render.mapKey{a:1, b:3}:{}, render.mapKey{a:2, b:1}:{}, render.mapKey{a:3, b:1}:{}}", + }, + { + map[float64]struct{}{10.5: {}, 10.15: {}, 1203: {}, 1: {}, 2: {}}, + "map[float64]struct {}{1:{}, 2:{}, 10.15:{}, 10.5:{}, 1203:{}}", + }, + { + map[bool]struct{}{true: {}, false: {}}, + "map[bool]struct {}{false:{}, true:{}}", + }, + { + map[interface{}]struct{}{1: {}, 2: {}, 3: {}, "foo": {}}, + `map[interface{}]struct {}{1:{}, 2:{}, 3:{}, "foo":{}}`, + }, + { + map[complex64]struct{}{1 + 2i: {}, 2 + 1i: {}, 3 + 1i: {}, 1 + 3i: {}}, + "map[complex64]struct {}{(1+2i):{}, (1+3i):{}, (2+1i):{}, (3+1i):{}}", + }, + { + map[chan int]string{nil: "a", chans[0]: "b", chans[1]: "c", chans[2]: "d", chans[3]: "e", chans[4]: "f"}, + `map[(chan int)]string{(chan int)(PTR):"a", (chan int)(PTR):"b", (chan int)(PTR):"c", (chan int)(PTR):"d", (chan int)(PTR):"e", (chan int)(PTR):"f"}`, + }, + } + + for _, tc := range tcs { + assertRendersLike(t, reflect.TypeOf(tc.in).Name(), tc.in, tc.expect) + } +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/render/render_time.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/render/render_time.go new file mode 100644 index 0000000000..990c75d0ff --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/go-render/render/render_time.go @@ -0,0 +1,26 @@ +package render + +import ( + "reflect" + "time" +) + +func renderTime(value reflect.Value) (string, bool) { + if instant, ok := convertTime(value); !ok { + return "", false + } else if instant.IsZero() { + return "0", true + } else { + return instant.String(), true + } +} + +func convertTime(value reflect.Value) (t 
time.Time, ok bool) { + if value.Type() == timeType { + defer func() { recover() }() + t, ok = value.Interface().(time.Time) + } + return +} + +var timeType = reflect.TypeOf(time.Time{}) diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/.gitignore b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/.gitignore new file mode 100644 index 0000000000..dd8fc7468f --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/.gitignore @@ -0,0 +1,5 @@ +*.6 +6.out +_obj/ +_test/ +_testmain.go diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/.travis.yml b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/.travis.yml new file mode 100644 index 0000000000..b97211926e --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/.travis.yml @@ -0,0 +1,4 @@ +# Cf. http://docs.travis-ci.com/user/getting-started/ +# Cf. http://docs.travis-ci.com/user/languages/go/ + +language: go diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/LICENSE b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/README.md b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/README.md new file mode 100644 index 0000000000..215a2bb7a8 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/README.md @@ -0,0 +1,58 @@ +[![GoDoc](https://godoc.org/github.com/smartystreets/assertions/internal/oglematchers?status.svg)](https://godoc.org/github.com/smartystreets/assertions/internal/oglematchers) + +`oglematchers` is a package for the Go programming language containing a set of +matchers, useful in a testing or mocking framework, inspired by and mostly +compatible with [Google Test][googletest] for C++ and +[Google JS Test][google-js-test]. The package is used by the +[ogletest][ogletest] testing framework and [oglemock][oglemock] mocking +framework, which may be more directly useful to you, but can be generically used +elsewhere as well. + +A "matcher" is simply an object with a `Matches` method defining a set of golang +values matched by the matcher, and a `Description` method describing that set. +For example, here are some matchers: + +```go +// Numbers +Equals(17.13) +LessThan(19) + +// Strings +Equals("taco") +HasSubstr("burrito") +MatchesRegex("t.*o") + +// Combining matchers +AnyOf(LessThan(17), GreaterThan(19)) +``` + +There are lots more; see [here][reference] for a reference. You can also add +your own simply by implementing the `oglematchers.Matcher` interface. + + +Installation +------------ + +First, make sure you have installed Go 1.0.2 or newer. See +[here][golang-install] for instructions. + +Use the following command to install `oglematchers` and keep it up to date: + + go get -u github.com/smartystreets/assertions/internal/oglematchers + + +Documentation +------------- + +See [here][reference] for documentation. 
Alternatively, you can install the +package and then use `godoc`: + + godoc github.com/smartystreets/assertions/internal/oglematchers + + +[reference]: http://godoc.org/github.com/smartystreets/assertions/internal/oglematchers +[golang-install]: http://golang.org/doc/install.html +[googletest]: http://code.google.com/p/googletest/ +[google-js-test]: http://code.google.com/p/google-js-test/ +[ogletest]: http://github.com/smartystreets/assertions/internal/ogletest +[oglemock]: http://github.com/smartystreets/assertions/internal/oglemock diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/any_of.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/any_of.go new file mode 100644 index 0000000000..2918b51f21 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/any_of.go @@ -0,0 +1,94 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers + +import ( + "errors" + "fmt" + "reflect" + "strings" +) + +// AnyOf accepts a set of values S and returns a matcher that follows the +// algorithm below when considering a candidate c: +// +// 1. If there exists a value m in S such that m implements the Matcher +// interface and m matches c, return true. +// +// 2. 
Otherwise, if there exists a value v in S such that v does not implement +// the Matcher interface and the matcher Equals(v) matches c, return true. +// +// 3. Otherwise, if there is a value m in S such that m implements the Matcher +// interface and m returns a fatal error for c, return that fatal error. +// +// 4. Otherwise, return false. +// +// This is akin to a logical OR operation for matchers, with non-matchers x +// being treated as Equals(x). +func AnyOf(vals ...interface{}) Matcher { + // Get ahold of a type variable for the Matcher interface. + var dummy *Matcher + matcherType := reflect.TypeOf(dummy).Elem() + + // Create a matcher for each value, or use the value itself if it's already a + // matcher. + wrapped := make([]Matcher, len(vals)) + for i, v := range vals { + t := reflect.TypeOf(v) + if t != nil && t.Implements(matcherType) { + wrapped[i] = v.(Matcher) + } else { + wrapped[i] = Equals(v) + } + } + + return &anyOfMatcher{wrapped} +} + +type anyOfMatcher struct { + wrapped []Matcher +} + +func (m *anyOfMatcher) Description() string { + wrappedDescs := make([]string, len(m.wrapped)) + for i, matcher := range m.wrapped { + wrappedDescs[i] = matcher.Description() + } + + return fmt.Sprintf("or(%s)", strings.Join(wrappedDescs, ", ")) +} + +func (m *anyOfMatcher) Matches(c interface{}) (err error) { + err = errors.New("") + + // Try each matcher in turn. + for _, matcher := range m.wrapped { + wrappedErr := matcher.Matches(c) + + // Return immediately if there's a match. + if wrappedErr == nil { + err = nil + return + } + + // Note the fatal error, if any. 
+ if _, isFatal := wrappedErr.(*FatalError); isFatal { + err = wrappedErr + } + } + + return +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/contains.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/contains.go new file mode 100644 index 0000000000..87f107d392 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/contains.go @@ -0,0 +1,61 @@ +// Copyright 2012 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers + +import ( + "fmt" + "reflect" +) + +// Return a matcher that matches arrays slices with at least one element that +// matches the supplied argument. If the argument x is not itself a Matcher, +// this is equivalent to Contains(Equals(x)). +func Contains(x interface{}) Matcher { + var result containsMatcher + var ok bool + + if result.elementMatcher, ok = x.(Matcher); !ok { + result.elementMatcher = DeepEquals(x) + } + + return &result +} + +type containsMatcher struct { + elementMatcher Matcher +} + +func (m *containsMatcher) Description() string { + return fmt.Sprintf("contains: %s", m.elementMatcher.Description()) +} + +func (m *containsMatcher) Matches(candidate interface{}) error { + // The candidate must be a slice or an array. 
+ v := reflect.ValueOf(candidate) + if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { + return NewFatalError("which is not a slice or array") + } + + // Check each element. + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + if matchErr := m.elementMatcher.Matches(elem.Interface()); matchErr == nil { + return nil + } + } + + return fmt.Errorf("") +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/deep_equals.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/deep_equals.go new file mode 100644 index 0000000000..1d91baef32 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/deep_equals.go @@ -0,0 +1,88 @@ +// Copyright 2012 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers + +import ( + "bytes" + "errors" + "fmt" + "reflect" +) + +var byteSliceType reflect.Type = reflect.TypeOf([]byte{}) + +// DeepEquals returns a matcher that matches based on 'deep equality', as +// defined by the reflect package. This matcher requires that values have +// identical types to x. 
+func DeepEquals(x interface{}) Matcher { + return &deepEqualsMatcher{x} +} + +type deepEqualsMatcher struct { + x interface{} +} + +func (m *deepEqualsMatcher) Description() string { + xDesc := fmt.Sprintf("%v", m.x) + xValue := reflect.ValueOf(m.x) + + // Special case: fmt.Sprintf presents nil slices as "[]", but + // reflect.DeepEqual makes a distinction between nil and empty slices. Make + // this less confusing. + if xValue.Kind() == reflect.Slice && xValue.IsNil() { + xDesc = "" + } + + return fmt.Sprintf("deep equals: %s", xDesc) +} + +func (m *deepEqualsMatcher) Matches(c interface{}) error { + // Make sure the types match. + ct := reflect.TypeOf(c) + xt := reflect.TypeOf(m.x) + + if ct != xt { + return NewFatalError(fmt.Sprintf("which is of type %v", ct)) + } + + // Special case: handle byte slices more efficiently. + cValue := reflect.ValueOf(c) + xValue := reflect.ValueOf(m.x) + + if ct == byteSliceType && !cValue.IsNil() && !xValue.IsNil() { + xBytes := m.x.([]byte) + cBytes := c.([]byte) + + if bytes.Equal(cBytes, xBytes) { + return nil + } + + return errors.New("") + } + + // Defer to the reflect package. + if reflect.DeepEqual(m.x, c) { + return nil + } + + // Special case: if the comparison failed because c is the nil slice, given + // an indication of this (since its value is printed as "[]"). + if cValue.Kind() == reflect.Slice && cValue.IsNil() { + return errors.New("which is nil") + } + + return errors.New("") +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/equals.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/equals.go new file mode 100644 index 0000000000..a510707b3c --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/equals.go @@ -0,0 +1,541 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. 
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers + +import ( + "errors" + "fmt" + "math" + "reflect" +) + +// Equals(x) returns a matcher that matches values v such that v and x are +// equivalent. This includes the case when the comparison v == x using Go's +// built-in comparison operator is legal (except for structs, which this +// matcher does not support), but for convenience the following rules also +// apply: +// +// * Type checking is done based on underlying types rather than actual +// types, so that e.g. two aliases for string can be compared: +// +// type stringAlias1 string +// type stringAlias2 string +// +// a := "taco" +// b := stringAlias1("taco") +// c := stringAlias2("taco") +// +// ExpectTrue(a == b) // Legal, passes +// ExpectTrue(b == c) // Illegal, doesn't compile +// +// ExpectThat(a, Equals(b)) // Passes +// ExpectThat(b, Equals(c)) // Passes +// +// * Values of numeric type are treated as if they were abstract numbers, and +// compared accordingly. Therefore Equals(17) will match int(17), +// int16(17), uint(17), float32(17), complex64(17), and so on. +// +// If you want a stricter matcher that contains no such cleverness, see +// IdenticalTo instead. +// +// Arrays are supported by this matcher, but do not participate in the +// exceptions above. 
Two arrays compared with this matcher must have identical +// types, and their element type must itself be comparable according to Go's == +// operator. +func Equals(x interface{}) Matcher { + v := reflect.ValueOf(x) + + // This matcher doesn't support structs. + if v.Kind() == reflect.Struct { + panic(fmt.Sprintf("oglematchers.Equals: unsupported kind %v", v.Kind())) + } + + // The == operator is not defined for non-nil slices. + if v.Kind() == reflect.Slice && v.Pointer() != uintptr(0) { + panic(fmt.Sprintf("oglematchers.Equals: non-nil slice")) + } + + return &equalsMatcher{v} +} + +type equalsMatcher struct { + expectedValue reflect.Value +} + +//////////////////////////////////////////////////////////////////////// +// Numeric types +//////////////////////////////////////////////////////////////////////// + +func isSignedInteger(v reflect.Value) bool { + k := v.Kind() + return k >= reflect.Int && k <= reflect.Int64 +} + +func isUnsignedInteger(v reflect.Value) bool { + k := v.Kind() + return k >= reflect.Uint && k <= reflect.Uintptr +} + +func isInteger(v reflect.Value) bool { + return isSignedInteger(v) || isUnsignedInteger(v) +} + +func isFloat(v reflect.Value) bool { + k := v.Kind() + return k == reflect.Float32 || k == reflect.Float64 +} + +func isComplex(v reflect.Value) bool { + k := v.Kind() + return k == reflect.Complex64 || k == reflect.Complex128 +} + +func checkAgainstInt64(e int64, c reflect.Value) (err error) { + err = errors.New("") + + switch { + case isSignedInteger(c): + if c.Int() == e { + err = nil + } + + case isUnsignedInteger(c): + u := c.Uint() + if u <= math.MaxInt64 && int64(u) == e { + err = nil + } + + // Turn around the various floating point types so that the checkAgainst* + // functions for them can deal with precision issues. 
+ case isFloat(c), isComplex(c): + return Equals(c.Interface()).Matches(e) + + default: + err = NewFatalError("which is not numeric") + } + + return +} + +func checkAgainstUint64(e uint64, c reflect.Value) (err error) { + err = errors.New("") + + switch { + case isSignedInteger(c): + i := c.Int() + if i >= 0 && uint64(i) == e { + err = nil + } + + case isUnsignedInteger(c): + if c.Uint() == e { + err = nil + } + + // Turn around the various floating point types so that the checkAgainst* + // functions for them can deal with precision issues. + case isFloat(c), isComplex(c): + return Equals(c.Interface()).Matches(e) + + default: + err = NewFatalError("which is not numeric") + } + + return +} + +func checkAgainstFloat32(e float32, c reflect.Value) (err error) { + err = errors.New("") + + switch { + case isSignedInteger(c): + if float32(c.Int()) == e { + err = nil + } + + case isUnsignedInteger(c): + if float32(c.Uint()) == e { + err = nil + } + + case isFloat(c): + // Compare using float32 to avoid a false sense of precision; otherwise + // e.g. Equals(float32(0.1)) won't match float32(0.1). + if float32(c.Float()) == e { + err = nil + } + + case isComplex(c): + comp := c.Complex() + rl := real(comp) + im := imag(comp) + + // Compare using float32 to avoid a false sense of precision; otherwise + // e.g. Equals(float32(0.1)) won't match (0.1 + 0i). + if im == 0 && float32(rl) == e { + err = nil + } + + default: + err = NewFatalError("which is not numeric") + } + + return +} + +func checkAgainstFloat64(e float64, c reflect.Value) (err error) { + err = errors.New("") + + ck := c.Kind() + + switch { + case isSignedInteger(c): + if float64(c.Int()) == e { + err = nil + } + + case isUnsignedInteger(c): + if float64(c.Uint()) == e { + err = nil + } + + // If the actual value is lower precision, turn the comparison around so we + // apply the low-precision rules. Otherwise, e.g. Equals(0.1) may not match + // float32(0.1). 
+ case ck == reflect.Float32 || ck == reflect.Complex64: + return Equals(c.Interface()).Matches(e) + + // Otherwise, compare with double precision. + case isFloat(c): + if c.Float() == e { + err = nil + } + + case isComplex(c): + comp := c.Complex() + rl := real(comp) + im := imag(comp) + + if im == 0 && rl == e { + err = nil + } + + default: + err = NewFatalError("which is not numeric") + } + + return +} + +func checkAgainstComplex64(e complex64, c reflect.Value) (err error) { + err = errors.New("") + realPart := real(e) + imaginaryPart := imag(e) + + switch { + case isInteger(c) || isFloat(c): + // If we have no imaginary part, then we should just compare against the + // real part. Otherwise, we can't be equal. + if imaginaryPart != 0 { + return + } + + return checkAgainstFloat32(realPart, c) + + case isComplex(c): + // Compare using complex64 to avoid a false sense of precision; otherwise + // e.g. Equals(0.1 + 0i) won't match float32(0.1). + if complex64(c.Complex()) == e { + err = nil + } + + default: + err = NewFatalError("which is not numeric") + } + + return +} + +func checkAgainstComplex128(e complex128, c reflect.Value) (err error) { + err = errors.New("") + realPart := real(e) + imaginaryPart := imag(e) + + switch { + case isInteger(c) || isFloat(c): + // If we have no imaginary part, then we should just compare against the + // real part. Otherwise, we can't be equal. 
+ if imaginaryPart != 0 { + return + } + + return checkAgainstFloat64(realPart, c) + + case isComplex(c): + if c.Complex() == e { + err = nil + } + + default: + err = NewFatalError("which is not numeric") + } + + return +} + +//////////////////////////////////////////////////////////////////////// +// Other types +//////////////////////////////////////////////////////////////////////// + +func checkAgainstBool(e bool, c reflect.Value) (err error) { + if c.Kind() != reflect.Bool { + err = NewFatalError("which is not a bool") + return + } + + err = errors.New("") + if c.Bool() == e { + err = nil + } + return +} + +func checkAgainstChan(e reflect.Value, c reflect.Value) (err error) { + // Create a description of e's type, e.g. "chan int". + typeStr := fmt.Sprintf("%s %s", e.Type().ChanDir(), e.Type().Elem()) + + // Make sure c is a chan of the correct type. + if c.Kind() != reflect.Chan || + c.Type().ChanDir() != e.Type().ChanDir() || + c.Type().Elem() != e.Type().Elem() { + err = NewFatalError(fmt.Sprintf("which is not a %s", typeStr)) + return + } + + err = errors.New("") + if c.Pointer() == e.Pointer() { + err = nil + } + return +} + +func checkAgainstFunc(e reflect.Value, c reflect.Value) (err error) { + // Make sure c is a function. + if c.Kind() != reflect.Func { + err = NewFatalError("which is not a function") + return + } + + err = errors.New("") + if c.Pointer() == e.Pointer() { + err = nil + } + return +} + +func checkAgainstMap(e reflect.Value, c reflect.Value) (err error) { + // Make sure c is a map. + if c.Kind() != reflect.Map { + err = NewFatalError("which is not a map") + return + } + + err = errors.New("") + if c.Pointer() == e.Pointer() { + err = nil + } + return +} + +func checkAgainstPtr(e reflect.Value, c reflect.Value) (err error) { + // Create a description of e's type, e.g. "*int". + typeStr := fmt.Sprintf("*%v", e.Type().Elem()) + + // Make sure c is a pointer of the correct type. 
+ if c.Kind() != reflect.Ptr || + c.Type().Elem() != e.Type().Elem() { + err = NewFatalError(fmt.Sprintf("which is not a %s", typeStr)) + return + } + + err = errors.New("") + if c.Pointer() == e.Pointer() { + err = nil + } + return +} + +func checkAgainstSlice(e reflect.Value, c reflect.Value) (err error) { + // Create a description of e's type, e.g. "[]int". + typeStr := fmt.Sprintf("[]%v", e.Type().Elem()) + + // Make sure c is a slice of the correct type. + if c.Kind() != reflect.Slice || + c.Type().Elem() != e.Type().Elem() { + err = NewFatalError(fmt.Sprintf("which is not a %s", typeStr)) + return + } + + err = errors.New("") + if c.Pointer() == e.Pointer() { + err = nil + } + return +} + +func checkAgainstString(e reflect.Value, c reflect.Value) (err error) { + // Make sure c is a string. + if c.Kind() != reflect.String { + err = NewFatalError("which is not a string") + return + } + + err = errors.New("") + if c.String() == e.String() { + err = nil + } + return +} + +func checkAgainstArray(e reflect.Value, c reflect.Value) (err error) { + // Create a description of e's type, e.g. "[2]int". + typeStr := fmt.Sprintf("%v", e.Type()) + + // Make sure c is the correct type. + if c.Type() != e.Type() { + err = NewFatalError(fmt.Sprintf("which is not %s", typeStr)) + return + } + + // Check for equality. + if e.Interface() != c.Interface() { + err = errors.New("") + return + } + + return +} + +func checkAgainstUnsafePointer(e reflect.Value, c reflect.Value) (err error) { + // Make sure c is a pointer. + if c.Kind() != reflect.UnsafePointer { + err = NewFatalError("which is not a unsafe.Pointer") + return + } + + err = errors.New("") + if c.Pointer() == e.Pointer() { + err = nil + } + return +} + +func checkForNil(c reflect.Value) (err error) { + err = errors.New("") + + // Make sure it is legal to call IsNil. 
+ switch c.Kind() { + case reflect.Invalid: + case reflect.Chan: + case reflect.Func: + case reflect.Interface: + case reflect.Map: + case reflect.Ptr: + case reflect.Slice: + + default: + err = NewFatalError("which cannot be compared to nil") + return + } + + // Ask whether the value is nil. Handle a nil literal (kind Invalid) + // specially, since it's not legal to call IsNil there. + if c.Kind() == reflect.Invalid || c.IsNil() { + err = nil + } + return +} + +//////////////////////////////////////////////////////////////////////// +// Public implementation +//////////////////////////////////////////////////////////////////////// + +func (m *equalsMatcher) Matches(candidate interface{}) error { + e := m.expectedValue + c := reflect.ValueOf(candidate) + ek := e.Kind() + + switch { + case ek == reflect.Bool: + return checkAgainstBool(e.Bool(), c) + + case isSignedInteger(e): + return checkAgainstInt64(e.Int(), c) + + case isUnsignedInteger(e): + return checkAgainstUint64(e.Uint(), c) + + case ek == reflect.Float32: + return checkAgainstFloat32(float32(e.Float()), c) + + case ek == reflect.Float64: + return checkAgainstFloat64(e.Float(), c) + + case ek == reflect.Complex64: + return checkAgainstComplex64(complex64(e.Complex()), c) + + case ek == reflect.Complex128: + return checkAgainstComplex128(complex128(e.Complex()), c) + + case ek == reflect.Chan: + return checkAgainstChan(e, c) + + case ek == reflect.Func: + return checkAgainstFunc(e, c) + + case ek == reflect.Map: + return checkAgainstMap(e, c) + + case ek == reflect.Ptr: + return checkAgainstPtr(e, c) + + case ek == reflect.Slice: + return checkAgainstSlice(e, c) + + case ek == reflect.String: + return checkAgainstString(e, c) + + case ek == reflect.Array: + return checkAgainstArray(e, c) + + case ek == reflect.UnsafePointer: + return checkAgainstUnsafePointer(e, c) + + case ek == reflect.Invalid: + return checkForNil(c) + } + + panic(fmt.Sprintf("equalsMatcher.Matches: unexpected kind: %v", ek)) +} + +func 
(m *equalsMatcher) Description() string { + // Special case: handle nil. + if !m.expectedValue.IsValid() { + return "is nil" + } + + return fmt.Sprintf("%v", m.expectedValue.Interface()) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal.go new file mode 100644 index 0000000000..4b9d103a38 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal.go @@ -0,0 +1,39 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers + +import ( + "fmt" + "reflect" +) + +// GreaterOrEqual returns a matcher that matches integer, floating point, or +// strings values v such that v >= x. Comparison is not defined between numeric +// and string types, but is defined between all integer and floating point +// types. +// +// x must itself be an integer, floating point, or string type; otherwise, +// GreaterOrEqual will panic. +func GreaterOrEqual(x interface{}) Matcher { + desc := fmt.Sprintf("greater than or equal to %v", x) + + // Special case: make it clear that strings are strings. 
+ if reflect.TypeOf(x).Kind() == reflect.String { + desc = fmt.Sprintf("greater than or equal to \"%s\"", x) + } + + return transformDescription(Not(LessThan(x)), desc) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/greater_than.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/greater_than.go new file mode 100644 index 0000000000..3eef32178f --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/greater_than.go @@ -0,0 +1,39 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers + +import ( + "fmt" + "reflect" +) + +// GreaterThan returns a matcher that matches integer, floating point, or +// strings values v such that v > x. Comparison is not defined between numeric +// and string types, but is defined between all integer and floating point +// types. +// +// x must itself be an integer, floating point, or string type; otherwise, +// GreaterThan will panic. +func GreaterThan(x interface{}) Matcher { + desc := fmt.Sprintf("greater than %v", x) + + // Special case: make it clear that strings are strings. 
+ if reflect.TypeOf(x).Kind() == reflect.String { + desc = fmt.Sprintf("greater than \"%s\"", x) + } + + return transformDescription(Not(LessOrEqual(x)), desc) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal.go new file mode 100644 index 0000000000..8402cdeaf0 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal.go @@ -0,0 +1,41 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers + +import ( + "fmt" + "reflect" +) + +// LessOrEqual returns a matcher that matches integer, floating point, or +// strings values v such that v <= x. Comparison is not defined between numeric +// and string types, but is defined between all integer and floating point +// types. +// +// x must itself be an integer, floating point, or string type; otherwise, +// LessOrEqual will panic. +func LessOrEqual(x interface{}) Matcher { + desc := fmt.Sprintf("less than or equal to %v", x) + + // Special case: make it clear that strings are strings. 
+ if reflect.TypeOf(x).Kind() == reflect.String { + desc = fmt.Sprintf("less than or equal to \"%s\"", x) + } + + // Put LessThan last so that its error messages will be used in the event of + // failure. + return transformDescription(AnyOf(Equals(x), LessThan(x)), desc) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/less_than.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/less_than.go new file mode 100644 index 0000000000..8258e45d99 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/less_than.go @@ -0,0 +1,152 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers + +import ( + "errors" + "fmt" + "math" + "reflect" +) + +// LessThan returns a matcher that matches integer, floating point, or strings +// values v such that v < x. Comparison is not defined between numeric and +// string types, but is defined between all integer and floating point types. +// +// x must itself be an integer, floating point, or string type; otherwise, +// LessThan will panic. 
+func LessThan(x interface{}) Matcher { + v := reflect.ValueOf(x) + kind := v.Kind() + + switch { + case isInteger(v): + case isFloat(v): + case kind == reflect.String: + + default: + panic(fmt.Sprintf("LessThan: unexpected kind %v", kind)) + } + + return &lessThanMatcher{v} +} + +type lessThanMatcher struct { + limit reflect.Value +} + +func (m *lessThanMatcher) Description() string { + // Special case: make it clear that strings are strings. + if m.limit.Kind() == reflect.String { + return fmt.Sprintf("less than \"%s\"", m.limit.String()) + } + + return fmt.Sprintf("less than %v", m.limit.Interface()) +} + +func compareIntegers(v1, v2 reflect.Value) (err error) { + err = errors.New("") + + switch { + case isSignedInteger(v1) && isSignedInteger(v2): + if v1.Int() < v2.Int() { + err = nil + } + return + + case isSignedInteger(v1) && isUnsignedInteger(v2): + if v1.Int() < 0 || uint64(v1.Int()) < v2.Uint() { + err = nil + } + return + + case isUnsignedInteger(v1) && isSignedInteger(v2): + if v1.Uint() <= math.MaxInt64 && int64(v1.Uint()) < v2.Int() { + err = nil + } + return + + case isUnsignedInteger(v1) && isUnsignedInteger(v2): + if v1.Uint() < v2.Uint() { + err = nil + } + return + } + + panic(fmt.Sprintf("compareIntegers: %v %v", v1, v2)) +} + +func getFloat(v reflect.Value) float64 { + switch { + case isSignedInteger(v): + return float64(v.Int()) + + case isUnsignedInteger(v): + return float64(v.Uint()) + + case isFloat(v): + return v.Float() + } + + panic(fmt.Sprintf("getFloat: %v", v)) +} + +func (m *lessThanMatcher) Matches(c interface{}) (err error) { + v1 := reflect.ValueOf(c) + v2 := m.limit + + err = errors.New("") + + // Handle strings as a special case. + if v1.Kind() == reflect.String && v2.Kind() == reflect.String { + if v1.String() < v2.String() { + err = nil + } + return + } + + // If we get here, we require that we are dealing with integers or floats. 
+ v1Legal := isInteger(v1) || isFloat(v1) + v2Legal := isInteger(v2) || isFloat(v2) + if !v1Legal || !v2Legal { + err = NewFatalError("which is not comparable") + return + } + + // Handle the various comparison cases. + switch { + // Both integers + case isInteger(v1) && isInteger(v2): + return compareIntegers(v1, v2) + + // At least one float32 + case v1.Kind() == reflect.Float32 || v2.Kind() == reflect.Float32: + if float32(getFloat(v1)) < float32(getFloat(v2)) { + err = nil + } + return + + // At least one float64 + case v1.Kind() == reflect.Float64 || v2.Kind() == reflect.Float64: + if getFloat(v1) < getFloat(v2) { + err = nil + } + return + } + + // We shouldn't get here. + panic(fmt.Sprintf("lessThanMatcher.Matches: Shouldn't get here: %v %v", v1, v2)) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/matcher.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/matcher.go new file mode 100644 index 0000000000..78159a0727 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/matcher.go @@ -0,0 +1,86 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package oglematchers provides a set of matchers useful in a testing or +// mocking framework. 
These matchers are inspired by and mostly compatible with +// Google Test for C++ and Google JS Test. +// +// This package is used by github.com/smartystreets/assertions/internal/ogletest and +// github.com/smartystreets/assertions/internal/oglemock, which may be more directly useful if you're not +// writing your own testing package or defining your own matchers. +package oglematchers + +// A Matcher is some predicate implicitly defining a set of values that it +// matches. For example, GreaterThan(17) matches all numeric values greater +// than 17, and HasSubstr("taco") matches all strings with the substring +// "taco". +// +// Matchers are typically exposed to tests via constructor functions like +// HasSubstr. In order to implement such a function you can either define your +// own matcher type or use NewMatcher. +type Matcher interface { + // Check whether the supplied value belongs to the the set defined by the + // matcher. Return a non-nil error if and only if it does not. + // + // The error describes why the value doesn't match. The error text is a + // relative clause that is suitable for being placed after the value. For + // example, a predicate that matches strings with a particular substring may, + // when presented with a numerical value, return the following error text: + // + // "which is not a string" + // + // Then the failure message may look like: + // + // Expected: has substring "taco" + // Actual: 17, which is not a string + // + // If the error is self-apparent based on the description of the matcher, the + // error text may be empty (but the error still non-nil). For example: + // + // Expected: 17 + // Actual: 19 + // + // If you are implementing a new matcher, see also the documentation on + // FatalError. + Matches(candidate interface{}) error + + // Description returns a string describing the property that values matching + // this matcher have, as a verb phrase where the subject is the value. 
For + // example, "is greather than 17" or "has substring "taco"". + Description() string +} + +// FatalError is an implementation of the error interface that may be returned +// from matchers, indicating the error should be propagated. Returning a +// *FatalError indicates that the matcher doesn't process values of the +// supplied type, or otherwise doesn't know how to handle the value. +// +// For example, if GreaterThan(17) returned false for the value "taco" without +// a fatal error, then Not(GreaterThan(17)) would return true. This is +// technically correct, but is surprising and may mask failures where the wrong +// sort of matcher is accidentally used. Instead, GreaterThan(17) can return a +// fatal error, which will be propagated by Not(). +type FatalError struct { + errorText string +} + +// NewFatalError creates a FatalError struct with the supplied error text. +func NewFatalError(s string) *FatalError { + return &FatalError{s} +} + +func (e *FatalError) Error() string { + return e.errorText +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/not.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/not.go new file mode 100644 index 0000000000..623789fe28 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/not.go @@ -0,0 +1,53 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers + +import ( + "errors" + "fmt" +) + +// Not returns a matcher that inverts the set of values matched by the wrapped +// matcher. It does not transform the result for values for which the wrapped +// matcher returns a fatal error. +func Not(m Matcher) Matcher { + return ¬Matcher{m} +} + +type notMatcher struct { + wrapped Matcher +} + +func (m *notMatcher) Matches(c interface{}) (err error) { + err = m.wrapped.Matches(c) + + // Did the wrapped matcher say yes? + if err == nil { + return errors.New("") + } + + // Did the wrapped matcher return a fatal error? + if _, isFatal := err.(*FatalError); isFatal { + return err + } + + // The wrapped matcher returned a non-fatal error. + return nil +} + +func (m *notMatcher) Description() string { + return fmt.Sprintf("not(%s)", m.wrapped.Description()) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/transform_description.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/transform_description.go new file mode 100644 index 0000000000..8ea2807c6f --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/oglematchers/transform_description.go @@ -0,0 +1,36 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers + +// transformDescription returns a matcher that is equivalent to the supplied +// one, except that it has the supplied description instead of the one attached +// to the existing matcher. +func transformDescription(m Matcher, newDesc string) Matcher { + return &transformDescriptionMatcher{newDesc, m} +} + +type transformDescriptionMatcher struct { + desc string + wrappedMatcher Matcher +} + +func (m *transformDescriptionMatcher) Description() string { + return m.desc +} + +func (m *transformDescriptionMatcher) Matches(c interface{}) error { + return m.wrappedMatcher.Matches(c) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/unit/fixture.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/unit/fixture.go new file mode 100644 index 0000000000..a7c48cdd4f --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/internal/unit/fixture.go @@ -0,0 +1,125 @@ +// package unit implements a light-weight x-Unit style testing framework. +// It is basically a scaled-down version of github.com/smartystreets/gunit. +// See https://smartystreets.com/blog/2018/07/lets-build-xunit-in-go for +// an explanation of the basic moving parts. 
+package unit + +import ( + "bytes" + "fmt" + "reflect" + "runtime" + "strings" + "testing" +) + +func Run(fixture interface{}, t *testing.T) { + fixtureType := reflect.TypeOf(fixture) + + for x := 0; x < fixtureType.NumMethod(); x++ { + testMethodName := fixtureType.Method(x).Name + if strings.HasPrefix(testMethodName, "Test") { + t.Run(testMethodName, func(t *testing.T) { + instance := reflect.New(fixtureType.Elem()) + + innerFixture := newFixture(t, testing.Verbose()) + field := instance.Elem().FieldByName("Fixture") + field.Set(reflect.ValueOf(innerFixture)) + + defer innerFixture.Finalize() + + if setup := instance.MethodByName("Setup"); setup.IsValid() { + setup.Call(nil) + } + + instance.MethodByName(testMethodName).Call(nil) + + if teardown := instance.MethodByName("Teardown"); teardown.IsValid() { + teardown.Call(nil) + } + }) + } + } +} + +type Fixture struct { + t *testing.T + log *bytes.Buffer + verbose bool +} + +func newFixture(t *testing.T, verbose bool) *Fixture { + return &Fixture{t: t, verbose: verbose, log: &bytes.Buffer{}} +} + +func (this *Fixture) So(actual interface{}, assert assertion, expected ...interface{}) bool { + failure := assert(actual, expected...) + failed := len(failure) > 0 + if failed { + this.fail(failure) + } + return !failed +} + +func (this *Fixture) fail(failure string) { + this.t.Fail() + this.Print(failure) +} + +// Assert tests a boolean which, if not true, marks the current test case as failed and +// prints the provided message. 
+func (this *Fixture) Assert(condition bool, messages ...string) bool { + if !condition { + if len(messages) == 0 { + messages = append(messages, "Expected condition to be true, was false instead.") + } + this.fail(strings.Join(messages, ", ")) + } + return condition +} +func (this *Fixture) AssertEqual(expected, actual interface{}) bool { + return this.Assert(expected == actual, fmt.Sprintf(comparisonFormat, fmt.Sprint(expected), fmt.Sprint(actual))) +} +func (this *Fixture) AssertSprintEqual(expected, actual interface{}) bool { + return this.AssertEqual(fmt.Sprint(expected), fmt.Sprint(actual)) +} +func (this *Fixture) AssertSprintfEqual(expected, actual interface{}, format string) bool { + return this.AssertEqual(fmt.Sprintf(format, expected), fmt.Sprintf(format, actual)) +} +func (this *Fixture) AssertDeepEqual(expected, actual interface{}) bool { + return this.Assert(reflect.DeepEqual(expected, actual), + fmt.Sprintf(comparisonFormat, fmt.Sprintf("%#v", expected), fmt.Sprintf("%#v", actual))) +} + +const comparisonFormat = "Expected: [%s]\nActual: [%s]" + +func (this *Fixture) Error(args ...interface{}) { this.fail(fmt.Sprint(args...)) } +func (this *Fixture) Errorf(f string, args ...interface{}) { this.fail(fmt.Sprintf(f, args...)) } + +func (this *Fixture) Print(a ...interface{}) { fmt.Fprint(this.log, a...) } +func (this *Fixture) Printf(format string, a ...interface{}) { fmt.Fprintf(this.log, format, a...) } +func (this *Fixture) Println(a ...interface{}) { fmt.Fprintln(this.log, a...) 
} + +func (this *Fixture) Write(p []byte) (int, error) { return this.log.Write(p) } +func (this *Fixture) Failed() bool { return this.t.Failed() } +func (this *Fixture) Name() string { return this.t.Name() } + +func (this *Fixture) Finalize() { + if r := recover(); r != nil { + this.recoverPanic(r) + } + + if this.t.Failed() || (this.verbose && this.log.Len() > 0) { + this.t.Log("\n" + strings.TrimSpace(this.log.String()) + "\n") + } +} +func (this *Fixture) recoverPanic(r interface{}) { + this.Println("PANIC:", r) + buffer := make([]byte, 1024*16) + runtime.Stack(buffer, false) + this.Println(strings.TrimSpace(string(buffer))) + this.t.Fail() +} + +// assertion is a copy of github.com/smartystreets/assertions.assertion. +type assertion func(actual interface{}, expected ...interface{}) string diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/messages.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/messages.go new file mode 100644 index 0000000000..72782b008f --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/messages.go @@ -0,0 +1,106 @@ +package assertions + +const ( + shouldHaveBeenEqual = "Expected: '%v'\nActual: '%v'\n(Should be equal)" + shouldHaveBeenEqualNoResemblance = "Both the actual and expected values render equally ('%s') and their types are the same. Try using ShouldResemble instead." + shouldNotHaveBeenEqual = "Expected '%v'\nto NOT equal '%v'\n(but it did)!" + shouldHaveBeenEqualTypeMismatch = "Expected: '%v' (%T)\nActual: '%v' (%T)\n(Should be equal, type mismatch)" + + shouldHaveBeenAlmostEqual = "Expected '%v' to almost equal '%v' (but it didn't)!" + shouldHaveNotBeenAlmostEqual = "Expected '%v' to NOT almost equal '%v' (but it did)!" + + shouldHaveResembled = "Expected: '%s'\nActual: '%s'\n(Should resemble)!" + shouldNotHaveResembled = "Expected '%#v'\nto NOT resemble '%#v'\n(but it did)!" 
+ + shouldBePointers = "Both arguments should be pointers " + shouldHaveBeenNonNilPointer = shouldBePointers + "(the %s was %s)!" + shouldHavePointedTo = "Expected '%+v' (address: '%v') and '%+v' (address: '%v') to be the same address (but their weren't)!" + shouldNotHavePointedTo = "Expected '%+v' and '%+v' to be different references (but they matched: '%v')!" + + shouldHaveBeenNil = "Expected: nil\nActual: '%v'" + shouldNotHaveBeenNil = "Expected '%+v' to NOT be nil (but it was)!" + + shouldHaveBeenTrue = "Expected: true\nActual: %v" + shouldHaveBeenFalse = "Expected: false\nActual: %v" + + shouldHaveBeenZeroValue = "'%+v' should have been the zero value" //"Expected: (zero value)\nActual: %v" + shouldNotHaveBeenZeroValue = "'%+v' should NOT have been the zero value" + + shouldHaveBeenGreater = "Expected '%v' to be greater than '%v' (but it wasn't)!" + shouldHaveBeenGreaterOrEqual = "Expected '%v' to be greater than or equal to '%v' (but it wasn't)!" + + shouldHaveBeenLess = "Expected '%v' to be less than '%v' (but it wasn't)!" + shouldHaveBeenLessOrEqual = "Expected '%v' to be less than or equal to '%v' (but it wasn't)!" + + shouldHaveBeenBetween = "Expected '%v' to be between '%v' and '%v' (but it wasn't)!" + shouldNotHaveBeenBetween = "Expected '%v' NOT to be between '%v' and '%v' (but it was)!" + shouldHaveDifferentUpperAndLower = "The lower and upper bounds must be different values (they were both '%v')." + + shouldHaveBeenBetweenOrEqual = "Expected '%v' to be between '%v' and '%v' or equal to one of them (but it wasn't)!" + shouldNotHaveBeenBetweenOrEqual = "Expected '%v' NOT to be between '%v' and '%v' or equal to one of them (but it was)!" + + shouldHaveContained = "Expected the container (%v) to contain: '%v' (but it didn't)!" + shouldNotHaveContained = "Expected the container (%v) NOT to contain: '%v' (but it did)!" + shouldHaveBeenAValidCollection = "You must provide a valid container (was %v)!" 
+ + shouldHaveContainedKey = "Expected the %v to contain the key: %v (but it didn't)!" + shouldNotHaveContainedKey = "Expected the %v NOT to contain the key: %v (but it did)!" + shouldHaveBeenAValidMap = "You must provide a valid map type (was %v)!" + + shouldHaveBeenIn = "Expected '%v' to be in the container (%v), but it wasn't!" + shouldNotHaveBeenIn = "Expected '%v' NOT to be in the container (%v), but it was!" + + shouldHaveBeenEmpty = "Expected %+v to be empty (but it wasn't)!" + shouldNotHaveBeenEmpty = "Expected %+v to NOT be empty (but it was)!" + + shouldHaveBeenAValidInteger = "You must provide a valid integer (was %v)!" + shouldHaveBeenAValidLength = "You must provide a valid positive integer (was %v)!" + shouldHaveHadLength = "Expected collection to have length equal to [%v], but it's length was [%v] instead! contents: %+v" + + shouldHaveStartedWith = "Expected '%v'\nto start with '%v'\n(but it didn't)!" + shouldNotHaveStartedWith = "Expected '%v'\nNOT to start with '%v'\n(but it did)!" + + shouldHaveEndedWith = "Expected '%v'\nto end with '%v'\n(but it didn't)!" + shouldNotHaveEndedWith = "Expected '%v'\nNOT to end with '%v'\n(but it did)!" + + shouldAllBeStrings = "All arguments to this assertion must be strings (you provided: %v)." + shouldBothBeStrings = "Both arguments to this assertion must be strings (you provided %v and %v)." + + shouldHaveContainedSubstring = "Expected '%s' to contain substring '%s' (but it didn't)!" + shouldNotHaveContainedSubstring = "Expected '%s' NOT to contain substring '%s' (but it did)!" + + shouldBeString = "The argument to this assertion must be a string (you provided %v)." + shouldHaveBeenBlank = "Expected '%s' to be blank (but it wasn't)!" + shouldNotHaveBeenBlank = "Expected value to NOT be blank (but it was)!" + + shouldUseVoidNiladicFunction = "You must provide a void, niladic function as the first argument!" + shouldHavePanicked = "Expected func() to panic (but it didn't)!" 
+ shouldNotHavePanicked = "Expected func() NOT to panic (error: '%+v')!" + + shouldHavePanickedWith = "Expected func() to panic with '%v' (but it panicked with '%v')!" + shouldNotHavePanickedWith = "Expected func() NOT to panic with '%v' (but it did)!" + + shouldHaveBeenA = "Expected '%v' to be: '%v' (but was: '%v')!" + shouldNotHaveBeenA = "Expected '%v' to NOT be: '%v' (but it was)!" + + shouldHaveImplemented = "Expected: '%v interface support'\nActual: '%v' does not implement the interface!" + shouldNotHaveImplemented = "Expected '%v'\nto NOT implement '%v'\n(but it did)!" + shouldCompareWithInterfacePointer = "The expected value must be a pointer to an interface type (eg. *fmt.Stringer)" + shouldNotBeNilActual = "The actual value was 'nil' and should be a value or a pointer to a value!" + + shouldBeError = "Expected an error value (but was '%v' instead)!" + shouldBeErrorInvalidComparisonValue = "The final argument to this assertion must be a string or an error value (you provided: '%v')." + + shouldUseTimes = "You must provide time instances as arguments to this assertion." + shouldUseTimeSlice = "You must provide a slice of time instances as the first argument to this assertion." + shouldUseDurationAndTime = "You must provide a duration and a time as arguments to this assertion." + + shouldHaveHappenedBefore = "Expected '%v' to happen before '%v' (it happened '%v' after)!" + shouldHaveHappenedAfter = "Expected '%v' to happen after '%v' (it happened '%v' before)!" + shouldHaveHappenedBetween = "Expected '%v' to happen between '%v' and '%v' (it happened '%v' outside threshold)!" + shouldNotHaveHappenedOnOrBetween = "Expected '%v' to NOT happen on or between '%v' and '%v' (but it did)!" 
+ + // format params: incorrect-index, previous-index, previous-time, incorrect-index, incorrect-time + shouldHaveBeenChronological = "The 'Time' at index [%d] should have happened after the previous one (but it didn't!):\n [%d]: %s\n [%d]: %s (see, it happened before!)" + shouldNotHaveBeenchronological = "The provided times should NOT be chronological, but they were." +) diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/panic.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/panic.go new file mode 100644 index 0000000000..7e75db1784 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/panic.go @@ -0,0 +1,115 @@ +package assertions + +import "fmt" + +// ShouldPanic receives a void, niladic function and expects to recover a panic. +func ShouldPanic(actual interface{}, expected ...interface{}) (message string) { + if fail := need(0, expected); fail != success { + return fail + } + + action, _ := actual.(func()) + + if action == nil { + message = shouldUseVoidNiladicFunction + return + } + + defer func() { + recovered := recover() + if recovered == nil { + message = shouldHavePanicked + } else { + message = success + } + }() + action() + + return +} + +// ShouldNotPanic receives a void, niladic function and expects to execute the function without any panic. +func ShouldNotPanic(actual interface{}, expected ...interface{}) (message string) { + if fail := need(0, expected); fail != success { + return fail + } + + action, _ := actual.(func()) + + if action == nil { + message = shouldUseVoidNiladicFunction + return + } + + defer func() { + recovered := recover() + if recovered != nil { + message = fmt.Sprintf(shouldNotHavePanicked, recovered) + } else { + message = success + } + }() + action() + + return +} + +// ShouldPanicWith receives a void, niladic function and expects to recover a panic with the second argument as the content. 
+func ShouldPanicWith(actual interface{}, expected ...interface{}) (message string) { + if fail := need(1, expected); fail != success { + return fail + } + + action, _ := actual.(func()) + + if action == nil { + message = shouldUseVoidNiladicFunction + return + } + + defer func() { + recovered := recover() + if recovered == nil { + message = shouldHavePanicked + } else { + if equal := ShouldEqual(recovered, expected[0]); equal != success { + message = serializer.serialize(expected[0], recovered, fmt.Sprintf(shouldHavePanickedWith, expected[0], recovered)) + } else { + message = success + } + } + }() + action() + + return +} + +// ShouldNotPanicWith receives a void, niladic function and expects to recover a panic whose content differs from the second argument. +func ShouldNotPanicWith(actual interface{}, expected ...interface{}) (message string) { + if fail := need(1, expected); fail != success { + return fail + } + + action, _ := actual.(func()) + + if action == nil { + message = shouldUseVoidNiladicFunction + return + } + + defer func() { + recovered := recover() + if recovered == nil { + message = success + } else { + if equal := ShouldEqual(recovered, expected[0]); equal == success { + message = fmt.Sprintf(shouldNotHavePanickedWith, expected[0]) + } else { + message = success + } + } + }() + action() + + return +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/panic_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/panic_test.go new file mode 100644 index 0000000000..e3abbf3036 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/panic_test.go @@ -0,0 +1,50 @@ +package assertions + +import "fmt" + +func (this *AssertionsFixture) TestShouldPanic() { + this.fail(so(func() {}, ShouldPanic, 1), "This assertion requires exactly 0 comparison values (you provided 1).") + this.fail(so(func() {}, ShouldPanic, 1, 2, 3), "This assertion 
requires exactly 0 comparison values (you provided 3).") + + this.fail(so(1, ShouldPanic), shouldUseVoidNiladicFunction) + this.fail(so(func(i int) {}, ShouldPanic), shouldUseVoidNiladicFunction) + this.fail(so(func() int { panic("hi") }, ShouldPanic), shouldUseVoidNiladicFunction) + + this.fail(so(func() {}, ShouldPanic), shouldHavePanicked) + this.pass(so(func() { panic("hi") }, ShouldPanic)) +} + +func (this *AssertionsFixture) TestShouldNotPanic() { + this.fail(so(func() {}, ShouldNotPanic, 1), "This assertion requires exactly 0 comparison values (you provided 1).") + this.fail(so(func() {}, ShouldNotPanic, 1, 2, 3), "This assertion requires exactly 0 comparison values (you provided 3).") + + this.fail(so(1, ShouldNotPanic), shouldUseVoidNiladicFunction) + this.fail(so(func(i int) {}, ShouldNotPanic), shouldUseVoidNiladicFunction) + + this.fail(so(func() { panic("hi") }, ShouldNotPanic), fmt.Sprintf(shouldNotHavePanicked, "hi")) + this.pass(so(func() {}, ShouldNotPanic)) +} + +func (this *AssertionsFixture) TestShouldPanicWith() { + this.fail(so(func() {}, ShouldPanicWith), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(func() {}, ShouldPanicWith, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + this.fail(so(1, ShouldPanicWith, 1), shouldUseVoidNiladicFunction) + this.fail(so(func(i int) {}, ShouldPanicWith, "hi"), shouldUseVoidNiladicFunction) + this.fail(so(func() {}, ShouldPanicWith, "bye"), shouldHavePanicked) + this.fail(so(func() { panic("hi") }, ShouldPanicWith, "bye"), "bye|hi|Expected func() to panic with 'bye' (but it panicked with 'hi')!") + + this.pass(so(func() { panic("hi") }, ShouldPanicWith, "hi")) +} + +func (this *AssertionsFixture) TestShouldNotPanicWith() { + this.fail(so(func() {}, ShouldNotPanicWith), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(func() {}, ShouldNotPanicWith, 1, 2, 3), "This assertion requires exactly 1 
comparison values (you provided 3).") + + this.fail(so(1, ShouldNotPanicWith, 1), shouldUseVoidNiladicFunction) + this.fail(so(func(i int) {}, ShouldNotPanicWith, "hi"), shouldUseVoidNiladicFunction) + this.fail(so(func() { panic("hi") }, ShouldNotPanicWith, "hi"), "Expected func() NOT to panic with 'hi' (but it did)!") + + this.pass(so(func() {}, ShouldNotPanicWith, "bye")) + this.pass(so(func() { panic("hi") }, ShouldNotPanicWith, "bye")) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/quantity.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/quantity.go new file mode 100644 index 0000000000..f28b0a062b --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/quantity.go @@ -0,0 +1,141 @@ +package assertions + +import ( + "fmt" + + "github.com/smartystreets/assertions/internal/oglematchers" +) + +// ShouldBeGreaterThan receives exactly two parameters and ensures that the first is greater than the second. +func ShouldBeGreaterThan(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + if matchError := oglematchers.GreaterThan(expected[0]).Matches(actual); matchError != nil { + return fmt.Sprintf(shouldHaveBeenGreater, actual, expected[0]) + } + return success +} + +// ShouldBeGreaterThanOrEqualTo receives exactly two parameters and ensures that the first is greater than or equal to the second. +func ShouldBeGreaterThanOrEqualTo(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } else if matchError := oglematchers.GreaterOrEqual(expected[0]).Matches(actual); matchError != nil { + return fmt.Sprintf(shouldHaveBeenGreaterOrEqual, actual, expected[0]) + } + return success +} + +// ShouldBeLessThan receives exactly two parameters and ensures that the first is less than the second. 
+func ShouldBeLessThan(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } else if matchError := oglematchers.LessThan(expected[0]).Matches(actual); matchError != nil { + return fmt.Sprintf(shouldHaveBeenLess, actual, expected[0]) + } + return success +} + +// ShouldBeLessThan receives exactly two parameters and ensures that the first is less than or equal to the second. +func ShouldBeLessThanOrEqualTo(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } else if matchError := oglematchers.LessOrEqual(expected[0]).Matches(actual); matchError != nil { + return fmt.Sprintf(shouldHaveBeenLessOrEqual, actual, expected[0]) + } + return success +} + +// ShouldBeBetween receives exactly three parameters: an actual value, a lower bound, and an upper bound. +// It ensures that the actual value is between both bounds (but not equal to either of them). +func ShouldBeBetween(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + lower, upper, fail := deriveBounds(expected) + + if fail != success { + return fail + } else if !isBetween(actual, lower, upper) { + return fmt.Sprintf(shouldHaveBeenBetween, actual, lower, upper) + } + return success +} + +// ShouldNotBeBetween receives exactly three parameters: an actual value, a lower bound, and an upper bound. +// It ensures that the actual value is NOT between both bounds. 
+func ShouldNotBeBetween(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + lower, upper, fail := deriveBounds(expected) + + if fail != success { + return fail + } else if isBetween(actual, lower, upper) { + return fmt.Sprintf(shouldNotHaveBeenBetween, actual, lower, upper) + } + return success +} +func deriveBounds(values []interface{}) (lower interface{}, upper interface{}, fail string) { + lower = values[0] + upper = values[1] + + if ShouldNotEqual(lower, upper) != success { + return nil, nil, fmt.Sprintf(shouldHaveDifferentUpperAndLower, lower) + } else if ShouldBeLessThan(lower, upper) != success { + lower, upper = upper, lower + } + return lower, upper, success +} +func isBetween(value, lower, upper interface{}) bool { + if ShouldBeGreaterThan(value, lower) != success { + return false + } else if ShouldBeLessThan(value, upper) != success { + return false + } + return true +} + +// ShouldBeBetweenOrEqual receives exactly three parameters: an actual value, a lower bound, and an upper bound. +// It ensures that the actual value is between both bounds or equal to one of them. +func ShouldBeBetweenOrEqual(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + lower, upper, fail := deriveBounds(expected) + + if fail != success { + return fail + } else if !isBetweenOrEqual(actual, lower, upper) { + return fmt.Sprintf(shouldHaveBeenBetweenOrEqual, actual, lower, upper) + } + return success +} + +// ShouldNotBeBetweenOrEqual receives exactly three parameters: an actual value, a lower bound, and an upper bound. +// It ensures that the actual value is nopt between the bounds nor equal to either of them. 
+func ShouldNotBeBetweenOrEqual(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + lower, upper, fail := deriveBounds(expected) + + if fail != success { + return fail + } else if isBetweenOrEqual(actual, lower, upper) { + return fmt.Sprintf(shouldNotHaveBeenBetweenOrEqual, actual, lower, upper) + } + return success +} + +func isBetweenOrEqual(value, lower, upper interface{}) bool { + if ShouldBeGreaterThanOrEqualTo(value, lower) != success { + return false + } else if ShouldBeLessThanOrEqualTo(value, upper) != success { + return false + } + return true +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/quantity_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/quantity_test.go new file mode 100644 index 0000000000..ea04fc6451 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/quantity_test.go @@ -0,0 +1,143 @@ +package assertions + +func (this *AssertionsFixture) TestShouldBeGreaterThan() { + this.fail(so(1, ShouldBeGreaterThan), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(1, ShouldBeGreaterThan, 0, 0), "This assertion requires exactly 1 comparison values (you provided 2).") + + this.pass(so(1, ShouldBeGreaterThan, 0)) + this.pass(so(1.1, ShouldBeGreaterThan, 1)) + this.pass(so(1, ShouldBeGreaterThan, uint(0))) + this.pass(so("b", ShouldBeGreaterThan, "a")) + + this.fail(so(0, ShouldBeGreaterThan, 1), "Expected '0' to be greater than '1' (but it wasn't)!") + this.fail(so(1, ShouldBeGreaterThan, 1.1), "Expected '1' to be greater than '1.1' (but it wasn't)!") + this.fail(so(uint(0), ShouldBeGreaterThan, 1.1), "Expected '0' to be greater than '1.1' (but it wasn't)!") + this.fail(so("a", ShouldBeGreaterThan, "b"), "Expected 'a' to be greater than 'b' (but it wasn't)!") +} + +func (this *AssertionsFixture) 
TestShouldBeGreaterThanOrEqual() { + this.fail(so(1, ShouldBeGreaterThanOrEqualTo), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(1, ShouldBeGreaterThanOrEqualTo, 0, 0), "This assertion requires exactly 1 comparison values (you provided 2).") + + this.pass(so(1, ShouldBeGreaterThanOrEqualTo, 1)) + this.pass(so(1.1, ShouldBeGreaterThanOrEqualTo, 1.1)) + this.pass(so(1, ShouldBeGreaterThanOrEqualTo, uint(1))) + this.pass(so("b", ShouldBeGreaterThanOrEqualTo, "b")) + + this.pass(so(1, ShouldBeGreaterThanOrEqualTo, 0)) + this.pass(so(1.1, ShouldBeGreaterThanOrEqualTo, 1)) + this.pass(so(1, ShouldBeGreaterThanOrEqualTo, uint(0))) + this.pass(so("b", ShouldBeGreaterThanOrEqualTo, "a")) + + this.fail(so(0, ShouldBeGreaterThanOrEqualTo, 1), "Expected '0' to be greater than or equal to '1' (but it wasn't)!") + this.fail(so(1, ShouldBeGreaterThanOrEqualTo, 1.1), "Expected '1' to be greater than or equal to '1.1' (but it wasn't)!") + this.fail(so(uint(0), ShouldBeGreaterThanOrEqualTo, 1.1), "Expected '0' to be greater than or equal to '1.1' (but it wasn't)!") + this.fail(so("a", ShouldBeGreaterThanOrEqualTo, "b"), "Expected 'a' to be greater than or equal to 'b' (but it wasn't)!") +} + +func (this *AssertionsFixture) TestShouldBeLessThan() { + this.fail(so(1, ShouldBeLessThan), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(1, ShouldBeLessThan, 0, 0), "This assertion requires exactly 1 comparison values (you provided 2).") + + this.pass(so(0, ShouldBeLessThan, 1)) + this.pass(so(1, ShouldBeLessThan, 1.1)) + this.pass(so(uint(0), ShouldBeLessThan, 1)) + this.pass(so("a", ShouldBeLessThan, "b")) + + this.fail(so(1, ShouldBeLessThan, 0), "Expected '1' to be less than '0' (but it wasn't)!") + this.fail(so(1.1, ShouldBeLessThan, 1), "Expected '1.1' to be less than '1' (but it wasn't)!") + this.fail(so(1.1, ShouldBeLessThan, uint(0)), "Expected '1.1' to be less than '0' (but it wasn't)!") + 
this.fail(so("b", ShouldBeLessThan, "a"), "Expected 'b' to be less than 'a' (but it wasn't)!") +} + +func (this *AssertionsFixture) TestShouldBeLessThanOrEqualTo() { + this.fail(so(1, ShouldBeLessThanOrEqualTo), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(1, ShouldBeLessThanOrEqualTo, 0, 0), "This assertion requires exactly 1 comparison values (you provided 2).") + + this.pass(so(1, ShouldBeLessThanOrEqualTo, 1)) + this.pass(so(1.1, ShouldBeLessThanOrEqualTo, 1.1)) + this.pass(so(uint(1), ShouldBeLessThanOrEqualTo, 1)) + this.pass(so("b", ShouldBeLessThanOrEqualTo, "b")) + + this.pass(so(0, ShouldBeLessThanOrEqualTo, 1)) + this.pass(so(1, ShouldBeLessThanOrEqualTo, 1.1)) + this.pass(so(uint(0), ShouldBeLessThanOrEqualTo, 1)) + this.pass(so("a", ShouldBeLessThanOrEqualTo, "b")) + + this.fail(so(1, ShouldBeLessThanOrEqualTo, 0), "Expected '1' to be less than or equal to '0' (but it wasn't)!") + this.fail(so(1.1, ShouldBeLessThanOrEqualTo, 1), "Expected '1.1' to be less than or equal to '1' (but it wasn't)!") + this.fail(so(1.1, ShouldBeLessThanOrEqualTo, uint(0)), "Expected '1.1' to be less than or equal to '0' (but it wasn't)!") + this.fail(so("b", ShouldBeLessThanOrEqualTo, "a"), "Expected 'b' to be less than or equal to 'a' (but it wasn't)!") +} + +func (this *AssertionsFixture) TestShouldBeBetween() { + this.fail(so(1, ShouldBeBetween), "This assertion requires exactly 2 comparison values (you provided 0).") + this.fail(so(1, ShouldBeBetween, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).") + + this.fail(so(4, ShouldBeBetween, 1, 1), "The lower and upper bounds must be different values (they were both '1').") + + this.fail(so(7, ShouldBeBetween, 8, 12), "Expected '7' to be between '8' and '12' (but it wasn't)!") + this.fail(so(8, ShouldBeBetween, 8, 12), "Expected '8' to be between '8' and '12' (but it wasn't)!") + this.pass(so(9, ShouldBeBetween, 8, 12)) + this.pass(so(10, 
ShouldBeBetween, 8, 12)) + this.pass(so(11, ShouldBeBetween, 8, 12)) + this.fail(so(12, ShouldBeBetween, 8, 12), "Expected '12' to be between '8' and '12' (but it wasn't)!") + this.fail(so(13, ShouldBeBetween, 8, 12), "Expected '13' to be between '8' and '12' (but it wasn't)!") + + this.pass(so(1, ShouldBeBetween, 2, 0)) + this.fail(so(-1, ShouldBeBetween, 2, 0), "Expected '-1' to be between '0' and '2' (but it wasn't)!") +} + +func (this *AssertionsFixture) TestShouldNotBeBetween() { + this.fail(so(1, ShouldNotBeBetween), "This assertion requires exactly 2 comparison values (you provided 0).") + this.fail(so(1, ShouldNotBeBetween, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).") + + this.fail(so(4, ShouldNotBeBetween, 1, 1), "The lower and upper bounds must be different values (they were both '1').") + + this.pass(so(7, ShouldNotBeBetween, 8, 12)) + this.pass(so(8, ShouldNotBeBetween, 8, 12)) + this.fail(so(9, ShouldNotBeBetween, 8, 12), "Expected '9' NOT to be between '8' and '12' (but it was)!") + this.fail(so(10, ShouldNotBeBetween, 8, 12), "Expected '10' NOT to be between '8' and '12' (but it was)!") + this.fail(so(11, ShouldNotBeBetween, 8, 12), "Expected '11' NOT to be between '8' and '12' (but it was)!") + this.pass(so(12, ShouldNotBeBetween, 8, 12)) + this.pass(so(13, ShouldNotBeBetween, 8, 12)) + + this.pass(so(-1, ShouldNotBeBetween, 2, 0)) + this.fail(so(1, ShouldNotBeBetween, 2, 0), "Expected '1' NOT to be between '0' and '2' (but it was)!") +} + +func (this *AssertionsFixture) TestShouldBeBetweenOrEqual() { + this.fail(so(1, ShouldBeBetweenOrEqual), "This assertion requires exactly 2 comparison values (you provided 0).") + this.fail(so(1, ShouldBeBetweenOrEqual, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).") + + this.fail(so(4, ShouldBeBetweenOrEqual, 1, 1), "The lower and upper bounds must be different values (they were both '1').") + + this.fail(so(7, ShouldBeBetweenOrEqual, 8, 
12), "Expected '7' to be between '8' and '12' or equal to one of them (but it wasn't)!") + this.pass(so(8, ShouldBeBetweenOrEqual, 8, 12)) + this.pass(so(9, ShouldBeBetweenOrEqual, 8, 12)) + this.pass(so(10, ShouldBeBetweenOrEqual, 8, 12)) + this.pass(so(11, ShouldBeBetweenOrEqual, 8, 12)) + this.pass(so(12, ShouldBeBetweenOrEqual, 8, 12)) + this.fail(so(13, ShouldBeBetweenOrEqual, 8, 12), "Expected '13' to be between '8' and '12' or equal to one of them (but it wasn't)!") + + this.pass(so(1, ShouldBeBetweenOrEqual, 2, 0)) + this.fail(so(-1, ShouldBeBetweenOrEqual, 2, 0), "Expected '-1' to be between '0' and '2' or equal to one of them (but it wasn't)!") +} + +func (this *AssertionsFixture) TestShouldNotBeBetweenOrEqual() { + this.fail(so(1, ShouldNotBeBetweenOrEqual), "This assertion requires exactly 2 comparison values (you provided 0).") + this.fail(so(1, ShouldNotBeBetweenOrEqual, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).") + + this.fail(so(4, ShouldNotBeBetweenOrEqual, 1, 1), "The lower and upper bounds must be different values (they were both '1').") + + this.pass(so(7, ShouldNotBeBetweenOrEqual, 8, 12)) + this.fail(so(8, ShouldNotBeBetweenOrEqual, 8, 12), "Expected '8' NOT to be between '8' and '12' or equal to one of them (but it was)!") + this.fail(so(9, ShouldNotBeBetweenOrEqual, 8, 12), "Expected '9' NOT to be between '8' and '12' or equal to one of them (but it was)!") + this.fail(so(10, ShouldNotBeBetweenOrEqual, 8, 12), "Expected '10' NOT to be between '8' and '12' or equal to one of them (but it was)!") + this.fail(so(11, ShouldNotBeBetweenOrEqual, 8, 12), "Expected '11' NOT to be between '8' and '12' or equal to one of them (but it was)!") + this.fail(so(12, ShouldNotBeBetweenOrEqual, 8, 12), "Expected '12' NOT to be between '8' and '12' or equal to one of them (but it was)!") + this.pass(so(13, ShouldNotBeBetweenOrEqual, 8, 12)) + + this.pass(so(-1, ShouldNotBeBetweenOrEqual, 2, 0)) + this.fail(so(1, 
ShouldNotBeBetweenOrEqual, 2, 0), "Expected '1' NOT to be between '0' and '2' or equal to one of them (but it was)!") +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/serializer.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/serializer.go new file mode 100644 index 0000000000..f1e3570edc --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/serializer.go @@ -0,0 +1,70 @@ +package assertions + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/smartystreets/assertions/internal/go-render/render" +) + +type Serializer interface { + serialize(expected, actual interface{}, message string) string + serializeDetailed(expected, actual interface{}, message string) string +} + +type failureSerializer struct{} + +func (self *failureSerializer) serializeDetailed(expected, actual interface{}, message string) string { + if index := strings.Index(message, " Diff:"); index > 0 { + message = message[:index] + } + view := FailureView{ + Message: message, + Expected: render.Render(expected), + Actual: render.Render(actual), + } + serialized, _ := json.Marshal(view) + return string(serialized) +} + +func (self *failureSerializer) serialize(expected, actual interface{}, message string) string { + if index := strings.Index(message, " Diff:"); index > 0 { + message = message[:index] + } + view := FailureView{ + Message: message, + Expected: fmt.Sprintf("%+v", expected), + Actual: fmt.Sprintf("%+v", actual), + } + serialized, _ := json.Marshal(view) + return string(serialized) +} + +func newSerializer() *failureSerializer { + return &failureSerializer{} +} + +/////////////////////////////////////////////////////////////////////////////// + +// This struct is also declared in github.com/smartystreets/goconvey/convey/reporting. +// The json struct tags should be equal in both declarations. 
+type FailureView struct { + Message string `json:"Message"` + Expected string `json:"Expected"` + Actual string `json:"Actual"` +} + +/////////////////////////////////////////////////////// + +// noopSerializer just gives back the original message. This is useful when we are using +// the assertions from a context other than the GoConvey Web UI, that requires the JSON +// structure provided by the failureSerializer. +type noopSerializer struct{} + +func (self *noopSerializer) serialize(expected, actual interface{}, message string) string { + return message +} +func (self *noopSerializer) serializeDetailed(expected, actual interface{}, message string) string { + return message +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/serializer_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/serializer_test.go new file mode 100644 index 0000000000..e081f8acc6 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/serializer_test.go @@ -0,0 +1,52 @@ +package assertions + +import ( + "encoding/json" + "fmt" + "testing" +) + +func TestFailureSerializerCreatesSerializedVersionOfAssertionResult(t *testing.T) { + thing1 := Thing1{"Hi"} + thing2 := Thing2{"Bye"} + message := "Super-hip failure message. Diff: Something that we don't need." 
+ serializer := newSerializer() + + actualResult := serializer.serialize(thing1, thing2, message) + + expectedResult, _ := json.Marshal(FailureView{ + Message: "Super-hip failure message.", + Expected: fmt.Sprintf("%+v", thing1), + Actual: fmt.Sprintf("%+v", thing2), + }) + + if actualResult != string(expectedResult) { + t.Errorf("\nExpected: %s\nActual: %s", string(expectedResult), actualResult) + } + + actualResult = serializer.serializeDetailed(thing1, thing2, message) + expectedResult, _ = json.Marshal(FailureView{ + Message: "Super-hip failure message.", + Expected: fmt.Sprintf("%#v", thing1), + Actual: fmt.Sprintf("%#v", thing2), + }) + if actualResult != string(expectedResult) { + t.Errorf("\nExpected: %s\nActual: %s", string(expectedResult), actualResult) + } +} + +func TestNoopSerializerJustReturnsTheMessageInAllCases(t *testing.T) { + thing1 := Thing1{"Hi"} + thing2 := Thing2{"Bye"} + expected := "Super-hip failure message." + serializer := &noopSerializer{} + actual := serializer.serialize(thing1, thing2, expected) + if actual != expected { + t.Errorf("\nExpected: %s\nActual: %s", string(expected), actual) + } + + actual = serializer.serializeDetailed(thing1, thing2, expected) + if actual != expected { + t.Errorf("\nExpected: %s\nActual: %s", string(expected), actual) + } +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/should/should.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/should/should.go new file mode 100644 index 0000000000..a5817ed70d --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/should/should.go @@ -0,0 +1,70 @@ +// package should is simply a rewording of the assertion +// functions in the assertions package. 
+package should + +import "github.com/smartystreets/assertions" + +var ( + AlmostEqual = assertions.ShouldAlmostEqual + BeBetween = assertions.ShouldBeBetween + BeBetweenOrEqual = assertions.ShouldBeBetweenOrEqual + BeBlank = assertions.ShouldBeBlank + BeChronological = assertions.ShouldBeChronological + BeEmpty = assertions.ShouldBeEmpty + BeError = assertions.ShouldBeError + BeFalse = assertions.ShouldBeFalse + BeGreaterThan = assertions.ShouldBeGreaterThan + BeGreaterThanOrEqualTo = assertions.ShouldBeGreaterThanOrEqualTo + BeIn = assertions.ShouldBeIn + BeLessThan = assertions.ShouldBeLessThan + BeLessThanOrEqualTo = assertions.ShouldBeLessThanOrEqualTo + BeNil = assertions.ShouldBeNil + BeTrue = assertions.ShouldBeTrue + BeZeroValue = assertions.ShouldBeZeroValue + Contain = assertions.ShouldContain + ContainKey = assertions.ShouldContainKey + ContainSubstring = assertions.ShouldContainSubstring + EndWith = assertions.ShouldEndWith + Equal = assertions.ShouldEqual + EqualJSON = assertions.ShouldEqualJSON + EqualTrimSpace = assertions.ShouldEqualTrimSpace + EqualWithout = assertions.ShouldEqualWithout + HappenAfter = assertions.ShouldHappenAfter + HappenBefore = assertions.ShouldHappenBefore + HappenBetween = assertions.ShouldHappenBetween + HappenOnOrAfter = assertions.ShouldHappenOnOrAfter + HappenOnOrBefore = assertions.ShouldHappenOnOrBefore + HappenOnOrBetween = assertions.ShouldHappenOnOrBetween + HappenWithin = assertions.ShouldHappenWithin + HaveLength = assertions.ShouldHaveLength + HaveSameTypeAs = assertions.ShouldHaveSameTypeAs + Implement = assertions.ShouldImplement + NotAlmostEqual = assertions.ShouldNotAlmostEqual + NotBeBetween = assertions.ShouldNotBeBetween + NotBeBetweenOrEqual = assertions.ShouldNotBeBetweenOrEqual + NotBeBlank = assertions.ShouldNotBeBlank + NotBeChronological = assertions.ShouldNotBeChronological + NotBeEmpty = assertions.ShouldNotBeEmpty + NotBeIn = assertions.ShouldNotBeIn + NotBeNil = assertions.ShouldNotBeNil + 
NotBeZeroValue = assertions.ShouldNotBeZeroValue + NotContain = assertions.ShouldNotContain + NotContainKey = assertions.ShouldNotContainKey + NotContainSubstring = assertions.ShouldNotContainSubstring + NotEndWith = assertions.ShouldNotEndWith + NotEqual = assertions.ShouldNotEqual + NotHappenOnOrBetween = assertions.ShouldNotHappenOnOrBetween + NotHappenWithin = assertions.ShouldNotHappenWithin + NotHaveSameTypeAs = assertions.ShouldNotHaveSameTypeAs + NotImplement = assertions.ShouldNotImplement + NotPanic = assertions.ShouldNotPanic + NotPanicWith = assertions.ShouldNotPanicWith + NotPointTo = assertions.ShouldNotPointTo + NotResemble = assertions.ShouldNotResemble + NotStartWith = assertions.ShouldNotStartWith + Panic = assertions.ShouldPanic + PanicWith = assertions.ShouldPanicWith + PointTo = assertions.ShouldPointTo + Resemble = assertions.ShouldResemble + StartWith = assertions.ShouldStartWith +) diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/strings.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/strings.go new file mode 100644 index 0000000000..dbc3f04790 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/strings.go @@ -0,0 +1,227 @@ +package assertions + +import ( + "fmt" + "reflect" + "strings" +) + +// ShouldStartWith receives exactly 2 string parameters and ensures that the first starts with the second. 
+func ShouldStartWith(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + value, valueIsString := actual.(string) + prefix, prefixIsString := expected[0].(string) + + if !valueIsString || !prefixIsString { + return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) + } + + return shouldStartWith(value, prefix) +} +func shouldStartWith(value, prefix string) string { + if !strings.HasPrefix(value, prefix) { + shortval := value + if len(shortval) > len(prefix) { + shortval = shortval[:len(prefix)] + "..." + } + return serializer.serialize(prefix, shortval, fmt.Sprintf(shouldHaveStartedWith, value, prefix)) + } + return success +} + +// ShouldNotStartWith receives exactly 2 string parameters and ensures that the first does not start with the second. +func ShouldNotStartWith(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + value, valueIsString := actual.(string) + prefix, prefixIsString := expected[0].(string) + + if !valueIsString || !prefixIsString { + return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) + } + + return shouldNotStartWith(value, prefix) +} +func shouldNotStartWith(value, prefix string) string { + if strings.HasPrefix(value, prefix) { + if value == "" { + value = "<empty>" + } + if prefix == "" { + prefix = "<empty>" + } + return fmt.Sprintf(shouldNotHaveStartedWith, value, prefix) + } + return success +} + +// ShouldEndWith receives exactly 2 string parameters and ensures that the first ends with the second. 
+func ShouldEndWith(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + value, valueIsString := actual.(string) + suffix, suffixIsString := expected[0].(string) + + if !valueIsString || !suffixIsString { + return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) + } + + return shouldEndWith(value, suffix) +} +func shouldEndWith(value, suffix string) string { + if !strings.HasSuffix(value, suffix) { + shortval := value + if len(shortval) > len(suffix) { + shortval = "..." + shortval[len(shortval)-len(suffix):] + } + return serializer.serialize(suffix, shortval, fmt.Sprintf(shouldHaveEndedWith, value, suffix)) + } + return success +} + +// ShouldEndWith receives exactly 2 string parameters and ensures that the first does not end with the second. +func ShouldNotEndWith(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + value, valueIsString := actual.(string) + suffix, suffixIsString := expected[0].(string) + + if !valueIsString || !suffixIsString { + return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) + } + + return shouldNotEndWith(value, suffix) +} +func shouldNotEndWith(value, suffix string) string { + if strings.HasSuffix(value, suffix) { + if value == "" { + value = "<empty>" + } + if suffix == "" { + suffix = "<empty>" + } + return fmt.Sprintf(shouldNotHaveEndedWith, value, suffix) + } + return success +} + +// ShouldContainSubstring receives exactly 2 string parameters and ensures that the first contains the second as a substring. 
+func ShouldContainSubstring(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + long, longOk := actual.(string) + short, shortOk := expected[0].(string) + + if !longOk || !shortOk { + return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) + } + + if !strings.Contains(long, short) { + return serializer.serialize(expected[0], actual, fmt.Sprintf(shouldHaveContainedSubstring, long, short)) + } + return success +} + +// ShouldNotContainSubstring receives exactly 2 string parameters and ensures that the first does NOT contain the second as a substring. +func ShouldNotContainSubstring(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + long, longOk := actual.(string) + short, shortOk := expected[0].(string) + + if !longOk || !shortOk { + return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) + } + + if strings.Contains(long, short) { + return fmt.Sprintf(shouldNotHaveContainedSubstring, long, short) + } + return success +} + +// ShouldBeBlank receives exactly 1 string parameter and ensures that it is equal to "". +func ShouldBeBlank(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } + value, ok := actual.(string) + if !ok { + return fmt.Sprintf(shouldBeString, reflect.TypeOf(actual)) + } + if value != "" { + return serializer.serialize("", value, fmt.Sprintf(shouldHaveBeenBlank, value)) + } + return success +} + +// ShouldNotBeBlank receives exactly 1 string parameter and ensures that it is equal to "". 
+func ShouldNotBeBlank(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } + value, ok := actual.(string) + if !ok { + return fmt.Sprintf(shouldBeString, reflect.TypeOf(actual)) + } + if value == "" { + return shouldNotHaveBeenBlank + } + return success +} + +// ShouldEqualWithout receives exactly 3 string parameters and ensures that the first is equal to the second +// after removing all instances of the third from the first using strings.Replace(first, third, "", -1). +func ShouldEqualWithout(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + actualString, ok1 := actual.(string) + expectedString, ok2 := expected[0].(string) + replace, ok3 := expected[1].(string) + + if !ok1 || !ok2 || !ok3 { + return fmt.Sprintf(shouldAllBeStrings, []reflect.Type{ + reflect.TypeOf(actual), + reflect.TypeOf(expected[0]), + reflect.TypeOf(expected[1]), + }) + } + + replaced := strings.Replace(actualString, replace, "", -1) + if replaced == expectedString { + return "" + } + + return fmt.Sprintf("Expected '%s' to equal '%s' but without any '%s' (but it didn't).", actualString, expectedString, replace) +} + +// ShouldEqualTrimSpace receives exactly 2 string parameters and ensures that the first is equal to the second +// after removing all leading and trailing whitespace using strings.TrimSpace(first). 
+func ShouldEqualTrimSpace(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + actualString, valueIsString := actual.(string) + _, value2IsString := expected[0].(string) + + if !valueIsString || !value2IsString { + return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) + } + + actualString = strings.TrimSpace(actualString) + return ShouldEqual(actualString, expected[0]) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/strings_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/strings_test.go new file mode 100644 index 0000000000..9850c07741 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/strings_test.go @@ -0,0 +1,108 @@ +package assertions + +func (this *AssertionsFixture) TestShouldStartWith() { + this.fail(so("", ShouldStartWith), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so("", ShouldStartWith, "asdf", "asdf"), "This assertion requires exactly 1 comparison values (you provided 2).") + + this.pass(so("", ShouldStartWith, "")) + this.fail(so("", ShouldStartWith, "x"), "x||Expected '' to start with 'x' (but it didn't)!") + this.pass(so("abc", ShouldStartWith, "abc")) + this.fail(so("abc", ShouldStartWith, "abcd"), "abcd|abc|Expected 'abc' to start with 'abcd' (but it didn't)!") + + this.pass(so("superman", ShouldStartWith, "super")) + this.fail(so("superman", ShouldStartWith, "bat"), "bat|sup...|Expected 'superman' to start with 'bat' (but it didn't)!") + this.fail(so("superman", ShouldStartWith, "man"), "man|sup...|Expected 'superman' to start with 'man' (but it didn't)!") + + this.fail(so(1, ShouldStartWith, 2), "Both arguments to this assertion must be strings (you provided int and int).") +} + +func (this *AssertionsFixture) TestShouldNotStartWith() { + this.fail(so("", 
ShouldNotStartWith), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so("", ShouldNotStartWith, "asdf", "asdf"), "This assertion requires exactly 1 comparison values (you provided 2).") + + this.fail(so("", ShouldNotStartWith, ""), "Expected '<empty>' NOT to start with '<empty>' (but it did)!") + this.fail(so("superman", ShouldNotStartWith, "super"), "Expected 'superman' NOT to start with 'super' (but it did)!") + this.pass(so("superman", ShouldNotStartWith, "bat")) + this.pass(so("superman", ShouldNotStartWith, "man")) + + this.fail(so(1, ShouldNotStartWith, 2), "Both arguments to this assertion must be strings (you provided int and int).") +} + +func (this *AssertionsFixture) TestShouldEndWith() { + this.fail(so("", ShouldEndWith), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so("", ShouldEndWith, "", ""), "This assertion requires exactly 1 comparison values (you provided 2).") + + this.pass(so("", ShouldEndWith, "")) + this.fail(so("", ShouldEndWith, "z"), "z||Expected '' to end with 'z' (but it didn't)!") + this.pass(so("xyz", ShouldEndWith, "xyz")) + this.fail(so("xyz", ShouldEndWith, "wxyz"), "wxyz|xyz|Expected 'xyz' to end with 'wxyz' (but it didn't)!") + + this.pass(so("superman", ShouldEndWith, "man")) + this.fail(so("superman", ShouldEndWith, "super"), "super|...erman|Expected 'superman' to end with 'super' (but it didn't)!") + this.fail(so("superman", ShouldEndWith, "blah"), "blah|...rman|Expected 'superman' to end with 'blah' (but it didn't)!") + + this.fail(so(1, ShouldEndWith, 2), "Both arguments to this assertion must be strings (you provided int and int).") +} + +func (this *AssertionsFixture) TestShouldNotEndWith() { + this.fail(so("", ShouldNotEndWith), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so("", ShouldNotEndWith, "", ""), "This assertion requires exactly 1 comparison values (you provided 2).") + + this.fail(so("", ShouldNotEndWith, ""), 
"Expected '<empty>' NOT to end with '<empty>' (but it did)!") + this.fail(so("superman", ShouldNotEndWith, "man"), "Expected 'superman' NOT to end with 'man' (but it did)!") + this.pass(so("superman", ShouldNotEndWith, "super")) + + this.fail(so(1, ShouldNotEndWith, 2), "Both arguments to this assertion must be strings (you provided int and int).") +} + +func (this *AssertionsFixture) TestShouldContainSubstring() { + this.fail(so("asdf", ShouldContainSubstring), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so("asdf", ShouldContainSubstring, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + this.fail(so(123, ShouldContainSubstring, 23), "Both arguments to this assertion must be strings (you provided int and int).") + + this.pass(so("asdf", ShouldContainSubstring, "sd")) + this.fail(so("qwer", ShouldContainSubstring, "sd"), "sd|qwer|Expected 'qwer' to contain substring 'sd' (but it didn't)!") +} + +func (this *AssertionsFixture) TestShouldNotContainSubstring() { + this.fail(so("asdf", ShouldNotContainSubstring), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so("asdf", ShouldNotContainSubstring, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + this.fail(so(123, ShouldNotContainSubstring, 23), "Both arguments to this assertion must be strings (you provided int and int).") + + this.pass(so("qwer", ShouldNotContainSubstring, "sd")) + this.fail(so("asdf", ShouldNotContainSubstring, "sd"), "Expected 'asdf' NOT to contain substring 'sd' (but it did)!") +} + +func (this *AssertionsFixture) TestShouldBeBlank() { + this.fail(so("", ShouldBeBlank, "adsf"), "This assertion requires exactly 0 comparison values (you provided 1).") + this.fail(so(1, ShouldBeBlank), "The argument to this assertion must be a string (you provided int).") + + this.fail(so("asdf", ShouldBeBlank), "|asdf|Expected 'asdf' to be blank (but it wasn't)!") + 
this.pass(so("", ShouldBeBlank)) +} + +func (this *AssertionsFixture) TestShouldNotBeBlank() { + this.fail(so("", ShouldNotBeBlank, "adsf"), "This assertion requires exactly 0 comparison values (you provided 1).") + this.fail(so(1, ShouldNotBeBlank), "The argument to this assertion must be a string (you provided int).") + + this.fail(so("", ShouldNotBeBlank), "Expected value to NOT be blank (but it was)!") + this.pass(so("asdf", ShouldNotBeBlank)) +} + +func (this *AssertionsFixture) TestShouldEqualWithout() { + this.fail(so("", ShouldEqualWithout, ""), "This assertion requires exactly 2 comparison values (you provided 1).") + this.fail(so(1, ShouldEqualWithout, 2, 3), "All arguments to this assertion must be strings (you provided: [int int int]).") + + this.fail(so("asdf", ShouldEqualWithout, "qwer", "q"), "Expected 'asdf' to equal 'qwer' but without any 'q' (but it didn't).") + this.pass(so("asdf", ShouldEqualWithout, "df", "as")) +} + +func (this *AssertionsFixture) TestShouldEqualTrimSpace() { + this.fail(so(" asdf ", ShouldEqualTrimSpace), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(1, ShouldEqualTrimSpace, 2), "Both arguments to this assertion must be strings (you provided int and int).") + + this.fail(so("asdf", ShouldEqualTrimSpace, "qwer"), "qwer|asdf|Expected: 'qwer' Actual: 'asdf' (Should be equal)") + this.pass(so(" asdf\t\n", ShouldEqualTrimSpace, "asdf")) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/time.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/time.go new file mode 100644 index 0000000000..918ee2840e --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/time.go @@ -0,0 +1,218 @@ +package assertions + +import ( + "fmt" + "time" +) + +// ShouldHappenBefore receives exactly 2 time.Time arguments and asserts that the first happens before the second. 
+func ShouldHappenBefore(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + actualTime, firstOk := actual.(time.Time) + expectedTime, secondOk := expected[0].(time.Time) + + if !firstOk || !secondOk { + return shouldUseTimes + } + + if !actualTime.Before(expectedTime) { + return fmt.Sprintf(shouldHaveHappenedBefore, actualTime, expectedTime, actualTime.Sub(expectedTime)) + } + + return success +} + +// ShouldHappenOnOrBefore receives exactly 2 time.Time arguments and asserts that the first happens on or before the second. +func ShouldHappenOnOrBefore(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + actualTime, firstOk := actual.(time.Time) + expectedTime, secondOk := expected[0].(time.Time) + + if !firstOk || !secondOk { + return shouldUseTimes + } + + if actualTime.Equal(expectedTime) { + return success + } + return ShouldHappenBefore(actualTime, expectedTime) +} + +// ShouldHappenAfter receives exactly 2 time.Time arguments and asserts that the first happens after the second. +func ShouldHappenAfter(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + actualTime, firstOk := actual.(time.Time) + expectedTime, secondOk := expected[0].(time.Time) + + if !firstOk || !secondOk { + return shouldUseTimes + } + if !actualTime.After(expectedTime) { + return fmt.Sprintf(shouldHaveHappenedAfter, actualTime, expectedTime, expectedTime.Sub(actualTime)) + } + return success +} + +// ShouldHappenOnOrAfter receives exactly 2 time.Time arguments and asserts that the first happens on or after the second. 
+func ShouldHappenOnOrAfter(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + actualTime, firstOk := actual.(time.Time) + expectedTime, secondOk := expected[0].(time.Time) + + if !firstOk || !secondOk { + return shouldUseTimes + } + if actualTime.Equal(expectedTime) { + return success + } + return ShouldHappenAfter(actualTime, expectedTime) +} + +// ShouldHappenBetween receives exactly 3 time.Time arguments and asserts that the first happens between (not on) the second and third. +func ShouldHappenBetween(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + actualTime, firstOk := actual.(time.Time) + min, secondOk := expected[0].(time.Time) + max, thirdOk := expected[1].(time.Time) + + if !firstOk || !secondOk || !thirdOk { + return shouldUseTimes + } + + if !actualTime.After(min) { + return fmt.Sprintf(shouldHaveHappenedBetween, actualTime, min, max, min.Sub(actualTime)) + } + if !actualTime.Before(max) { + return fmt.Sprintf(shouldHaveHappenedBetween, actualTime, min, max, actualTime.Sub(max)) + } + return success +} + +// ShouldHappenOnOrBetween receives exactly 3 time.Time arguments and asserts that the first happens between or on the second and third. +func ShouldHappenOnOrBetween(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + actualTime, firstOk := actual.(time.Time) + min, secondOk := expected[0].(time.Time) + max, thirdOk := expected[1].(time.Time) + + if !firstOk || !secondOk || !thirdOk { + return shouldUseTimes + } + if actualTime.Equal(min) || actualTime.Equal(max) { + return success + } + return ShouldHappenBetween(actualTime, min, max) +} + +// ShouldNotHappenOnOrBetween receives exactly 3 time.Time arguments and asserts that the first +// does NOT happen between or on the second or third. 
+func ShouldNotHappenOnOrBetween(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + actualTime, firstOk := actual.(time.Time) + min, secondOk := expected[0].(time.Time) + max, thirdOk := expected[1].(time.Time) + + if !firstOk || !secondOk || !thirdOk { + return shouldUseTimes + } + if actualTime.Equal(min) || actualTime.Equal(max) { + return fmt.Sprintf(shouldNotHaveHappenedOnOrBetween, actualTime, min, max) + } + if actualTime.After(min) && actualTime.Before(max) { + return fmt.Sprintf(shouldNotHaveHappenedOnOrBetween, actualTime, min, max) + } + return success +} + +// ShouldHappenWithin receives a time.Time, a time.Duration, and a time.Time (3 arguments) +// and asserts that the first time.Time happens within or on the duration specified relative to +// the other time.Time. +func ShouldHappenWithin(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + actualTime, firstOk := actual.(time.Time) + tolerance, secondOk := expected[0].(time.Duration) + threshold, thirdOk := expected[1].(time.Time) + + if !firstOk || !secondOk || !thirdOk { + return shouldUseDurationAndTime + } + + min := threshold.Add(-tolerance) + max := threshold.Add(tolerance) + return ShouldHappenOnOrBetween(actualTime, min, max) +} + +// ShouldNotHappenWithin receives a time.Time, a time.Duration, and a time.Time (3 arguments) +// and asserts that the first time.Time does NOT happen within or on the duration specified relative to +// the other time.Time. 
+func ShouldNotHappenWithin(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + actualTime, firstOk := actual.(time.Time) + tolerance, secondOk := expected[0].(time.Duration) + threshold, thirdOk := expected[1].(time.Time) + + if !firstOk || !secondOk || !thirdOk { + return shouldUseDurationAndTime + } + + min := threshold.Add(-tolerance) + max := threshold.Add(tolerance) + return ShouldNotHappenOnOrBetween(actualTime, min, max) +} + +// ShouldBeChronological receives a []time.Time slice and asserts that they are +// in chronological order starting with the first time.Time as the earliest. +func ShouldBeChronological(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } + + times, ok := actual.([]time.Time) + if !ok { + return shouldUseTimeSlice + } + + var previous time.Time + for i, current := range times { + if i > 0 && current.Before(previous) { + return fmt.Sprintf(shouldHaveBeenChronological, + i, i-1, previous.String(), i, current.String()) + } + previous = current + } + return "" +} + +// ShouldNotBeChronological receives a []time.Time slice and asserts that they are +// NOT in chronological order. +func ShouldNotBeChronological(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } + if _, ok := actual.([]time.Time); !ok { + return shouldUseTimeSlice + } + result := ShouldBeChronological(actual, expected...) 
+ if result != "" { + return "" + } + return shouldNotHaveBeenchronological +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/time_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/time_test.go new file mode 100644 index 0000000000..c98858909f --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/time_test.go @@ -0,0 +1,167 @@ +package assertions + +import ( + "fmt" + "time" +) + +func (this *AssertionsFixture) TestShouldHappenBefore() { + this.fail(so(0, ShouldHappenBefore), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(0, ShouldHappenBefore, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + this.fail(so(0, ShouldHappenBefore, 1), shouldUseTimes) + this.fail(so(0, ShouldHappenBefore, time.Now()), shouldUseTimes) + this.fail(so(time.Now(), ShouldHappenBefore, 0), shouldUseTimes) + + this.fail(so(january3, ShouldHappenBefore, january1), fmt.Sprintf("Expected '%s' to happen before '%s' (it happened '48h0m0s' after)!", pretty(january3), pretty(january1))) + this.fail(so(january3, ShouldHappenBefore, january3), fmt.Sprintf("Expected '%s' to happen before '%s' (it happened '0s' after)!", pretty(january3), pretty(january3))) + this.pass(so(january1, ShouldHappenBefore, january3)) +} + +func (this *AssertionsFixture) TestShouldHappenOnOrBefore() { + this.fail(so(0, ShouldHappenOnOrBefore), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(0, ShouldHappenOnOrBefore, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + this.fail(so(0, ShouldHappenOnOrBefore, 1), shouldUseTimes) + this.fail(so(0, ShouldHappenOnOrBefore, time.Now()), shouldUseTimes) + this.fail(so(time.Now(), ShouldHappenOnOrBefore, 0), shouldUseTimes) + + this.fail(so(january3, ShouldHappenOnOrBefore, january1), 
fmt.Sprintf("Expected '%s' to happen before '%s' (it happened '48h0m0s' after)!", pretty(january3), pretty(january1))) + this.pass(so(january3, ShouldHappenOnOrBefore, january3)) + this.pass(so(january1, ShouldHappenOnOrBefore, january3)) +} + +func (this *AssertionsFixture) TestShouldHappenAfter() { + this.fail(so(0, ShouldHappenAfter), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(0, ShouldHappenAfter, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + this.fail(so(0, ShouldHappenAfter, 1), shouldUseTimes) + this.fail(so(0, ShouldHappenAfter, time.Now()), shouldUseTimes) + this.fail(so(time.Now(), ShouldHappenAfter, 0), shouldUseTimes) + + this.fail(so(january1, ShouldHappenAfter, january2), fmt.Sprintf("Expected '%s' to happen after '%s' (it happened '24h0m0s' before)!", pretty(january1), pretty(january2))) + this.fail(so(january1, ShouldHappenAfter, january1), fmt.Sprintf("Expected '%s' to happen after '%s' (it happened '0s' before)!", pretty(january1), pretty(january1))) + this.pass(so(january3, ShouldHappenAfter, january1)) +} + +func (this *AssertionsFixture) TestShouldHappenOnOrAfter() { + this.fail(so(0, ShouldHappenOnOrAfter), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(0, ShouldHappenOnOrAfter, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + this.fail(so(0, ShouldHappenOnOrAfter, 1), shouldUseTimes) + this.fail(so(0, ShouldHappenOnOrAfter, time.Now()), shouldUseTimes) + this.fail(so(time.Now(), ShouldHappenOnOrAfter, 0), shouldUseTimes) + + this.fail(so(january1, ShouldHappenOnOrAfter, january2), fmt.Sprintf("Expected '%s' to happen after '%s' (it happened '24h0m0s' before)!", pretty(january1), pretty(january2))) + this.pass(so(january1, ShouldHappenOnOrAfter, january1)) + this.pass(so(january3, ShouldHappenOnOrAfter, january1)) +} + +func (this *AssertionsFixture) TestShouldHappenBetween() { + 
this.fail(so(0, ShouldHappenBetween), "This assertion requires exactly 2 comparison values (you provided 0).") + this.fail(so(0, ShouldHappenBetween, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).") + + this.fail(so(0, ShouldHappenBetween, 1, 2), shouldUseTimes) + this.fail(so(0, ShouldHappenBetween, time.Now(), time.Now()), shouldUseTimes) + this.fail(so(time.Now(), ShouldHappenBetween, 0, time.Now()), shouldUseTimes) + this.fail(so(time.Now(), ShouldHappenBetween, time.Now(), 9), shouldUseTimes) + + this.fail(so(january1, ShouldHappenBetween, january2, january4), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '24h0m0s' outside threshold)!", pretty(january1), pretty(january2), pretty(january4))) + this.fail(so(january2, ShouldHappenBetween, january2, january4), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '0s' outside threshold)!", pretty(january2), pretty(january2), pretty(january4))) + this.pass(so(january3, ShouldHappenBetween, january2, january4)) + this.fail(so(january4, ShouldHappenBetween, january2, january4), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '0s' outside threshold)!", pretty(january4), pretty(january2), pretty(january4))) + this.fail(so(january5, ShouldHappenBetween, january2, january4), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '24h0m0s' outside threshold)!", pretty(january5), pretty(january2), pretty(january4))) +} + +func (this *AssertionsFixture) TestShouldHappenOnOrBetween() { + this.fail(so(0, ShouldHappenOnOrBetween), "This assertion requires exactly 2 comparison values (you provided 0).") + this.fail(so(0, ShouldHappenOnOrBetween, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).") + + this.fail(so(0, ShouldHappenOnOrBetween, 1, time.Now()), shouldUseTimes) + this.fail(so(0, ShouldHappenOnOrBetween, time.Now(), 1), shouldUseTimes) + this.fail(so(time.Now(), 
ShouldHappenOnOrBetween, 0, 1), shouldUseTimes) + + this.fail(so(january1, ShouldHappenOnOrBetween, january2, january4), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '24h0m0s' outside threshold)!", pretty(january1), pretty(january2), pretty(january4))) + this.pass(so(january2, ShouldHappenOnOrBetween, january2, january4)) + this.pass(so(january3, ShouldHappenOnOrBetween, january2, january4)) + this.pass(so(january4, ShouldHappenOnOrBetween, january2, january4)) + this.fail(so(january5, ShouldHappenOnOrBetween, january2, january4), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '24h0m0s' outside threshold)!", pretty(january5), pretty(january2), pretty(january4))) +} + +func (this *AssertionsFixture) TestShouldNotHappenOnOrBetween() { + this.fail(so(0, ShouldNotHappenOnOrBetween), "This assertion requires exactly 2 comparison values (you provided 0).") + this.fail(so(0, ShouldNotHappenOnOrBetween, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).") + + this.fail(so(0, ShouldNotHappenOnOrBetween, 1, time.Now()), shouldUseTimes) + this.fail(so(0, ShouldNotHappenOnOrBetween, time.Now(), 1), shouldUseTimes) + this.fail(so(time.Now(), ShouldNotHappenOnOrBetween, 0, 1), shouldUseTimes) + + this.pass(so(january1, ShouldNotHappenOnOrBetween, january2, january4)) + this.fail(so(january2, ShouldNotHappenOnOrBetween, january2, january4), fmt.Sprintf("Expected '%s' to NOT happen on or between '%s' and '%s' (but it did)!", pretty(january2), pretty(january2), pretty(january4))) + this.fail(so(january3, ShouldNotHappenOnOrBetween, january2, january4), fmt.Sprintf("Expected '%s' to NOT happen on or between '%s' and '%s' (but it did)!", pretty(january3), pretty(january2), pretty(january4))) + this.fail(so(january4, ShouldNotHappenOnOrBetween, january2, january4), fmt.Sprintf("Expected '%s' to NOT happen on or between '%s' and '%s' (but it did)!", pretty(january4), pretty(january2), pretty(january4))) + 
this.pass(so(january5, ShouldNotHappenOnOrBetween, january2, january4)) +} + +func (this *AssertionsFixture) TestShouldHappenWithin() { + this.fail(so(0, ShouldHappenWithin), "This assertion requires exactly 2 comparison values (you provided 0).") + this.fail(so(0, ShouldHappenWithin, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).") + + this.fail(so(0, ShouldHappenWithin, 1, 2), shouldUseDurationAndTime) + this.fail(so(0, ShouldHappenWithin, oneDay, time.Now()), shouldUseDurationAndTime) + this.fail(so(time.Now(), ShouldHappenWithin, 0, time.Now()), shouldUseDurationAndTime) + + this.fail(so(january1, ShouldHappenWithin, oneDay, january3), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '24h0m0s' outside threshold)!", pretty(january1), pretty(january2), pretty(january4))) + this.pass(so(january2, ShouldHappenWithin, oneDay, january3)) + this.pass(so(january3, ShouldHappenWithin, oneDay, january3)) + this.pass(so(january4, ShouldHappenWithin, oneDay, january3)) + this.fail(so(january5, ShouldHappenWithin, oneDay, january3), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '24h0m0s' outside threshold)!", pretty(january5), pretty(january2), pretty(january4))) +} + +func (this *AssertionsFixture) TestShouldNotHappenWithin() { + this.fail(so(0, ShouldNotHappenWithin), "This assertion requires exactly 2 comparison values (you provided 0).") + this.fail(so(0, ShouldNotHappenWithin, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).") + + this.fail(so(0, ShouldNotHappenWithin, 1, 2), shouldUseDurationAndTime) + this.fail(so(0, ShouldNotHappenWithin, oneDay, time.Now()), shouldUseDurationAndTime) + this.fail(so(time.Now(), ShouldNotHappenWithin, 0, time.Now()), shouldUseDurationAndTime) + + this.pass(so(january1, ShouldNotHappenWithin, oneDay, january3)) + this.fail(so(january2, ShouldNotHappenWithin, oneDay, january3), fmt.Sprintf("Expected '%s' to NOT happen on 
or between '%s' and '%s' (but it did)!", pretty(january2), pretty(january2), pretty(january4))) + this.fail(so(january3, ShouldNotHappenWithin, oneDay, january3), fmt.Sprintf("Expected '%s' to NOT happen on or between '%s' and '%s' (but it did)!", pretty(january3), pretty(january2), pretty(january4))) + this.fail(so(january4, ShouldNotHappenWithin, oneDay, january3), fmt.Sprintf("Expected '%s' to NOT happen on or between '%s' and '%s' (but it did)!", pretty(january4), pretty(january2), pretty(january4))) + this.pass(so(january5, ShouldNotHappenWithin, oneDay, january3)) +} + +func (this *AssertionsFixture) TestShouldBeChronological() { + this.fail(so(0, ShouldBeChronological, 1, 2, 3), "This assertion requires exactly 0 comparison values (you provided 3).") + this.fail(so(0, ShouldBeChronological), shouldUseTimeSlice) + this.fail(so([]time.Time{january5, january1}, ShouldBeChronological), + "The 'Time' at index [1] should have happened after the previous one (but it didn't!):\n [0]: 2013-01-05 00:00:00 +0000 UTC\n [1]: 2013-01-01 00:00:00 +0000 UTC (see, it happened before!)") + + this.pass(so([]time.Time{january1, january2, january3, january4, january5}, ShouldBeChronological)) +} + +func (this *AssertionsFixture) TestShouldNotBeChronological() { + this.fail(so(0, ShouldNotBeChronological, 1, 2, 3), "This assertion requires exactly 0 comparison values (you provided 3).") + this.fail(so(0, ShouldNotBeChronological), shouldUseTimeSlice) + this.fail(so([]time.Time{january1, january5}, ShouldNotBeChronological), + "The provided times should NOT be chronological, but they were.") + + this.pass(so([]time.Time{january2, january1, january3, january4, january5}, ShouldNotBeChronological)) +} + +const layout = "2006-01-02 15:04" + +var january1, _ = time.Parse(layout, "2013-01-01 00:00") +var january2, _ = time.Parse(layout, "2013-01-02 00:00") +var january3, _ = time.Parse(layout, "2013-01-03 00:00") +var january4, _ = time.Parse(layout, "2013-01-04 00:00") +var january5, 
_ = time.Parse(layout, "2013-01-05 00:00") + +var oneDay, _ = time.ParseDuration("24h0m0s") +var twoDays, _ = time.ParseDuration("48h0m0s") + +func pretty(t time.Time) string { + return fmt.Sprintf("%v", t) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/type.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/type.go new file mode 100644 index 0000000000..d2d1dc864b --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/type.go @@ -0,0 +1,134 @@ +package assertions + +import ( + "fmt" + "reflect" +) + +// ShouldHaveSameTypeAs receives exactly two parameters and compares their underlying types for equality. +func ShouldHaveSameTypeAs(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + first := reflect.TypeOf(actual) + second := reflect.TypeOf(expected[0]) + + if first != second { + return serializer.serialize(second, first, fmt.Sprintf(shouldHaveBeenA, actual, second, first)) + } + + return success +} + +// ShouldNotHaveSameTypeAs receives exactly two parameters and compares their underlying types for inequality. +func ShouldNotHaveSameTypeAs(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + first := reflect.TypeOf(actual) + second := reflect.TypeOf(expected[0]) + + if (actual == nil && expected[0] == nil) || first == second { + return fmt.Sprintf(shouldNotHaveBeenA, actual, second) + } + return success +} + +// ShouldImplement receives exactly two parameters and ensures +// that the first implements the interface type of the second. 
+func ShouldImplement(actual interface{}, expectedList ...interface{}) string { + if fail := need(1, expectedList); fail != success { + return fail + } + + expected := expectedList[0] + if fail := ShouldBeNil(expected); fail != success { + return shouldCompareWithInterfacePointer + } + + if fail := ShouldNotBeNil(actual); fail != success { + return shouldNotBeNilActual + } + + var actualType reflect.Type + if reflect.TypeOf(actual).Kind() != reflect.Ptr { + actualType = reflect.PtrTo(reflect.TypeOf(actual)) + } else { + actualType = reflect.TypeOf(actual) + } + + expectedType := reflect.TypeOf(expected) + if fail := ShouldNotBeNil(expectedType); fail != success { + return shouldCompareWithInterfacePointer + } + + expectedInterface := expectedType.Elem() + + if !actualType.Implements(expectedInterface) { + return fmt.Sprintf(shouldHaveImplemented, expectedInterface, actualType) + } + return success +} + +// ShouldNotImplement receives exactly two parameters and ensures +// that the first does NOT implement the interface type of the second. 
+func ShouldNotImplement(actual interface{}, expectedList ...interface{}) string { + if fail := need(1, expectedList); fail != success { + return fail + } + + expected := expectedList[0] + if fail := ShouldBeNil(expected); fail != success { + return shouldCompareWithInterfacePointer + } + + if fail := ShouldNotBeNil(actual); fail != success { + return shouldNotBeNilActual + } + + var actualType reflect.Type + if reflect.TypeOf(actual).Kind() != reflect.Ptr { + actualType = reflect.PtrTo(reflect.TypeOf(actual)) + } else { + actualType = reflect.TypeOf(actual) + } + + expectedType := reflect.TypeOf(expected) + if fail := ShouldNotBeNil(expectedType); fail != success { + return shouldCompareWithInterfacePointer + } + + expectedInterface := expectedType.Elem() + + if actualType.Implements(expectedInterface) { + return fmt.Sprintf(shouldNotHaveImplemented, actualType, expectedInterface) + } + return success +} + +// ShouldBeError asserts that the first argument implements the error interface. +// It also compares the first argument against the second argument if provided +// (which must be an error message string or another error value). 
+func ShouldBeError(actual interface{}, expected ...interface{}) string { + if fail := atMost(1, expected); fail != success { + return fail + } + + if !isError(actual) { + return fmt.Sprintf(shouldBeError, reflect.TypeOf(actual)) + } + + if len(expected) == 0 { + return success + } + + if expected := expected[0]; !isString(expected) && !isError(expected) { + return fmt.Sprintf(shouldBeErrorInvalidComparisonValue, reflect.TypeOf(expected)) + } + return ShouldEqual(fmt.Sprint(actual), fmt.Sprint(expected[0])) +} + +func isString(value interface{}) bool { _, ok := value.(string); return ok } +func isError(value interface{}) bool { _, ok := value.(error); return ok } diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/type_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/type_test.go new file mode 100644 index 0000000000..25c3aae274 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/type_test.go @@ -0,0 +1,90 @@ +package assertions + +import ( + "bytes" + "errors" + "io" + "net/http" +) + +func (this *AssertionsFixture) TestShouldHaveSameTypeAs() { + this.fail(so(1, ShouldHaveSameTypeAs), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(1, ShouldHaveSameTypeAs, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + this.fail(so(nil, ShouldHaveSameTypeAs, 0), "int||Expected '' to be: 'int' (but was: '')!") + this.fail(so(1, ShouldHaveSameTypeAs, "asdf"), "string|int|Expected '1' to be: 'string' (but was: 'int')!") + + this.pass(so(1, ShouldHaveSameTypeAs, 0)) + this.pass(so(nil, ShouldHaveSameTypeAs, nil)) +} + +func (this *AssertionsFixture) TestShouldNotHaveSameTypeAs() { + this.fail(so(1, ShouldNotHaveSameTypeAs), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(1, ShouldNotHaveSameTypeAs, 1, 2, 3), "This assertion requires 
exactly 1 comparison values (you provided 3).") + + this.fail(so(1, ShouldNotHaveSameTypeAs, 0), "Expected '1' to NOT be: 'int' (but it was)!") + this.fail(so(nil, ShouldNotHaveSameTypeAs, nil), "Expected '' to NOT be: '' (but it was)!") + + this.pass(so(nil, ShouldNotHaveSameTypeAs, 0)) + this.pass(so(1, ShouldNotHaveSameTypeAs, "asdf")) +} + +func (this *AssertionsFixture) TestShouldImplement() { + var ioReader *io.Reader = nil + var response http.Response = http.Response{} + var responsePtr *http.Response = new(http.Response) + var reader = bytes.NewBufferString("") + + this.fail(so(reader, ShouldImplement), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(reader, ShouldImplement, ioReader, ioReader), "This assertion requires exactly 1 comparison values (you provided 2).") + this.fail(so(reader, ShouldImplement, ioReader, ioReader, ioReader), "This assertion requires exactly 1 comparison values (you provided 3).") + + this.fail(so(reader, ShouldImplement, "foo"), shouldCompareWithInterfacePointer) + this.fail(so(reader, ShouldImplement, 1), shouldCompareWithInterfacePointer) + this.fail(so(reader, ShouldImplement, nil), shouldCompareWithInterfacePointer) + + this.fail(so(nil, ShouldImplement, ioReader), shouldNotBeNilActual) + this.fail(so(1, ShouldImplement, ioReader), "Expected: 'io.Reader interface support'\nActual: '*int' does not implement the interface!") + + this.fail(so(response, ShouldImplement, ioReader), "Expected: 'io.Reader interface support'\nActual: '*http.Response' does not implement the interface!") + this.fail(so(responsePtr, ShouldImplement, ioReader), "Expected: 'io.Reader interface support'\nActual: '*http.Response' does not implement the interface!") + this.pass(so(reader, ShouldImplement, ioReader)) + this.pass(so(reader, ShouldImplement, (*io.Reader)(nil))) +} + +func (this *AssertionsFixture) TestShouldNotImplement() { + var ioReader *io.Reader = nil + var response http.Response = http.Response{} + 
var responsePtr *http.Response = new(http.Response) + var reader io.Reader = bytes.NewBufferString("") + + this.fail(so(reader, ShouldNotImplement), "This assertion requires exactly 1 comparison values (you provided 0).") + this.fail(so(reader, ShouldNotImplement, ioReader, ioReader), "This assertion requires exactly 1 comparison values (you provided 2).") + this.fail(so(reader, ShouldNotImplement, ioReader, ioReader, ioReader), "This assertion requires exactly 1 comparison values (you provided 3).") + + this.fail(so(reader, ShouldNotImplement, "foo"), shouldCompareWithInterfacePointer) + this.fail(so(reader, ShouldNotImplement, 1), shouldCompareWithInterfacePointer) + this.fail(so(reader, ShouldNotImplement, nil), shouldCompareWithInterfacePointer) + + this.fail(so(reader, ShouldNotImplement, ioReader), "Expected '*bytes.Buffer'\nto NOT implement 'io.Reader' (but it did)!") + this.fail(so(nil, ShouldNotImplement, ioReader), shouldNotBeNilActual) + this.pass(so(1, ShouldNotImplement, ioReader)) + this.pass(so(response, ShouldNotImplement, ioReader)) + this.pass(so(responsePtr, ShouldNotImplement, ioReader)) +} + +func (this *AssertionsFixture) TestShouldBeError() { + this.fail(so(nil, ShouldBeError, "too", "many"), "This assertion allows 1 or fewer comparison values (you provided 2).") + + this.fail(so(1, ShouldBeError), "Expected an error value (but was 'int' instead)!") + this.fail(so(nil, ShouldBeError), "Expected an error value (but was '' instead)!") + + error1 := errors.New("Message") + + this.fail(so(error1, ShouldBeError, 42), "The final argument to this assertion must be a string or an error value (you provided: 'int').") + this.fail(so(error1, ShouldBeError, "Wrong error message"), "Wrong error message|Message|Expected: 'Wrong error message' Actual: 'Message' (Should be equal)") + + this.pass(so(error1, ShouldBeError)) + this.pass(so(error1, ShouldBeError, error1)) + this.pass(so(error1, ShouldBeError, error1.Error())) +} diff --git 
a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/utilities_for_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/utilities_for_test.go new file mode 100644 index 0000000000..43ea19dd5a --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/assertions/utilities_for_test.go @@ -0,0 +1,89 @@ +package assertions + +import ( + "fmt" + "strings" + "testing" + + "github.com/smartystreets/assertions/internal/unit" +) + +/**************************************************************************/ + +func TestAssertionsFixture(t *testing.T) { + unit.Run(new(AssertionsFixture), t) +} + +type AssertionsFixture struct { + *unit.Fixture +} + +func (this *AssertionsFixture) Setup() { + serializer = this +} + +func (self *AssertionsFixture) serialize(expected, actual interface{}, message string) string { + return fmt.Sprintf("%v|%v|%s", expected, actual, message) +} + +func (self *AssertionsFixture) serializeDetailed(expected, actual interface{}, message string) string { + return fmt.Sprintf("%v|%v|%s", expected, actual, message) +} + +func (this *AssertionsFixture) pass(result string) { + this.Assert(result == success, result) +} + +func (this *AssertionsFixture) fail(actual string, expected string) { + actual = format(actual) + expected = format(expected) + + if actual != expected { + if actual == "" { + actual = "(empty)" + } + this.Errorf("Expected: %s\nActual: %s\n", expected, actual) + } +} +func format(message string) string { + message = strings.Replace(message, "\n", " ", -1) + for strings.Contains(message, " ") { + message = strings.Replace(message, " ", " ", -1) + } + message = strings.Replace(message, "\x1b[32m", "", -1) + message = strings.Replace(message, "\x1b[31m", "", -1) + message = strings.Replace(message, "\x1b[0m", "", -1) + return message +} + +/**************************************************************************/ + +type Thing1 struct { + a 
string +} +type Thing2 struct { + a string +} + +type ThingInterface interface { + Hi() +} + +type ThingImplementation struct{} + +func (self *ThingImplementation) Hi() {} + +type IntAlias int +type StringAlias string +type StringSliceAlias []string +type StringStringMapAlias map[string]string + +/**************************************************************************/ + +type ThingWithEqualMethod struct { + a string +} + +func (this ThingWithEqualMethod) Equal(that ThingWithEqualMethod) bool { + return this.a == that.a +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/.gitattributes b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/.gitattributes new file mode 100644 index 0000000000..bc2c94aa5d --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/.gitattributes @@ -0,0 +1 @@ +web/client/resources/js/lib/* linguist-vendored diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/.gitignore b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/.gitignore new file mode 100644 index 0000000000..c9205c5335 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/.gitignore @@ -0,0 +1,5 @@ +.DS_Store +Thumbs.db +examples/output.json +web/client/reports/ +/.idea \ No newline at end of file diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/.travis.yml b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/.travis.yml new file mode 100644 index 0000000000..7131d493f4 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/.travis.yml @@ -0,0 +1,22 @@ +language: go + +go: + - 1.2.x + - 1.3.x + - 1.4.x + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - master + +install: + - go get -t ./... 
+ +script: go test -short -v ./... + +sudo: false diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/CONTRIBUTING.md b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/CONTRIBUTING.md new file mode 100644 index 0000000000..cc0e8e8e40 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/CONTRIBUTING.md @@ -0,0 +1,35 @@ +# Subject: GoConvey maintainers wanted + +We'd like to open the project up to additional maintainers who want to move the project forward in a meaningful way. + +We've spent significant time at SmartyStreets building GoConvey and it has perfectly met (and exceeded) all of our initial design specifications. We've used it to great effect. Being so well-matched to our development workflows at SmartyStreets, we haven't had a need to hack on it lately. This had been frustrating to many in the community who have ideas for the project and would like to see new features released (and some old bugs fixed). The release of Go 1.5 and the new vendoring experiment has been a source of confusion and hassle for those who have already upgraded and find that GoConvey needs to be brought up to speed. + +GoConvey is a popular 2-pronged, open-source github project (1,600+ stargazers, 100+ forks): + +- A package you import in your test code that allows you to write BDD-style tests. +- An executable that runs a local web server which displays auto-updating test results in a web browser. + +---- + +- http://goconvey.co/ +- https://github.com/smartystreets/goconvey +- https://github.com/smartystreets/goconvey/wiki + +_I should mention that the [assertions package](https://github.com/smartystreets/assertions) imported by the convey package is used by other projects at SmartyStreets and so we will be continuing to maintain that project internally._ + +We hope to hear from you soon. Thanks! 
+ +--- + +# Contributing + +In general, the code posted to the [SmartyStreets github organization](https://github.com/smartystreets) is created to solve specific problems at SmartyStreets that are ancillary to our core products in the address verification industry and may or may not be useful to other organizations or developers. Our reason for posting said code isn't necessarily to solicit feedback or contributions from the community but more as a showcase of some of the approaches to solving problems we have adopted. + +Having stated that, we do consider issues raised by other githubbers as well as contributions submitted via pull requests. When submitting such a pull request, please follow these guidelines: + +- _Look before you leap:_ If the changes you plan to make are significant, it's in everyone's best interest for you to discuss them with a SmartyStreets team member prior to opening a pull request. +- _License and ownership:_ If modifying the `LICENSE.md` file, limit your changes to fixing typographical mistakes. Do NOT modify the actual terms in the license or the copyright by **SmartyStreets, LLC**. Code submitted to SmartyStreets projects becomes property of SmartyStreets and must be compatible with the associated license. +- _Testing:_ If the code you are submitting resides in packages/modules covered by automated tests, be sure to add passing tests that cover your changes and assert expected behavior and state. Submit the additional test cases as part of your change set. +- _Style:_ Match your approach to **naming** and **formatting** with the surrounding code. Basically, the code you submit shouldn't stand out. + - "Naming" refers to such constructs as variables, methods, functions, classes, structs, interfaces, packages, modules, directories, files, etc... + - "Formatting" refers to such constructs as whitespace, horizontal line length, vertical function length, vertical file length, indentation, curly braces, etc... 
diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/LICENSE.md b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/LICENSE.md new file mode 100644 index 0000000000..3f87a40e77 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/LICENSE.md @@ -0,0 +1,23 @@ +Copyright (c) 2016 SmartyStreets, LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +NOTE: Various optional and subordinate components carry their own licensing +requirements and restrictions. Use of those components is subject to the terms +and conditions outlined the respective license of each component. 
diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/README.md b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/README.md new file mode 100644 index 0000000000..00df48066f --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/README.md @@ -0,0 +1,124 @@ +GoConvey is awesome Go testing +============================== + +[![Build Status](https://travis-ci.org/smartystreets/goconvey.png)](https://travis-ci.org/smartystreets/goconvey) +[![GoDoc](https://godoc.org/github.com/smartystreets/goconvey?status.svg)](http://godoc.org/github.com/smartystreets/goconvey) + + +Welcome to GoConvey, a yummy Go testing tool for gophers. Works with `go test`. Use it in the terminal or browser according to your viewing pleasure. **[View full feature tour.](http://goconvey.co)** + +**Features:** + +- Directly integrates with `go test` +- Fully-automatic web UI (works with native Go tests, too) +- Huge suite of regression tests +- Shows test coverage (Go 1.2+) +- Readable, colorized console output (understandable by any manager, IT or not) +- Test code generator +- Desktop notifications (optional) +- Immediately open problem lines in [Sublime Text](http://www.sublimetext.com) ([some assembly required](https://github.com/asuth/subl-handler)) + + +You can ask questions about how to use GoConvey on [StackOverflow](http://stackoverflow.com/questions/ask?tags=goconvey,go&title=GoConvey%3A%20). Use the tags `go` and `goconvey`. + +**Menu:** + +- [Installation](#installation) +- [Quick start](#quick-start) +- [Documentation](#documentation) +- [Screenshots](#screenshots) +- [Contributors](#contributors) + + + + +Installation +------------ + + $ go get github.com/smartystreets/goconvey + +[Quick start](https://github.com/smartystreets/goconvey/wiki#get-going-in-25-seconds) +----------- + +Make a test, for example: + +```go +package package_name + +import ( + "testing" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestSpec(t *testing.T) { + + // Only pass t into top-level Convey calls + Convey("Given some integer with a starting value", t, func() { + x := 1 + + Convey("When the integer is incremented", func() { + x++ + + Convey("The value should be greater by one", func() { + So(x, ShouldEqual, 2) + }) + }) + }) +} +``` + + +#### [In the browser](https://github.com/smartystreets/goconvey/wiki/Web-UI) + +Start up the GoConvey web server at your project's path: + + $ $GOPATH/bin/goconvey + +Then watch the test results display in your browser at: + + http://localhost:8080 + + +If the browser doesn't open automatically, please click [http://localhost:8080](http://localhost:8080) to open manually. + +There you have it. +![](http://d79i1fxsrar4t.cloudfront.net/goconvey.co/gc-1-dark.png) +As long as GoConvey is running, test results will automatically update in your browser window. + +![](http://d79i1fxsrar4t.cloudfront.net/goconvey.co/gc-5-dark.png) +The design is responsive, so you can squish the browser real tight if you need to put it beside your code. + + +The [web UI](https://github.com/smartystreets/goconvey/wiki/Web-UI) supports traditional Go tests, so use it even if you're not using GoConvey tests. + + + +#### [In the terminal](https://github.com/smartystreets/goconvey/wiki/Execution) + +Just do what you do best: + + $ go test + +Or if you want the output to include the story: + + $ go test -v + + +[Documentation](https://github.com/smartystreets/goconvey/wiki) +----------- + +Check out the + +- [GoConvey wiki](https://github.com/smartystreets/goconvey/wiki), +- [![GoDoc](https://godoc.org/github.com/smartystreets/goconvey?status.png)](http://godoc.org/github.com/smartystreets/goconvey) +- and the *_test.go files scattered throughout this project. + +[Screenshots](http://goconvey.co) +----------- + +For web UI and terminal screenshots, check out [the full feature tour](http://goconvey.co). 
+ +Contributors +---------------------- + +GoConvey is brought to you by [SmartyStreets](https://github.com/smartystreets) and [several contributors](https://github.com/smartystreets/goconvey/graphs/contributors) (Thanks!). diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/assertions.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/assertions.go new file mode 100644 index 0000000000..97e3bec82e --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/assertions.go @@ -0,0 +1,71 @@ +package convey + +import "github.com/smartystreets/assertions" + +var ( + ShouldEqual = assertions.ShouldEqual + ShouldNotEqual = assertions.ShouldNotEqual + ShouldAlmostEqual = assertions.ShouldAlmostEqual + ShouldNotAlmostEqual = assertions.ShouldNotAlmostEqual + ShouldResemble = assertions.ShouldResemble + ShouldNotResemble = assertions.ShouldNotResemble + ShouldPointTo = assertions.ShouldPointTo + ShouldNotPointTo = assertions.ShouldNotPointTo + ShouldBeNil = assertions.ShouldBeNil + ShouldNotBeNil = assertions.ShouldNotBeNil + ShouldBeTrue = assertions.ShouldBeTrue + ShouldBeFalse = assertions.ShouldBeFalse + ShouldBeZeroValue = assertions.ShouldBeZeroValue + ShouldNotBeZeroValue = assertions.ShouldNotBeZeroValue + + ShouldBeGreaterThan = assertions.ShouldBeGreaterThan + ShouldBeGreaterThanOrEqualTo = assertions.ShouldBeGreaterThanOrEqualTo + ShouldBeLessThan = assertions.ShouldBeLessThan + ShouldBeLessThanOrEqualTo = assertions.ShouldBeLessThanOrEqualTo + ShouldBeBetween = assertions.ShouldBeBetween + ShouldNotBeBetween = assertions.ShouldNotBeBetween + ShouldBeBetweenOrEqual = assertions.ShouldBeBetweenOrEqual + ShouldNotBeBetweenOrEqual = assertions.ShouldNotBeBetweenOrEqual + + ShouldContain = assertions.ShouldContain + ShouldNotContain = assertions.ShouldNotContain + ShouldContainKey = assertions.ShouldContainKey + ShouldNotContainKey = 
assertions.ShouldNotContainKey + ShouldBeIn = assertions.ShouldBeIn + ShouldNotBeIn = assertions.ShouldNotBeIn + ShouldBeEmpty = assertions.ShouldBeEmpty + ShouldNotBeEmpty = assertions.ShouldNotBeEmpty + ShouldHaveLength = assertions.ShouldHaveLength + + ShouldStartWith = assertions.ShouldStartWith + ShouldNotStartWith = assertions.ShouldNotStartWith + ShouldEndWith = assertions.ShouldEndWith + ShouldNotEndWith = assertions.ShouldNotEndWith + ShouldBeBlank = assertions.ShouldBeBlank + ShouldNotBeBlank = assertions.ShouldNotBeBlank + ShouldContainSubstring = assertions.ShouldContainSubstring + ShouldNotContainSubstring = assertions.ShouldNotContainSubstring + + ShouldPanic = assertions.ShouldPanic + ShouldNotPanic = assertions.ShouldNotPanic + ShouldPanicWith = assertions.ShouldPanicWith + ShouldNotPanicWith = assertions.ShouldNotPanicWith + + ShouldHaveSameTypeAs = assertions.ShouldHaveSameTypeAs + ShouldNotHaveSameTypeAs = assertions.ShouldNotHaveSameTypeAs + ShouldImplement = assertions.ShouldImplement + ShouldNotImplement = assertions.ShouldNotImplement + + ShouldHappenBefore = assertions.ShouldHappenBefore + ShouldHappenOnOrBefore = assertions.ShouldHappenOnOrBefore + ShouldHappenAfter = assertions.ShouldHappenAfter + ShouldHappenOnOrAfter = assertions.ShouldHappenOnOrAfter + ShouldHappenBetween = assertions.ShouldHappenBetween + ShouldHappenOnOrBetween = assertions.ShouldHappenOnOrBetween + ShouldNotHappenOnOrBetween = assertions.ShouldNotHappenOnOrBetween + ShouldHappenWithin = assertions.ShouldHappenWithin + ShouldNotHappenWithin = assertions.ShouldNotHappenWithin + ShouldBeChronological = assertions.ShouldBeChronological + + ShouldBeError = assertions.ShouldBeError +) diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/context.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/context.go new file mode 100644 index 0000000000..2c75c2d7b1 --- /dev/null +++ 
b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/context.go @@ -0,0 +1,272 @@ +package convey + +import ( + "fmt" + + "github.com/jtolds/gls" + "github.com/smartystreets/goconvey/convey/reporting" +) + +type conveyErr struct { + fmt string + params []interface{} +} + +func (e *conveyErr) Error() string { + return fmt.Sprintf(e.fmt, e.params...) +} + +func conveyPanic(fmt string, params ...interface{}) { + panic(&conveyErr{fmt, params}) +} + +const ( + missingGoTest = `Top-level calls to Convey(...) need a reference to the *testing.T. + Hint: Convey("description here", t, func() { /* notice that the second argument was the *testing.T (t)! */ }) ` + extraGoTest = `Only the top-level call to Convey(...) needs a reference to the *testing.T.` + noStackContext = "Convey operation made without context on goroutine stack.\n" + + "Hint: Perhaps you meant to use `Convey(..., func(c C){...})` ?" + differentConveySituations = "Different set of Convey statements on subsequent pass!\nDid not expect %#v." + multipleIdenticalConvey = "Multiple convey suites with identical names: %#v" +) + +const ( + failureHalt = "___FAILURE_HALT___" + + nodeKey = "node" +) + +///////////////////////////////// Stack Context ///////////////////////////////// + +func getCurrentContext() *context { + ctx, ok := ctxMgr.GetValue(nodeKey) + if ok { + return ctx.(*context) + } + return nil +} + +func mustGetCurrentContext() *context { + ctx := getCurrentContext() + if ctx == nil { + conveyPanic(noStackContext) + } + return ctx +} + +//////////////////////////////////// Context //////////////////////////////////// + +// context magically handles all coordination of Convey's and So assertions. +// +// It is tracked on the stack as goroutine-local-storage with the gls package, +// or explicitly if the user decides to call convey like: +// +// Convey(..., func(c C) { +// c.So(...) +// }) +// +// This implements the `C` interface. 
+type context struct { + reporter reporting.Reporter + + children map[string]*context + + resets []func() + + executedOnce bool + expectChildRun *bool + complete bool + + focus bool + failureMode FailureMode +} + +// rootConvey is the main entry point to a test suite. This is called when +// there's no context in the stack already, and items must contain a `t` object, +// or this panics. +func rootConvey(items ...interface{}) { + entry := discover(items) + + if entry.Test == nil { + conveyPanic(missingGoTest) + } + + expectChildRun := true + ctx := &context{ + reporter: buildReporter(), + + children: make(map[string]*context), + + expectChildRun: &expectChildRun, + + focus: entry.Focus, + failureMode: defaultFailureMode.combine(entry.FailMode), + } + ctxMgr.SetValues(gls.Values{nodeKey: ctx}, func() { + ctx.reporter.BeginStory(reporting.NewStoryReport(entry.Test)) + defer ctx.reporter.EndStory() + + for ctx.shouldVisit() { + ctx.conveyInner(entry.Situation, entry.Func) + expectChildRun = true + } + }) +} + +//////////////////////////////////// Methods //////////////////////////////////// + +func (ctx *context) SkipConvey(items ...interface{}) { + ctx.Convey(items, skipConvey) +} + +func (ctx *context) FocusConvey(items ...interface{}) { + ctx.Convey(items, focusConvey) +} + +func (ctx *context) Convey(items ...interface{}) { + entry := discover(items) + + // we're a branch, or leaf (on the wind) + if entry.Test != nil { + conveyPanic(extraGoTest) + } + if ctx.focus && !entry.Focus { + return + } + + var inner_ctx *context + if ctx.executedOnce { + var ok bool + inner_ctx, ok = ctx.children[entry.Situation] + if !ok { + conveyPanic(differentConveySituations, entry.Situation) + } + } else { + if _, ok := ctx.children[entry.Situation]; ok { + conveyPanic(multipleIdenticalConvey, entry.Situation) + } + inner_ctx = &context{ + reporter: ctx.reporter, + + children: make(map[string]*context), + + expectChildRun: ctx.expectChildRun, + + focus: entry.Focus, + failureMode: 
ctx.failureMode.combine(entry.FailMode), + } + ctx.children[entry.Situation] = inner_ctx + } + + if inner_ctx.shouldVisit() { + ctxMgr.SetValues(gls.Values{nodeKey: inner_ctx}, func() { + inner_ctx.conveyInner(entry.Situation, entry.Func) + }) + } +} + +func (ctx *context) SkipSo(stuff ...interface{}) { + ctx.assertionReport(reporting.NewSkipReport()) +} + +func (ctx *context) So(actual interface{}, assert assertion, expected ...interface{}) { + if result := assert(actual, expected...); result == assertionSuccess { + ctx.assertionReport(reporting.NewSuccessReport()) + } else { + ctx.assertionReport(reporting.NewFailureReport(result)) + } +} + +func (ctx *context) Reset(action func()) { + /* TODO: Failure mode configuration */ + ctx.resets = append(ctx.resets, action) +} + +func (ctx *context) Print(items ...interface{}) (int, error) { + fmt.Fprint(ctx.reporter, items...) + return fmt.Print(items...) +} + +func (ctx *context) Println(items ...interface{}) (int, error) { + fmt.Fprintln(ctx.reporter, items...) + return fmt.Println(items...) +} + +func (ctx *context) Printf(format string, items ...interface{}) (int, error) { + fmt.Fprintf(ctx.reporter, format, items...) + return fmt.Printf(format, items...) +} + +//////////////////////////////////// Private //////////////////////////////////// + +// shouldVisit returns true iff we should traverse down into a Convey. Note +// that just because we don't traverse a Convey this time, doesn't mean that +// we may not traverse it on a subsequent pass. +func (c *context) shouldVisit() bool { + return !c.complete && *c.expectChildRun +} + +// conveyInner is the function which actually executes the user's anonymous test +// function body. At this point, Convey or RootConvey has decided that this +// function should actually run. +func (ctx *context) conveyInner(situation string, f func(C)) { + // Record/Reset state for next time. 
+ defer func() { + ctx.executedOnce = true + + // This is only needed at the leaves, but there's no harm in also setting it + // when returning from branch Convey's + *ctx.expectChildRun = false + }() + + // Set up+tear down our scope for the reporter + ctx.reporter.Enter(reporting.NewScopeReport(situation)) + defer ctx.reporter.Exit() + + // Recover from any panics in f, and assign the `complete` status for this + // node of the tree. + defer func() { + ctx.complete = true + if problem := recover(); problem != nil { + if problem, ok := problem.(*conveyErr); ok { + panic(problem) + } + if problem != failureHalt { + ctx.reporter.Report(reporting.NewErrorReport(problem)) + } + } else { + for _, child := range ctx.children { + if !child.complete { + ctx.complete = false + return + } + } + } + }() + + // Resets are registered as the `f` function executes, so nil them here. + // All resets are run in registration order (FIFO). + ctx.resets = []func(){} + defer func() { + for _, r := range ctx.resets { + // panics handled by the previous defer + r() + } + }() + + if f == nil { + // if f is nil, this was either a Convey(..., nil), or a SkipConvey + ctx.reporter.Report(reporting.NewSkipReport()) + } else { + f(ctx) + } +} + +// assertionReport is a helper for So and SkipSo which makes the report and +// then possibly panics, depending on the current context's failureMode. 
+func (ctx *context) assertionReport(r *reporting.AssertionResult) { + ctx.reporter.Report(r) + if r.Failure != "" && ctx.failureMode == FailureHalts { + panic(failureHalt) + } +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/convey.goconvey b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/convey.goconvey new file mode 100644 index 0000000000..a2d9327dc9 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/convey.goconvey @@ -0,0 +1,4 @@ +#ignore +-timeout=1s +#-covermode=count +#-coverpkg=github.com/smartystreets/goconvey/convey,github.com/smartystreets/goconvey/convey/gotest,github.com/smartystreets/goconvey/convey/reporting \ No newline at end of file diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/discovery.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/discovery.go new file mode 100644 index 0000000000..eb8d4cb2ce --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/discovery.go @@ -0,0 +1,103 @@ +package convey + +type actionSpecifier uint8 + +const ( + noSpecifier actionSpecifier = iota + skipConvey + focusConvey +) + +type suite struct { + Situation string + Test t + Focus bool + Func func(C) // nil means skipped + FailMode FailureMode +} + +func newSuite(situation string, failureMode FailureMode, f func(C), test t, specifier actionSpecifier) *suite { + ret := &suite{ + Situation: situation, + Test: test, + Func: f, + FailMode: failureMode, + } + switch specifier { + case skipConvey: + ret.Func = nil + case focusConvey: + ret.Focus = true + } + return ret +} + +func discover(items []interface{}) *suite { + name, items := parseName(items) + test, items := parseGoTest(items) + failure, items := parseFailureMode(items) + action, items := parseAction(items) + specifier, 
items := parseSpecifier(items) + + if len(items) != 0 { + conveyPanic(parseError) + } + + return newSuite(name, failure, action, test, specifier) +} +func item(items []interface{}) interface{} { + if len(items) == 0 { + conveyPanic(parseError) + } + return items[0] +} +func parseName(items []interface{}) (string, []interface{}) { + if name, parsed := item(items).(string); parsed { + return name, items[1:] + } + conveyPanic(parseError) + panic("never get here") +} +func parseGoTest(items []interface{}) (t, []interface{}) { + if test, parsed := item(items).(t); parsed { + return test, items[1:] + } + return nil, items +} +func parseFailureMode(items []interface{}) (FailureMode, []interface{}) { + if mode, parsed := item(items).(FailureMode); parsed { + return mode, items[1:] + } + return FailureInherits, items +} +func parseAction(items []interface{}) (func(C), []interface{}) { + switch x := item(items).(type) { + case nil: + return nil, items[1:] + case func(C): + return x, items[1:] + case func(): + return func(C) { x() }, items[1:] + } + conveyPanic(parseError) + panic("never get here") +} +func parseSpecifier(items []interface{}) (actionSpecifier, []interface{}) { + if len(items) == 0 { + return noSpecifier, items + } + if spec, ok := items[0].(actionSpecifier); ok { + return spec, items[1:] + } + conveyPanic(parseError) + panic("never get here") +} + +// This interface allows us to pass the *testing.T struct +// throughout the internals of this package without ever +// having to import the "testing" package. +type t interface { + Fail() +} + +const parseError = "You must provide a name (string), then a *testing.T (if in outermost scope), an optional FailureMode, and then an action (func())." 
diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/doc.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/doc.go new file mode 100644 index 0000000000..a60e32ae49 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/doc.go @@ -0,0 +1,218 @@ +// Package convey contains all of the public-facing entry points to this project. +// This means that it should never be required of the user to import any other +// packages from this project as they serve internal purposes. +package convey + +import "github.com/smartystreets/goconvey/convey/reporting" + +////////////////////////////////// suite ////////////////////////////////// + +// C is the Convey context which you can optionally obtain in your action +// by calling Convey like: +// +// Convey(..., func(c C) { +// ... +// }) +// +// See the documentation on Convey for more details. +// +// All methods in this context behave identically to the global functions of the +// same name in this package. +type C interface { + Convey(items ...interface{}) + SkipConvey(items ...interface{}) + FocusConvey(items ...interface{}) + + So(actual interface{}, assert assertion, expected ...interface{}) + SkipSo(stuff ...interface{}) + + Reset(action func()) + + Println(items ...interface{}) (int, error) + Print(items ...interface{}) (int, error) + Printf(format string, items ...interface{}) (int, error) +} + +// Convey is the method intended for use when declaring the scopes of +// a specification. Each scope has a description and a func() which may contain +// other calls to Convey(), Reset() or Should-style assertions. Convey calls can +// be nested as far as you see fit. 
+// +// IMPORTANT NOTE: The top-level Convey() within a Test method +// must conform to the following signature: +// +// Convey(description string, t *testing.T, action func()) +// +// All other calls should look like this (no need to pass in *testing.T): +// +// Convey(description string, action func()) +// +// Don't worry, goconvey will panic if you get it wrong so you can fix it. +// +// Additionally, you may explicitly obtain access to the Convey context by doing: +// +// Convey(description string, action func(c C)) +// +// You may need to do this if you want to pass the context through to a +// goroutine, or to close over the context in a handler to a library which +// calls your handler in a goroutine (httptest comes to mind). +// +// All Convey()-blocks also accept an optional parameter of FailureMode which sets +// how goconvey should treat failures for So()-assertions in the block and +// nested blocks. See the constants in this file for the available options. +// +// By default it will inherit from its parent block and the top-level blocks +// default to the FailureHalts setting. +// +// This parameter is inserted before the block itself: +// +// Convey(description string, t *testing.T, mode FailureMode, action func()) +// Convey(description string, mode FailureMode, action func()) +// +// See the examples package for, well, examples. +func Convey(items ...interface{}) { + if ctx := getCurrentContext(); ctx == nil { + rootConvey(items...) + } else { + ctx.Convey(items...) + } +} + +// SkipConvey is analagous to Convey except that the scope is not executed +// (which means that child scopes defined within this scope are not run either). +// The reporter will be notified that this step was skipped. +func SkipConvey(items ...interface{}) { + Convey(append(items, skipConvey)...) +} + +// FocusConvey is has the inverse effect of SkipConvey. 
If the top-level +// Convey is changed to `FocusConvey`, only nested scopes that are defined +// with FocusConvey will be run. The rest will be ignored completely. This +// is handy when debugging a large suite that runs a misbehaving function +// repeatedly as you can disable all but one of that function +// without swaths of `SkipConvey` calls, just a targeted chain of calls +// to FocusConvey. +func FocusConvey(items ...interface{}) { + Convey(append(items, focusConvey)...) +} + +// Reset registers a cleanup function to be run after each Convey() +// in the same scope. See the examples package for a simple use case. +func Reset(action func()) { + mustGetCurrentContext().Reset(action) +} + +/////////////////////////////////// Assertions /////////////////////////////////// + +// assertion is an alias for a function with a signature that the convey.So() +// method can handle. Any future or custom assertions should conform to this +// method signature. The return value should be an empty string if the assertion +// passes and a well-formed failure message if not. +type assertion func(actual interface{}, expected ...interface{}) string + +const assertionSuccess = "" + +// So is the means by which assertions are made against the system under test. +// The majority of exported names in the assertions package begin with the word +// 'Should' and describe how the first argument (actual) should compare with any +// of the final (expected) arguments. How many final arguments are accepted +// depends on the particular assertion that is passed in as the assert argument. +// See the examples package for use cases and the assertions package for +// documentation on specific assertion methods. A failing assertion will +// cause t.Fail() to be invoked--you should never call this method (or other +// failure-inducing methods) in your test code. Leave that to GoConvey. 
+func So(actual interface{}, assert assertion, expected ...interface{}) { + mustGetCurrentContext().So(actual, assert, expected...) +} + +// SkipSo is analagous to So except that the assertion that would have been passed +// to So is not executed and the reporter is notified that the assertion was skipped. +func SkipSo(stuff ...interface{}) { + mustGetCurrentContext().SkipSo() +} + +// FailureMode is a type which determines how the So() blocks should fail +// if their assertion fails. See constants further down for acceptable values +type FailureMode string + +const ( + + // FailureContinues is a failure mode which prevents failing + // So()-assertions from halting Convey-block execution, instead + // allowing the test to continue past failing So()-assertions. + FailureContinues FailureMode = "continue" + + // FailureHalts is the default setting for a top-level Convey()-block + // and will cause all failing So()-assertions to halt further execution + // in that test-arm and continue on to the next arm. + FailureHalts FailureMode = "halt" + + // FailureInherits is the default setting for failure-mode, it will + // default to the failure-mode of the parent block. You should never + // need to specify this mode in your tests.. + FailureInherits FailureMode = "inherits" +) + +func (f FailureMode) combine(other FailureMode) FailureMode { + if other == FailureInherits { + return f + } + return other +} + +var defaultFailureMode FailureMode = FailureHalts + +// SetDefaultFailureMode allows you to specify the default failure mode +// for all Convey blocks. It is meant to be used in an init function to +// allow the default mode to be changdd across all tests for an entire packgae +// but it can be used anywhere. 
+func SetDefaultFailureMode(mode FailureMode) { + if mode == FailureContinues || mode == FailureHalts { + defaultFailureMode = mode + } else { + panic("You may only use the constants named 'FailureContinues' and 'FailureHalts' as default failure modes.") + } +} + +//////////////////////////////////// Print functions //////////////////////////////////// + +// Print is analogous to fmt.Print (and it even calls fmt.Print). It ensures that +// output is aligned with the corresponding scopes in the web UI. +func Print(items ...interface{}) (written int, err error) { + return mustGetCurrentContext().Print(items...) +} + +// Print is analogous to fmt.Println (and it even calls fmt.Println). It ensures that +// output is aligned with the corresponding scopes in the web UI. +func Println(items ...interface{}) (written int, err error) { + return mustGetCurrentContext().Println(items...) +} + +// Print is analogous to fmt.Printf (and it even calls fmt.Printf). It ensures that +// output is aligned with the corresponding scopes in the web UI. +func Printf(format string, items ...interface{}) (written int, err error) { + return mustGetCurrentContext().Printf(format, items...) +} + +/////////////////////////////////////////////////////////////////////////////// + +// SuppressConsoleStatistics prevents automatic printing of console statistics. +// Calling PrintConsoleStatistics explicitly will force printing of statistics. +func SuppressConsoleStatistics() { + reporting.SuppressConsoleStatistics() +} + +// PrintConsoleStatistics may be called at any time to print assertion statistics. +// Generally, the best place to do this would be in a TestMain function, +// after all tests have been run. 
Something like this: +// +// func TestMain(m *testing.M) { +// convey.SuppressConsoleStatistics() +// result := m.Run() +// convey.PrintConsoleStatistics() +// os.Exit(result) +// } +// +func PrintConsoleStatistics() { + reporting.PrintConsoleStatistics() +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/focused_execution_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/focused_execution_test.go new file mode 100644 index 0000000000..294e32fa17 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/focused_execution_test.go @@ -0,0 +1,72 @@ +package convey + +import "testing" + +func TestFocusOnlyAtTopLevel(t *testing.T) { + output := prepare() + + FocusConvey("hi", t, func() { + output += "done" + }) + + expectEqual(t, "done", output) +} + +func TestFocus(t *testing.T) { + output := prepare() + + FocusConvey("hi", t, func() { + output += "1" + + Convey("bye", func() { + output += "2" + }) + }) + + expectEqual(t, "1", output) +} + +func TestNestedFocus(t *testing.T) { + output := prepare() + + FocusConvey("hi", t, func() { + output += "1" + + Convey("This shouldn't run", func() { + output += "boink!" + }) + + FocusConvey("This should run", func() { + output += "2" + + FocusConvey("The should run too", func() { + output += "3" + + }) + + Convey("The should NOT run", func() { + output += "blah blah blah!" 
+ }) + }) + }) + + expectEqual(t, "123", output) +} + +func TestForgotTopLevelFocus(t *testing.T) { + output := prepare() + + Convey("1", t, func() { + output += "1" + + FocusConvey("This will be run because the top-level lacks Focus", func() { + output += "2" + }) + + Convey("3", func() { + output += "3" + }) + }) + + expectEqual(t, "1213", output) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/gotest/doc_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/gotest/doc_test.go new file mode 100644 index 0000000000..1b6406be99 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/gotest/doc_test.go @@ -0,0 +1 @@ +package gotest diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/gotest/utils.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/gotest/utils.go new file mode 100644 index 0000000000..167c8fb74a --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/gotest/utils.go @@ -0,0 +1,28 @@ +// Package gotest contains internal functionality. Although this package +// contains one or more exported names it is not intended for public +// consumption. See the examples package for how to use this project. +package gotest + +import ( + "runtime" + "strings" +) + +func ResolveExternalCaller() (file string, line int, name string) { + var caller_id uintptr + callers := runtime.Callers(0, callStack) + + for x := 0; x < callers; x++ { + caller_id, file, line, _ = runtime.Caller(x) + if strings.HasSuffix(file, "_test.go") || strings.HasSuffix(file, "_tests.go") { + name = runtime.FuncForPC(caller_id).Name() + return + } + } + file, line, name = "", -1, "" + return // panic? +} + +const maxStackDepth = 100 // This had better be enough... 
+ +var callStack []uintptr = make([]uintptr, maxStackDepth, maxStackDepth) diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/init.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/init.go new file mode 100644 index 0000000000..cb930a0db4 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/init.go @@ -0,0 +1,81 @@ +package convey + +import ( + "flag" + "os" + + "github.com/jtolds/gls" + "github.com/smartystreets/assertions" + "github.com/smartystreets/goconvey/convey/reporting" +) + +func init() { + assertions.GoConveyMode(true) + + declareFlags() + + ctxMgr = gls.NewContextManager() +} + +func declareFlags() { + flag.BoolVar(&json, "convey-json", false, "When true, emits results in JSON blocks. Default: 'false'") + flag.BoolVar(&silent, "convey-silent", false, "When true, all output from GoConvey is suppressed.") + flag.BoolVar(&story, "convey-story", false, "When true, emits story output, otherwise emits dot output. When not provided, this flag mirrors the value of the '-test.v' flag") + + if noStoryFlagProvided() { + story = verboseEnabled + } + + // FYI: flag.Parse() is called from the testing package. +} + +func noStoryFlagProvided() bool { + return !story && !storyDisabled +} + +func buildReporter() reporting.Reporter { + selectReporter := os.Getenv("GOCONVEY_REPORTER") + + switch { + case testReporter != nil: + return testReporter + case json || selectReporter == "json": + return reporting.BuildJsonReporter() + case silent || selectReporter == "silent": + return reporting.BuildSilentReporter() + case selectReporter == "dot": + // Story is turned on when verbose is set, so we need to check for dot reporter first. 
+ return reporting.BuildDotReporter() + case story || selectReporter == "story": + return reporting.BuildStoryReporter() + default: + return reporting.BuildDotReporter() + } +} + +var ( + ctxMgr *gls.ContextManager + + // only set by internal tests + testReporter reporting.Reporter +) + +var ( + json bool + silent bool + story bool + + verboseEnabled = flagFound("-test.v=true") + storyDisabled = flagFound("-story=false") +) + +// flagFound parses the command line args manually for flags defined in other +// packages. Like the '-v' flag from the "testing" package, for instance. +func flagFound(flagValue string) bool { + for _, arg := range os.Args { + if arg == flagValue { + return true + } + } + return false +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/isolated_execution_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/isolated_execution_test.go new file mode 100644 index 0000000000..7e22b3caa5 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/isolated_execution_test.go @@ -0,0 +1,774 @@ +package convey + +import ( + "strconv" + "testing" + "time" +) + +func TestSingleScope(t *testing.T) { + output := prepare() + + Convey("hi", t, func() { + output += "done" + }) + + expectEqual(t, "done", output) +} + +func TestSingleScopeWithMultipleConveys(t *testing.T) { + output := prepare() + + Convey("1", t, func() { + output += "1" + }) + + Convey("2", t, func() { + output += "2" + }) + + expectEqual(t, "12", output) +} + +func TestNestedScopes(t *testing.T) { + output := prepare() + + Convey("a", t, func() { + output += "a " + + Convey("bb", func() { + output += "bb " + + Convey("ccc", func() { + output += "ccc | " + }) + }) + }) + + expectEqual(t, "a bb ccc | ", output) +} + +func TestNestedScopesWithIsolatedExecution(t *testing.T) { + output := prepare() + + Convey("a", t, func() { + output += "a " + + 
Convey("aa", func() { + output += "aa " + + Convey("aaa", func() { + output += "aaa | " + }) + + Convey("aaa1", func() { + output += "aaa1 | " + }) + }) + + Convey("ab", func() { + output += "ab " + + Convey("abb", func() { + output += "abb | " + }) + }) + }) + + expectEqual(t, "a aa aaa | a aa aaa1 | a ab abb | ", output) +} + +func TestSingleScopeWithConveyAndNestedReset(t *testing.T) { + output := prepare() + + Convey("1", t, func() { + output += "1" + + Reset(func() { + output += "a" + }) + }) + + expectEqual(t, "1a", output) +} + +func TestPanicingReset(t *testing.T) { + output := prepare() + + Convey("1", t, func() { + output += "1" + + Reset(func() { + panic("nooo") + }) + + Convey("runs since the reset hasn't yet", func() { + output += "a" + }) + + Convey("but this doesnt", func() { + output += "nope" + }) + }) + + expectEqual(t, "1a", output) +} + +func TestSingleScopeWithMultipleRegistrationsAndReset(t *testing.T) { + output := prepare() + + Convey("reset after each nested convey", t, func() { + Convey("first output", func() { + output += "1" + }) + + Convey("second output", func() { + output += "2" + }) + + Reset(func() { + output += "a" + }) + }) + + expectEqual(t, "1a2a", output) +} + +func TestSingleScopeWithMultipleRegistrationsAndMultipleResets(t *testing.T) { + output := prepare() + + Convey("each reset is run at end of each nested convey", t, func() { + Convey("1", func() { + output += "1" + }) + + Convey("2", func() { + output += "2" + }) + + Reset(func() { + output += "a" + }) + + Reset(func() { + output += "b" + }) + }) + + expectEqual(t, "1ab2ab", output) +} + +func Test_Failure_AtHigherLevelScopePreventsChildScopesFromRunning(t *testing.T) { + output := prepare() + + Convey("This step fails", t, func() { + So(1, ShouldEqual, 2) + + Convey("this should NOT be executed", func() { + output += "a" + }) + }) + + expectEqual(t, "", output) +} + +func Test_Panic_AtHigherLevelScopePreventsChildScopesFromRunning(t *testing.T) { + output := prepare() + 
+ Convey("This step panics", t, func() { + Convey("this happens, because the panic didn't happen yet", func() { + output += "1" + }) + + output += "a" + + Convey("this should NOT be executed", func() { + output += "2" + }) + + output += "b" + + panic("Hi") + + output += "nope" + }) + + expectEqual(t, "1ab", output) +} + +func Test_Panic_InChildScopeDoes_NOT_PreventExecutionOfSiblingScopes(t *testing.T) { + output := prepare() + + Convey("This is the parent", t, func() { + Convey("This step panics", func() { + panic("Hi") + output += "1" + }) + + Convey("This sibling should execute", func() { + output += "2" + }) + }) + + expectEqual(t, "2", output) +} + +func Test_Failure_InChildScopeDoes_NOT_PreventExecutionOfSiblingScopes(t *testing.T) { + output := prepare() + + Convey("This is the parent", t, func() { + Convey("This step fails", func() { + So(1, ShouldEqual, 2) + output += "1" + }) + + Convey("This sibling should execute", func() { + output += "2" + }) + }) + + expectEqual(t, "2", output) +} + +func TestResetsAreAlwaysExecutedAfterScope_Panics(t *testing.T) { + output := prepare() + + Convey("This is the parent", t, func() { + Convey("This step panics", func() { + panic("Hi") + output += "1" + }) + + Convey("This sibling step does not panic", func() { + output += "a" + + Reset(func() { + output += "b" + }) + }) + + Reset(func() { + output += "2" + }) + }) + + expectEqual(t, "2ab2", output) +} + +func TestResetsAreAlwaysExecutedAfterScope_Failures(t *testing.T) { + output := prepare() + + Convey("This is the parent", t, func() { + Convey("This step fails", func() { + So(1, ShouldEqual, 2) + output += "1" + }) + + Convey("This sibling step does not fail", func() { + output += "a" + + Reset(func() { + output += "b" + }) + }) + + Reset(func() { + output += "2" + }) + }) + + expectEqual(t, "2ab2", output) +} + +func TestSkipTopLevel(t *testing.T) { + output := prepare() + + SkipConvey("hi", t, func() { + output += "This shouldn't be executed!" 
+ }) + + expectEqual(t, "", output) +} + +func TestSkipNestedLevel(t *testing.T) { + output := prepare() + + Convey("hi", t, func() { + output += "yes" + + SkipConvey("bye", func() { + output += "no" + }) + }) + + expectEqual(t, "yes", output) +} + +func TestSkipNestedLevelSkipsAllChildLevels(t *testing.T) { + output := prepare() + + Convey("hi", t, func() { + output += "yes" + + SkipConvey("bye", func() { + output += "no" + + Convey("byebye", func() { + output += "no-no" + }) + }) + }) + + expectEqual(t, "yes", output) +} + +func TestIterativeConveys(t *testing.T) { + output := prepare() + + Convey("Test", t, func() { + for x := 0; x < 10; x++ { + y := strconv.Itoa(x) + + Convey(y, func() { + output += y + }) + } + }) + + expectEqual(t, "0123456789", output) +} + +func TestClosureVariables(t *testing.T) { + output := prepare() + + i := 0 + + Convey("A", t, func() { + i = i + 1 + j := i + + output += "A" + strconv.Itoa(i) + " " + + Convey("B", func() { + k := j + j = j + 1 + + output += "B" + strconv.Itoa(k) + " " + + Convey("C", func() { + output += "C" + strconv.Itoa(k) + strconv.Itoa(j) + " " + }) + + Convey("D", func() { + output += "D" + strconv.Itoa(k) + strconv.Itoa(j) + " " + }) + }) + + Convey("C", func() { + output += "C" + strconv.Itoa(j) + " " + }) + }) + + output += "D" + strconv.Itoa(i) + " " + + expectEqual(t, "A1 B1 C12 A2 B2 D23 A3 C3 D3 ", output) +} + +func TestClosureVariablesWithReset(t *testing.T) { + output := prepare() + + i := 0 + + Convey("A", t, func() { + i = i + 1 + j := i + + output += "A" + strconv.Itoa(i) + " " + + Reset(func() { + output += "R" + strconv.Itoa(i) + strconv.Itoa(j) + " " + }) + + Convey("B", func() { + output += "B" + strconv.Itoa(j) + " " + }) + + Convey("C", func() { + output += "C" + strconv.Itoa(j) + " " + }) + }) + + output += "D" + strconv.Itoa(i) + " " + + expectEqual(t, "A1 B1 R11 A2 C2 R22 D2 ", output) +} + +func TestWrappedSimple(t *testing.T) { + prepare() + output := resetTestString{""} + + Convey("A", t, 
func() { + func() { + output.output += "A " + + Convey("B", func() { + output.output += "B " + + Convey("C", func() { + output.output += "C " + }) + + }) + + Convey("D", func() { + output.output += "D " + }) + }() + }) + + expectEqual(t, "A B C A D ", output.output) +} + +type resetTestString struct { + output string +} + +func addReset(o *resetTestString, f func()) func() { + return func() { + Reset(func() { + o.output += "R " + }) + + f() + } +} + +func TestWrappedReset(t *testing.T) { + prepare() + output := resetTestString{""} + + Convey("A", t, addReset(&output, func() { + output.output += "A " + + Convey("B", func() { + output.output += "B " + }) + + Convey("C", func() { + output.output += "C " + }) + })) + + expectEqual(t, "A B R A C R ", output.output) +} + +func TestWrappedReset2(t *testing.T) { + prepare() + output := resetTestString{""} + + Convey("A", t, func() { + Reset(func() { + output.output += "R " + }) + + func() { + output.output += "A " + + Convey("B", func() { + output.output += "B " + + Convey("C", func() { + output.output += "C " + }) + }) + + Convey("D", func() { + output.output += "D " + }) + }() + }) + + expectEqual(t, "A B C R A D R ", output.output) +} + +func TestInfiniteLoopWithTrailingFail(t *testing.T) { + done := make(chan int) + + go func() { + Convey("This fails", t, func() { + Convey("and this is run", func() { + So(true, ShouldEqual, true) + }) + + /* And this prevents the whole block to be marked as run */ + So(false, ShouldEqual, true) + }) + + done <- 1 + }() + + select { + case <-done: + return + case <-time.After(1 * time.Millisecond): + t.Fail() + } +} + +func TestOutermostResetInvokedForGrandchildren(t *testing.T) { + output := prepare() + + Convey("A", t, func() { + output += "A " + + Reset(func() { + output += "rA " + }) + + Convey("B", func() { + output += "B " + + Reset(func() { + output += "rB " + }) + + Convey("C", func() { + output += "C " + + Reset(func() { + output += "rC " + }) + }) + + Convey("D", func() { + 
output += "D " + + Reset(func() { + output += "rD " + }) + }) + }) + }) + + expectEqual(t, "A B C rC rB rA A B D rD rB rA ", output) +} + +func TestFailureOption(t *testing.T) { + output := prepare() + + Convey("A", t, FailureHalts, func() { + output += "A " + So(true, ShouldEqual, true) + output += "B " + So(false, ShouldEqual, true) + output += "C " + }) + + expectEqual(t, "A B ", output) +} + +func TestFailureOption2(t *testing.T) { + output := prepare() + + Convey("A", t, func() { + output += "A " + So(true, ShouldEqual, true) + output += "B " + So(false, ShouldEqual, true) + output += "C " + }) + + expectEqual(t, "A B ", output) +} + +func TestFailureOption3(t *testing.T) { + output := prepare() + + Convey("A", t, FailureContinues, func() { + output += "A " + So(true, ShouldEqual, true) + output += "B " + So(false, ShouldEqual, true) + output += "C " + }) + + expectEqual(t, "A B C ", output) +} + +func TestFailureOptionInherit(t *testing.T) { + output := prepare() + + Convey("A", t, FailureContinues, func() { + output += "A1 " + So(false, ShouldEqual, true) + output += "A2 " + + Convey("B", func() { + output += "B1 " + So(true, ShouldEqual, true) + output += "B2 " + So(false, ShouldEqual, true) + output += "B3 " + }) + }) + + expectEqual(t, "A1 A2 B1 B2 B3 ", output) +} + +func TestFailureOptionInherit2(t *testing.T) { + output := prepare() + + Convey("A", t, FailureHalts, func() { + output += "A1 " + So(false, ShouldEqual, true) + output += "A2 " + + Convey("B", func() { + output += "A1 " + So(true, ShouldEqual, true) + output += "A2 " + So(false, ShouldEqual, true) + output += "A3 " + }) + }) + + expectEqual(t, "A1 ", output) +} + +func TestFailureOptionInherit3(t *testing.T) { + output := prepare() + + Convey("A", t, FailureHalts, func() { + output += "A1 " + So(true, ShouldEqual, true) + output += "A2 " + + Convey("B", func() { + output += "B1 " + So(true, ShouldEqual, true) + output += "B2 " + So(false, ShouldEqual, true) + output += "B3 " + }) + }) + + 
expectEqual(t, "A1 A2 B1 B2 ", output) +} + +func TestFailureOptionNestedOverride(t *testing.T) { + output := prepare() + + Convey("A", t, FailureContinues, func() { + output += "A " + So(false, ShouldEqual, true) + output += "B " + + Convey("C", FailureHalts, func() { + output += "C " + So(true, ShouldEqual, true) + output += "D " + So(false, ShouldEqual, true) + output += "E " + }) + }) + + expectEqual(t, "A B C D ", output) +} + +func TestFailureOptionNestedOverride2(t *testing.T) { + output := prepare() + + Convey("A", t, FailureHalts, func() { + output += "A " + So(true, ShouldEqual, true) + output += "B " + + Convey("C", FailureContinues, func() { + output += "C " + So(true, ShouldEqual, true) + output += "D " + So(false, ShouldEqual, true) + output += "E " + }) + }) + + expectEqual(t, "A B C D E ", output) +} + +func TestMultipleInvocationInheritance(t *testing.T) { + output := prepare() + + Convey("A", t, FailureHalts, func() { + output += "A1 " + So(true, ShouldEqual, true) + output += "A2 " + + Convey("B", FailureContinues, func() { + output += "B1 " + So(true, ShouldEqual, true) + output += "B2 " + So(false, ShouldEqual, true) + output += "B3 " + }) + + Convey("C", func() { + output += "C1 " + So(true, ShouldEqual, true) + output += "C2 " + So(false, ShouldEqual, true) + output += "C3 " + }) + }) + + expectEqual(t, "A1 A2 B1 B2 B3 A1 A2 C1 C2 ", output) +} + +func TestMultipleInvocationInheritance2(t *testing.T) { + output := prepare() + + Convey("A", t, FailureContinues, func() { + output += "A1 " + So(true, ShouldEqual, true) + output += "A2 " + So(false, ShouldEqual, true) + output += "A3 " + + Convey("B", FailureHalts, func() { + output += "B1 " + So(true, ShouldEqual, true) + output += "B2 " + So(false, ShouldEqual, true) + output += "B3 " + }) + + Convey("C", func() { + output += "C1 " + So(true, ShouldEqual, true) + output += "C2 " + So(false, ShouldEqual, true) + output += "C3 " + }) + }) + + expectEqual(t, "A1 A2 A3 B1 B2 A1 A2 A3 C1 C2 C3 ", 
output) +} + +func TestSetDefaultFailureMode(t *testing.T) { + output := prepare() + + SetDefaultFailureMode(FailureContinues) // the default is normally FailureHalts + defer SetDefaultFailureMode(FailureHalts) + + Convey("A", t, func() { + output += "A1 " + So(true, ShouldBeFalse) + output += "A2 " + }) + + expectEqual(t, "A1 A2 ", output) +} + +func prepare() string { + testReporter = newNilReporter() + return "" +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/nilReporter.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/nilReporter.go new file mode 100644 index 0000000000..777b2a5122 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/nilReporter.go @@ -0,0 +1,15 @@ +package convey + +import ( + "github.com/smartystreets/goconvey/convey/reporting" +) + +type nilReporter struct{} + +func (self *nilReporter) BeginStory(story *reporting.StoryReport) {} +func (self *nilReporter) Enter(scope *reporting.ScopeReport) {} +func (self *nilReporter) Report(report *reporting.AssertionResult) {} +func (self *nilReporter) Exit() {} +func (self *nilReporter) EndStory() {} +func (self *nilReporter) Write(p []byte) (int, error) { return len(p), nil } +func newNilReporter() *nilReporter { return &nilReporter{} } diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/console.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/console.go new file mode 100644 index 0000000000..7bf67dbb2b --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/console.go @@ -0,0 +1,16 @@ +package reporting + +import ( + "fmt" + "io" +) + +type console struct{} + +func (self *console) Write(p []byte) (n int, err error) { + return fmt.Print(string(p)) +} + +func NewConsole() io.Writer { + return 
new(console) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/doc.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/doc.go new file mode 100644 index 0000000000..a37d001946 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/doc.go @@ -0,0 +1,5 @@ +// Package reporting contains internal functionality related +// to console reporting and output. Although this package has +// exported names is not intended for public consumption. See the +// examples package for how to use this project. +package reporting diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/dot.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/dot.go new file mode 100644 index 0000000000..47d57c6b0d --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/dot.go @@ -0,0 +1,40 @@ +package reporting + +import "fmt" + +type dot struct{ out *Printer } + +func (self *dot) BeginStory(story *StoryReport) {} + +func (self *dot) Enter(scope *ScopeReport) {} + +func (self *dot) Report(report *AssertionResult) { + if report.Error != nil { + fmt.Print(redColor) + self.out.Insert(dotError) + } else if report.Failure != "" { + fmt.Print(yellowColor) + self.out.Insert(dotFailure) + } else if report.Skipped { + fmt.Print(yellowColor) + self.out.Insert(dotSkip) + } else { + fmt.Print(greenColor) + self.out.Insert(dotSuccess) + } + fmt.Print(resetColor) +} + +func (self *dot) Exit() {} + +func (self *dot) EndStory() {} + +func (self *dot) Write(content []byte) (written int, err error) { + return len(content), nil // no-op +} + +func NewDotReporter(out *Printer) *dot { + self := new(dot) + self.out = out + return self +} diff --git 
a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/dot_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/dot_test.go new file mode 100644 index 0000000000..a8d20d46f0 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/dot_test.go @@ -0,0 +1,40 @@ +package reporting + +import ( + "errors" + "testing" +) + +func TestDotReporterAssertionPrinting(t *testing.T) { + monochrome() + file := newMemoryFile() + printer := NewPrinter(file) + reporter := NewDotReporter(printer) + + reporter.Report(NewSuccessReport()) + reporter.Report(NewFailureReport("failed")) + reporter.Report(NewErrorReport(errors.New("error"))) + reporter.Report(NewSkipReport()) + + expected := dotSuccess + dotFailure + dotError + dotSkip + + if file.buffer != expected { + t.Errorf("\nExpected: '%s'\nActual: '%s'", expected, file.buffer) + } +} + +func TestDotReporterOnlyReportsAssertions(t *testing.T) { + monochrome() + file := newMemoryFile() + printer := NewPrinter(file) + reporter := NewDotReporter(printer) + + reporter.BeginStory(nil) + reporter.Enter(nil) + reporter.Exit() + reporter.EndStory() + + if file.buffer != "" { + t.Errorf("\nExpected: '(blank)'\nActual: '%s'", file.buffer) + } +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/gotest.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/gotest.go new file mode 100644 index 0000000000..c396e16b17 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/gotest.go @@ -0,0 +1,33 @@ +package reporting + +type gotestReporter struct{ test T } + +func (self *gotestReporter) BeginStory(story *StoryReport) { + self.test = story.Test +} + +func (self *gotestReporter) Enter(scope *ScopeReport) {} + +func (self 
*gotestReporter) Report(r *AssertionResult) { + if !passed(r) { + self.test.Fail() + } +} + +func (self *gotestReporter) Exit() {} + +func (self *gotestReporter) EndStory() { + self.test = nil +} + +func (self *gotestReporter) Write(content []byte) (written int, err error) { + return len(content), nil // no-op +} + +func NewGoTestReporter() *gotestReporter { + return new(gotestReporter) +} + +func passed(r *AssertionResult) bool { + return r.Error == nil && r.Failure == "" +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/gotest_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/gotest_test.go new file mode 100644 index 0000000000..fda189458e --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/gotest_test.go @@ -0,0 +1,66 @@ +package reporting + +import "testing" + +func TestReporterReceivesSuccessfulReport(t *testing.T) { + reporter := NewGoTestReporter() + test := new(fakeTest) + reporter.BeginStory(NewStoryReport(test)) + reporter.Report(NewSuccessReport()) + + if test.failed { + t.Errorf("Should have have marked test as failed--the report reflected success.") + } +} + +func TestReporterReceivesFailureReport(t *testing.T) { + reporter := NewGoTestReporter() + test := new(fakeTest) + reporter.BeginStory(NewStoryReport(test)) + reporter.Report(NewFailureReport("This is a failure.")) + + if !test.failed { + t.Errorf("Test should have been marked as failed (but it wasn't).") + } +} + +func TestReporterReceivesErrorReport(t *testing.T) { + reporter := NewGoTestReporter() + test := new(fakeTest) + reporter.BeginStory(NewStoryReport(test)) + reporter.Report(NewErrorReport("This is an error.")) + + if !test.failed { + t.Errorf("Test should have been marked as failed (but it wasn't).") + } +} + +func TestReporterIsResetAtTheEndOfTheStory(t *testing.T) { + defer catch(t) + reporter := 
NewGoTestReporter() + test := new(fakeTest) + reporter.BeginStory(NewStoryReport(test)) + reporter.EndStory() + + reporter.Report(NewSuccessReport()) +} + +func TestReporterNoopMethods(t *testing.T) { + reporter := NewGoTestReporter() + reporter.Enter(NewScopeReport("title")) + reporter.Exit() +} + +func catch(t *testing.T) { + if r := recover(); r != nil { + t.Log("Getting to this point means we've passed (because we caught a panic appropriately).") + } +} + +type fakeTest struct { + failed bool +} + +func (self *fakeTest) Fail() { + self.failed = true +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/init.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/init.go new file mode 100644 index 0000000000..99c3bd6d61 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/init.go @@ -0,0 +1,94 @@ +package reporting + +import ( + "os" + "runtime" + "strings" +) + +func init() { + if !isColorableTerminal() { + monochrome() + } + + if runtime.GOOS == "windows" { + success, failure, error_ = dotSuccess, dotFailure, dotError + } +} + +func BuildJsonReporter() Reporter { + out := NewPrinter(NewConsole()) + return NewReporters( + NewGoTestReporter(), + NewJsonReporter(out)) +} +func BuildDotReporter() Reporter { + out := NewPrinter(NewConsole()) + return NewReporters( + NewGoTestReporter(), + NewDotReporter(out), + NewProblemReporter(out), + consoleStatistics) +} +func BuildStoryReporter() Reporter { + out := NewPrinter(NewConsole()) + return NewReporters( + NewGoTestReporter(), + NewStoryReporter(out), + NewProblemReporter(out), + consoleStatistics) +} +func BuildSilentReporter() Reporter { + out := NewPrinter(NewConsole()) + return NewReporters( + NewGoTestReporter(), + NewSilentProblemReporter(out)) +} + +var ( + newline = "\n" + success = "✔" + failure = "✘" + error_ = "🔥" + skip = "⚠" + dotSuccess = 
"." + dotFailure = "x" + dotError = "E" + dotSkip = "S" + errorTemplate = "* %s \nLine %d: - %v \n%s\n" + failureTemplate = "* %s \nLine %d:\n%s\n%s\n" +) + +var ( + greenColor = "\033[32m" + yellowColor = "\033[33m" + redColor = "\033[31m" + resetColor = "\033[0m" +) + +var consoleStatistics = NewStatisticsReporter(NewPrinter(NewConsole())) + +func SuppressConsoleStatistics() { consoleStatistics.Suppress() } +func PrintConsoleStatistics() { consoleStatistics.PrintSummary() } + +// QuietMode disables all console output symbols. This is only meant to be used +// for tests that are internal to goconvey where the output is distracting or +// otherwise not needed in the test output. +func QuietMode() { + success, failure, error_, skip, dotSuccess, dotFailure, dotError, dotSkip = "", "", "", "", "", "", "", "" +} + +func monochrome() { + greenColor, yellowColor, redColor, resetColor = "", "", "", "" +} + +func isColorableTerminal() bool { + return strings.Contains(os.Getenv("TERM"), "color") +} + +// This interface allows us to pass the *testing.T struct +// throughout the internals of this tool without ever +// having to import the "testing" package. 
+type T interface { + Fail() +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/json.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/json.go new file mode 100644 index 0000000000..f8526979f8 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/json.go @@ -0,0 +1,88 @@ +// TODO: under unit test + +package reporting + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type JsonReporter struct { + out *Printer + currentKey []string + current *ScopeResult + index map[string]*ScopeResult + scopes []*ScopeResult +} + +func (self *JsonReporter) depth() int { return len(self.currentKey) } + +func (self *JsonReporter) BeginStory(story *StoryReport) {} + +func (self *JsonReporter) Enter(scope *ScopeReport) { + self.currentKey = append(self.currentKey, scope.Title) + ID := strings.Join(self.currentKey, "|") + if _, found := self.index[ID]; !found { + next := newScopeResult(scope.Title, self.depth(), scope.File, scope.Line) + self.scopes = append(self.scopes, next) + self.index[ID] = next + } + self.current = self.index[ID] +} + +func (self *JsonReporter) Report(report *AssertionResult) { + self.current.Assertions = append(self.current.Assertions, report) +} + +func (self *JsonReporter) Exit() { + self.currentKey = self.currentKey[:len(self.currentKey)-1] +} + +func (self *JsonReporter) EndStory() { + self.report() + self.reset() +} +func (self *JsonReporter) report() { + scopes := []string{} + for _, scope := range self.scopes { + serialized, err := json.Marshal(scope) + if err != nil { + self.out.Println(jsonMarshalFailure) + panic(err) + } + var buffer bytes.Buffer + json.Indent(&buffer, serialized, "", " ") + scopes = append(scopes, buffer.String()) + } + self.out.Print(fmt.Sprintf("%s\n%s,\n%s\n", OpenJson, strings.Join(scopes, ","), CloseJson)) +} +func (self *JsonReporter) reset() { 
+ self.scopes = []*ScopeResult{} + self.index = map[string]*ScopeResult{} + self.currentKey = nil +} + +func (self *JsonReporter) Write(content []byte) (written int, err error) { + self.current.Output += string(content) + return len(content), nil +} + +func NewJsonReporter(out *Printer) *JsonReporter { + self := new(JsonReporter) + self.out = out + self.reset() + return self +} + +const OpenJson = ">->->OPEN-JSON->->->" // "⌦" +const CloseJson = "<-<-<-CLOSE-JSON<-<-<" // "⌫" +const jsonMarshalFailure = ` + +GOCONVEY_JSON_MARSHALL_FAILURE: There was an error when attempting to convert test results to JSON. +Please file a bug report and reference the code that caused this failure if possible. + +Here's the panic: + +` diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/printer.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/printer.go new file mode 100644 index 0000000000..3dac0d4d28 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/printer.go @@ -0,0 +1,60 @@ +package reporting + +import ( + "fmt" + "io" + "strings" +) + +type Printer struct { + out io.Writer + prefix string +} + +func (self *Printer) Println(message string, values ...interface{}) { + formatted := self.format(message, values...) + newline + self.out.Write([]byte(formatted)) +} + +func (self *Printer) Print(message string, values ...interface{}) { + formatted := self.format(message, values...) + self.out.Write([]byte(formatted)) +} + +func (self *Printer) Insert(text string) { + self.out.Write([]byte(text)) +} + +func (self *Printer) format(message string, values ...interface{}) string { + var formatted string + if len(values) == 0 { + formatted = self.prefix + message + } else { + formatted = self.prefix + fmt_Sprintf(message, values...) 
+ } + indented := strings.Replace(formatted, newline, newline+self.prefix, -1) + return strings.TrimRight(indented, space) +} + +// Extracting fmt.Sprintf to a separate variable circumvents go vet, which, as of go 1.10 is run with go test. +var fmt_Sprintf = fmt.Sprintf + +func (self *Printer) Indent() { + self.prefix += pad +} + +func (self *Printer) Dedent() { + if len(self.prefix) >= padLength { + self.prefix = self.prefix[:len(self.prefix)-padLength] + } +} + +func NewPrinter(out io.Writer) *Printer { + self := new(Printer) + self.out = out + return self +} + +const space = " " +const pad = space + space +const padLength = len(pad) diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/printer_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/printer_test.go new file mode 100644 index 0000000000..23829ff5e0 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/printer_test.go @@ -0,0 +1,181 @@ +package reporting + +import "testing" + +func TestPrint(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + const expected = "Hello, World!" 
+ + printer.Print(expected) + + if file.buffer != expected { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestPrintFormat(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + template := "Hi, %s" + name := "Ralph" + expected := "Hi, Ralph" + + printer.Print(template, name) + + if file.buffer != expected { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestPrintPreservesEncodedStrings(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + const expected = "= -> %%3D" + printer.Print(expected) + + if file.buffer != expected { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestPrintln(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + const expected = "Hello, World!" + + printer.Println(expected) + + if file.buffer != expected+"\n" { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestPrintlnFormat(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + template := "Hi, %s" + name := "Ralph" + expected := "Hi, Ralph\n" + + printer.Println(template, name) + + if file.buffer != expected { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestPrintlnPreservesEncodedStrings(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + const expected = "= -> %%3D" + printer.Println(expected) + + if file.buffer != expected+"\n" { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestPrintIndented(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + const message = "Hello, World!\nGoodbye, World!" + const expected = " Hello, World!\n Goodbye, World!" 
+ + printer.Indent() + printer.Print(message) + + if file.buffer != expected { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestPrintDedented(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + const expected = "Hello, World!\nGoodbye, World!" + + printer.Indent() + printer.Dedent() + printer.Print(expected) + + if file.buffer != expected { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestPrintlnIndented(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + const message = "Hello, World!\nGoodbye, World!" + const expected = " Hello, World!\n Goodbye, World!\n" + + printer.Indent() + printer.Println(message) + + if file.buffer != expected { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestPrintlnDedented(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + const expected = "Hello, World!\nGoodbye, World!" + + printer.Indent() + printer.Dedent() + printer.Println(expected) + + if file.buffer != expected+"\n" { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestDedentTooFarShouldNotPanic(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Error("Should not have panicked!") + } + }() + file := newMemoryFile() + printer := NewPrinter(file) + + printer.Dedent() + + t.Log("Getting to this point without panicking means we passed.") +} + +func TestInsert(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + + printer.Indent() + printer.Print("Hi") + printer.Insert(" there") + printer.Dedent() + + expected := " Hi there" + if file.buffer != expected { + t.Errorf("Should have written '%s' but instead wrote '%s'.", expected, file.buffer) + } +} + +////////////////// memoryFile //////////////////// + +type memoryFile struct { + buffer string +} + +func (self *memoryFile) Write(p []byte) (n int, err error) { + self.buffer += string(p) + 
return len(p), nil +} + +func (self *memoryFile) String() string { + return self.buffer +} + +func newMemoryFile() *memoryFile { + return new(memoryFile) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/problems.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/problems.go new file mode 100644 index 0000000000..33d5e14767 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/problems.go @@ -0,0 +1,80 @@ +package reporting + +import "fmt" + +type problem struct { + silent bool + out *Printer + errors []*AssertionResult + failures []*AssertionResult +} + +func (self *problem) BeginStory(story *StoryReport) {} + +func (self *problem) Enter(scope *ScopeReport) {} + +func (self *problem) Report(report *AssertionResult) { + if report.Error != nil { + self.errors = append(self.errors, report) + } else if report.Failure != "" { + self.failures = append(self.failures, report) + } +} + +func (self *problem) Exit() {} + +func (self *problem) EndStory() { + self.show(self.showErrors, redColor) + self.show(self.showFailures, yellowColor) + self.prepareForNextStory() +} +func (self *problem) show(display func(), color string) { + if !self.silent { + fmt.Print(color) + } + display() + if !self.silent { + fmt.Print(resetColor) + } + self.out.Dedent() +} +func (self *problem) showErrors() { + for i, e := range self.errors { + if i == 0 { + self.out.Println("\nErrors:\n") + self.out.Indent() + } + self.out.Println(errorTemplate, e.File, e.Line, e.Error, e.StackTrace) + } +} +func (self *problem) showFailures() { + for i, f := range self.failures { + if i == 0 { + self.out.Println("\nFailures:\n") + self.out.Indent() + } + self.out.Println(failureTemplate, f.File, f.Line, f.Failure, f.StackTrace) + } +} + +func (self *problem) Write(content []byte) (written int, err error) { + return len(content), nil // no-op 
+} + +func NewProblemReporter(out *Printer) *problem { + self := new(problem) + self.out = out + self.prepareForNextStory() + return self +} + +func NewSilentProblemReporter(out *Printer) *problem { + self := NewProblemReporter(out) + self.silent = true + return self +} + +func (self *problem) prepareForNextStory() { + self.errors = []*AssertionResult{} + self.failures = []*AssertionResult{} +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/problems_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/problems_test.go new file mode 100644 index 0000000000..92f0ca35cc --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/problems_test.go @@ -0,0 +1,51 @@ +package reporting + +import ( + "strings" + "testing" +) + +func TestNoopProblemReporterActions(t *testing.T) { + file, reporter := setup() + reporter.BeginStory(nil) + reporter.Enter(nil) + reporter.Exit() + expected := "" + actual := file.String() + if expected != actual { + t.Errorf("Expected: '(blank)'\nActual: '%s'", actual) + } +} + +func TestReporterPrintsFailuresAndErrorsAtTheEndOfTheStory(t *testing.T) { + file, reporter := setup() + reporter.Report(NewFailureReport("failed")) + reporter.Report(NewErrorReport("error")) + reporter.Report(NewSuccessReport()) + reporter.EndStory() + + result := file.String() + if !strings.Contains(result, "Errors:\n") { + t.Errorf("Expected errors, found none.") + } + if !strings.Contains(result, "Failures:\n") { + t.Errorf("Expected failures, found none.") + } + + // Each stack trace looks like: `* /path/to/file.go`, so look for `* `. + // With go 1.4+ there is a line in some stack traces that looks like this: + // `testing.(*M).Run(0x2082d60a0, 0x25b7c0)` + // So we can't just look for "*" anymore. 
+ problemCount := strings.Count(result, "* ") + if problemCount != 2 { + t.Errorf("Expected one failure and one error (total of 2 '*' characters). Got %d", problemCount) + } +} + +func setup() (file *memoryFile, reporter *problem) { + monochrome() + file = newMemoryFile() + printer := NewPrinter(file) + reporter = NewProblemReporter(printer) + return +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/reporter.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/reporter.go new file mode 100644 index 0000000000..cce6c5e438 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/reporter.go @@ -0,0 +1,39 @@ +package reporting + +import "io" + +type Reporter interface { + BeginStory(story *StoryReport) + Enter(scope *ScopeReport) + Report(r *AssertionResult) + Exit() + EndStory() + io.Writer +} + +type reporters struct{ collection []Reporter } + +func (self *reporters) BeginStory(s *StoryReport) { self.foreach(func(r Reporter) { r.BeginStory(s) }) } +func (self *reporters) Enter(s *ScopeReport) { self.foreach(func(r Reporter) { r.Enter(s) }) } +func (self *reporters) Report(a *AssertionResult) { self.foreach(func(r Reporter) { r.Report(a) }) } +func (self *reporters) Exit() { self.foreach(func(r Reporter) { r.Exit() }) } +func (self *reporters) EndStory() { self.foreach(func(r Reporter) { r.EndStory() }) } + +func (self *reporters) Write(contents []byte) (written int, err error) { + self.foreach(func(r Reporter) { + written, err = r.Write(contents) + }) + return written, err +} + +func (self *reporters) foreach(action func(Reporter)) { + for _, r := range self.collection { + action(r) + } +} + +func NewReporters(collection ...Reporter) *reporters { + self := new(reporters) + self.collection = collection + return self +} diff --git 
a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/reporter_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/reporter_test.go new file mode 100644 index 0000000000..35f0c5742f --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/reporter_test.go @@ -0,0 +1,94 @@ +package reporting + +import ( + "runtime" + "testing" +) + +func TestEachNestedReporterReceivesTheCallFromTheContainingReporter(t *testing.T) { + fake1 := newFakeReporter() + fake2 := newFakeReporter() + reporter := NewReporters(fake1, fake2) + + reporter.BeginStory(nil) + assertTrue(t, fake1.begun) + assertTrue(t, fake2.begun) + + reporter.Enter(NewScopeReport("scope")) + assertTrue(t, fake1.entered) + assertTrue(t, fake2.entered) + + reporter.Report(NewSuccessReport()) + assertTrue(t, fake1.reported) + assertTrue(t, fake2.reported) + + reporter.Exit() + assertTrue(t, fake1.exited) + assertTrue(t, fake2.exited) + + reporter.EndStory() + assertTrue(t, fake1.ended) + assertTrue(t, fake2.ended) + + content := []byte("hi") + written, err := reporter.Write(content) + assertTrue(t, fake1.written) + assertTrue(t, fake2.written) + assertEqual(t, written, len(content)) + assertNil(t, err) + +} + +func assertTrue(t *testing.T, value bool) { + if !value { + _, _, line, _ := runtime.Caller(1) + t.Errorf("Value should have been true (but was false). See line %d", line) + } +} + +func assertEqual(t *testing.T, expected, actual int) { + if actual != expected { + _, _, line, _ := runtime.Caller(1) + t.Errorf("Value should have been %d (but was %d). See line %d", expected, actual, line) + } +} + +func assertNil(t *testing.T, err error) { + if err != nil { + _, _, line, _ := runtime.Caller(1) + t.Errorf("Error should have been (but wasn't). See line %d. 
%v", err, line) + } +} + +type fakeReporter struct { + begun bool + entered bool + reported bool + exited bool + ended bool + written bool +} + +func newFakeReporter() *fakeReporter { + return &fakeReporter{} +} + +func (self *fakeReporter) BeginStory(story *StoryReport) { + self.begun = true +} +func (self *fakeReporter) Enter(scope *ScopeReport) { + self.entered = true +} +func (self *fakeReporter) Report(report *AssertionResult) { + self.reported = true +} +func (self *fakeReporter) Exit() { + self.exited = true +} +func (self *fakeReporter) EndStory() { + self.ended = true +} +func (self *fakeReporter) Write(content []byte) (int, error) { + self.written = true + return len(content), nil +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey new file mode 100644 index 0000000000..79982854b5 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey @@ -0,0 +1,2 @@ +#ignore +-timeout=1s diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/reports.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/reports.go new file mode 100644 index 0000000000..712e6ade62 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/reports.go @@ -0,0 +1,179 @@ +package reporting + +import ( + "encoding/json" + "fmt" + "runtime" + "strings" + + "github.com/smartystreets/goconvey/convey/gotest" +) + +////////////////// ScopeReport //////////////////// + +type ScopeReport struct { + Title string + File string + Line int +} + +func NewScopeReport(title string) *ScopeReport { + file, line, _ := gotest.ResolveExternalCaller() + self := new(ScopeReport) + 
self.Title = title + self.File = file + self.Line = line + return self +} + +////////////////// ScopeResult //////////////////// + +type ScopeResult struct { + Title string + File string + Line int + Depth int + Assertions []*AssertionResult + Output string +} + +func newScopeResult(title string, depth int, file string, line int) *ScopeResult { + self := new(ScopeResult) + self.Title = title + self.Depth = depth + self.File = file + self.Line = line + self.Assertions = []*AssertionResult{} + return self +} + +/////////////////// StoryReport ///////////////////// + +type StoryReport struct { + Test T + Name string + File string + Line int +} + +func NewStoryReport(test T) *StoryReport { + file, line, name := gotest.ResolveExternalCaller() + name = removePackagePath(name) + self := new(StoryReport) + self.Test = test + self.Name = name + self.File = file + self.Line = line + return self +} + +// name comes in looking like "github.com/smartystreets/goconvey/examples.TestName". +// We only want the stuff after the last '.', which is the name of the test function. +func removePackagePath(name string) string { + parts := strings.Split(name, ".") + return parts[len(parts)-1] +} + +/////////////////// FailureView //////////////////////// + +// This struct is also declared in github.com/smartystreets/assertions. +// The json struct tags should be equal in both declarations. 
+type FailureView struct { + Message string `json:"Message"` + Expected string `json:"Expected"` + Actual string `json:"Actual"` +} + +////////////////////AssertionResult ////////////////////// + +type AssertionResult struct { + File string + Line int + Expected string + Actual string + Failure string + Error interface{} + StackTrace string + Skipped bool +} + +func NewFailureReport(failure string) *AssertionResult { + report := new(AssertionResult) + report.File, report.Line = caller() + report.StackTrace = stackTrace() + parseFailure(failure, report) + return report +} +func parseFailure(failure string, report *AssertionResult) { + view := new(FailureView) + err := json.Unmarshal([]byte(failure), view) + if err == nil { + report.Failure = view.Message + report.Expected = view.Expected + report.Actual = view.Actual + } else { + report.Failure = failure + } +} +func NewErrorReport(err interface{}) *AssertionResult { + report := new(AssertionResult) + report.File, report.Line = caller() + report.StackTrace = fullStackTrace() + report.Error = fmt.Sprintf("%v", err) + return report +} +func NewSuccessReport() *AssertionResult { + return new(AssertionResult) +} +func NewSkipReport() *AssertionResult { + report := new(AssertionResult) + report.File, report.Line = caller() + report.StackTrace = fullStackTrace() + report.Skipped = true + return report +} + +func caller() (file string, line int) { + file, line, _ = gotest.ResolveExternalCaller() + return +} + +func stackTrace() string { + buffer := make([]byte, 1024*64) + n := runtime.Stack(buffer, false) + return removeInternalEntries(string(buffer[:n])) +} +func fullStackTrace() string { + buffer := make([]byte, 1024*64) + n := runtime.Stack(buffer, true) + return removeInternalEntries(string(buffer[:n])) +} +func removeInternalEntries(stack string) string { + lines := strings.Split(stack, newline) + filtered := []string{} + for _, line := range lines { + if !isExternal(line) { + filtered = append(filtered, line) + } + } 
+ return strings.Join(filtered, newline) +} +func isExternal(line string) bool { + for _, p := range internalPackages { + if strings.Contains(line, p) { + return true + } + } + return false +} + +// NOTE: any new packages that host goconvey packages will need to be added here! +// An alternative is to scan the goconvey directory and then exclude stuff like +// the examples package but that's nasty too. +var internalPackages = []string{ + "goconvey/assertions", + "goconvey/convey", + "goconvey/execution", + "goconvey/gotest", + "goconvey/reporting", +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/statistics.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/statistics.go new file mode 100644 index 0000000000..c3ccd056a0 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/statistics.go @@ -0,0 +1,108 @@ +package reporting + +import ( + "fmt" + "sync" +) + +func (self *statistics) BeginStory(story *StoryReport) {} + +func (self *statistics) Enter(scope *ScopeReport) {} + +func (self *statistics) Report(report *AssertionResult) { + self.Lock() + defer self.Unlock() + + if !self.failing && report.Failure != "" { + self.failing = true + } + if !self.erroring && report.Error != nil { + self.erroring = true + } + if report.Skipped { + self.skipped += 1 + } else { + self.total++ + } +} + +func (self *statistics) Exit() {} + +func (self *statistics) EndStory() { + self.Lock() + defer self.Unlock() + + if !self.suppressed { + self.printSummaryLocked() + } +} + +func (self *statistics) Suppress() { + self.Lock() + defer self.Unlock() + self.suppressed = true +} + +func (self *statistics) PrintSummary() { + self.Lock() + defer self.Unlock() + self.printSummaryLocked() +} + +func (self *statistics) printSummaryLocked() { + self.reportAssertionsLocked() + self.reportSkippedSectionsLocked() + 
self.completeReportLocked() +} +func (self *statistics) reportAssertionsLocked() { + self.decideColorLocked() + self.out.Print("\n%d total %s", self.total, plural("assertion", self.total)) +} +func (self *statistics) decideColorLocked() { + if self.failing && !self.erroring { + fmt.Print(yellowColor) + } else if self.erroring { + fmt.Print(redColor) + } else { + fmt.Print(greenColor) + } +} +func (self *statistics) reportSkippedSectionsLocked() { + if self.skipped > 0 { + fmt.Print(yellowColor) + self.out.Print(" (one or more sections skipped)") + } +} +func (self *statistics) completeReportLocked() { + fmt.Print(resetColor) + self.out.Print("\n") + self.out.Print("\n") +} + +func (self *statistics) Write(content []byte) (written int, err error) { + return len(content), nil // no-op +} + +func NewStatisticsReporter(out *Printer) *statistics { + self := statistics{} + self.out = out + return &self +} + +type statistics struct { + sync.Mutex + + out *Printer + total int + failing bool + erroring bool + skipped int + suppressed bool +} + +func plural(word string, count int) string { + if count == 1 { + return word + } + return word + "s" +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/story.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/story.go new file mode 100644 index 0000000000..9e73c971f8 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting/story.go @@ -0,0 +1,73 @@ +// TODO: in order for this reporter to be completely honest +// we need to retrofit to be more like the json reporter such that: +// 1. it maintains ScopeResult collections, which count assertions +// 2. it reports only after EndStory(), so that all tick marks +// are placed near the appropriate title. +// 3. 
Under unit test + +package reporting + +import ( + "fmt" + "strings" +) + +type story struct { + out *Printer + titlesById map[string]string + currentKey []string +} + +func (self *story) BeginStory(story *StoryReport) {} + +func (self *story) Enter(scope *ScopeReport) { + self.out.Indent() + + self.currentKey = append(self.currentKey, scope.Title) + ID := strings.Join(self.currentKey, "|") + + if _, found := self.titlesById[ID]; !found { + self.out.Println("") + self.out.Print(scope.Title) + self.out.Insert(" ") + self.titlesById[ID] = scope.Title + } +} + +func (self *story) Report(report *AssertionResult) { + if report.Error != nil { + fmt.Print(redColor) + self.out.Insert(error_) + } else if report.Failure != "" { + fmt.Print(yellowColor) + self.out.Insert(failure) + } else if report.Skipped { + fmt.Print(yellowColor) + self.out.Insert(skip) + } else { + fmt.Print(greenColor) + self.out.Insert(success) + } + fmt.Print(resetColor) +} + +func (self *story) Exit() { + self.out.Dedent() + self.currentKey = self.currentKey[:len(self.currentKey)-1] +} + +func (self *story) EndStory() { + self.titlesById = make(map[string]string) + self.out.Println("\n") +} + +func (self *story) Write(content []byte) (written int, err error) { + return len(content), nil // no-op +} + +func NewStoryReporter(out *Printer) *story { + self := new(story) + self.out = out + self.titlesById = make(map[string]string) + return self +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting_hooks_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting_hooks_test.go new file mode 100644 index 0000000000..69125c3cf4 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/reporting_hooks_test.go @@ -0,0 +1,317 @@ +package convey + +import ( + "fmt" + "net/http" + "net/http/httptest" + "path" + "runtime" + "strconv" + "strings" + "testing" + + 
"github.com/smartystreets/goconvey/convey/reporting" +) + +func TestSingleScopeReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + So(1, ShouldEqual, 1) + }) + + expectEqual(t, "Begin|A|Success|Exit|End", myReporter.wholeStory()) +} + +func TestNestedScopeReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + Convey("B", func() { + So(1, ShouldEqual, 1) + }) + }) + + expectEqual(t, "Begin|A|B|Success|Exit|Exit|End", myReporter.wholeStory()) +} + +func TestFailureReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + So(1, ShouldBeNil) + }) + + expectEqual(t, "Begin|A|Failure|Exit|End", myReporter.wholeStory()) +} + +func TestFirstFailureEndsScopeExecution(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + So(1, ShouldBeNil) + So(nil, ShouldBeNil) + }) + + expectEqual(t, "Begin|A|Failure|Exit|End", myReporter.wholeStory()) +} + +func TestComparisonFailureDeserializedAndReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + So("hi", ShouldEqual, "bye") + }) + + expectEqual(t, "Begin|A|Failure(bye/hi)|Exit|End", myReporter.wholeStory()) +} + +func TestNestedFailureReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + Convey("B", func() { + So(2, ShouldBeNil) + }) + }) + + expectEqual(t, "Begin|A|B|Failure|Exit|Exit|End", myReporter.wholeStory()) +} + +func TestSuccessAndFailureReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + So(nil, ShouldBeNil) + So(1, ShouldBeNil) + }) + + expectEqual(t, "Begin|A|Success|Failure|Exit|End", myReporter.wholeStory()) +} + +func TestIncompleteActionReportedAsSkipped(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + Convey("B", nil) + }) + + expectEqual(t, 
"Begin|A|B|Skipped|Exit|Exit|End", myReporter.wholeStory()) +} + +func TestSkippedConveyReportedAsSkipped(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + SkipConvey("B", func() { + So(1, ShouldEqual, 1) + }) + }) + + expectEqual(t, "Begin|A|B|Skipped|Exit|Exit|End", myReporter.wholeStory()) +} + +func TestMultipleSkipsAreReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + Convey("0", func() { + So(nil, ShouldBeNil) + }) + + SkipConvey("1", func() {}) + SkipConvey("2", func() {}) + + Convey("3", nil) + Convey("4", nil) + + Convey("5", func() { + So(nil, ShouldBeNil) + }) + }) + + expected := "Begin" + + "|A|0|Success|Exit|Exit" + + "|A|1|Skipped|Exit|Exit" + + "|A|2|Skipped|Exit|Exit" + + "|A|3|Skipped|Exit|Exit" + + "|A|4|Skipped|Exit|Exit" + + "|A|5|Success|Exit|Exit" + + "|End" + + expectEqual(t, expected, myReporter.wholeStory()) +} + +func TestSkippedAssertionIsNotReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + SkipSo(1, ShouldEqual, 1) + }) + + expectEqual(t, "Begin|A|Skipped|Exit|End", myReporter.wholeStory()) +} + +func TestMultipleSkippedAssertionsAreNotReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + SkipSo(1, ShouldEqual, 1) + So(1, ShouldEqual, 1) + SkipSo(1, ShouldEqual, 1) + }) + + expectEqual(t, "Begin|A|Skipped|Success|Skipped|Exit|End", myReporter.wholeStory()) +} + +func TestErrorByManualPanicReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + panic("Gopher alert!") + }) + + expectEqual(t, "Begin|A|Error|Exit|End", myReporter.wholeStory()) +} + +func TestIterativeConveysReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + for x := 0; x < 3; x++ { + Convey(strconv.Itoa(x), func() { + So(x, ShouldEqual, x) + }) + } + }) + + expectEqual(t, 
"Begin|A|0|Success|Exit|Exit|A|1|Success|Exit|Exit|A|2|Success|Exit|Exit|End", myReporter.wholeStory()) +} + +func TestNestedIterativeConveysReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + for x := 0; x < 3; x++ { + Convey(strconv.Itoa(x), func() { + for y := 0; y < 3; y++ { + Convey("< "+strconv.Itoa(y), func() { + So(x, ShouldBeLessThan, y) + }) + } + }) + } + }) + + expectEqual(t, ("Begin|" + + "A|0|< 0|Failure|Exit|Exit|Exit|" + + "A|0|< 1|Success|Exit|Exit|Exit|" + + "A|0|< 2|Success|Exit|Exit|Exit|" + + "A|1|< 0|Failure|Exit|Exit|Exit|" + + "A|1|< 1|Failure|Exit|Exit|Exit|" + + "A|1|< 2|Success|Exit|Exit|Exit|" + + "A|2|< 0|Failure|Exit|Exit|Exit|" + + "A|2|< 1|Failure|Exit|Exit|Exit|" + + "A|2|< 2|Failure|Exit|Exit|Exit|" + + "End"), myReporter.wholeStory()) +} + +func TestEmbeddedAssertionReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func(c C) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + c.So(r.FormValue("msg"), ShouldEqual, "ping") + })) + http.DefaultClient.Get(ts.URL + "?msg=ping") + }) + + expectEqual(t, "Begin|A|Success|Exit|End", myReporter.wholeStory()) +} + +func TestEmbeddedContextHelperReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + helper := func(c C) http.HandlerFunc { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + c.Convey("Embedded", func() { + So(r.FormValue("msg"), ShouldEqual, "ping") + }) + }) + } + + Convey("A", test, func(c C) { + ts := httptest.NewServer(helper(c)) + http.DefaultClient.Get(ts.URL + "?msg=ping") + }) + + expectEqual(t, "Begin|A|Embedded|Success|Exit|Exit|End", myReporter.wholeStory()) +} + +func expectEqual(t *testing.T, expected interface{}, actual interface{}) { + if expected != actual { + _, file, line, _ := runtime.Caller(1) + t.Errorf("Expected '%v' to be '%v' but it wasn't. 
See '%s' at line %d.", + actual, expected, path.Base(file), line) + } +} + +func setupFakeReporter() (*fakeReporter, *fakeGoTest) { + myReporter := new(fakeReporter) + myReporter.calls = []string{} + testReporter = myReporter + return myReporter, new(fakeGoTest) +} + +type fakeReporter struct { + calls []string +} + +func (self *fakeReporter) BeginStory(story *reporting.StoryReport) { + self.calls = append(self.calls, "Begin") +} + +func (self *fakeReporter) Enter(scope *reporting.ScopeReport) { + self.calls = append(self.calls, scope.Title) +} + +func (self *fakeReporter) Report(report *reporting.AssertionResult) { + if report.Error != nil { + self.calls = append(self.calls, "Error") + } else if report.Failure != "" { + message := "Failure" + if report.Expected != "" || report.Actual != "" { + message += fmt.Sprintf("(%s/%s)", report.Expected, report.Actual) + } + self.calls = append(self.calls, message) + } else if report.Skipped { + self.calls = append(self.calls, "Skipped") + } else { + self.calls = append(self.calls, "Success") + } +} + +func (self *fakeReporter) Exit() { + self.calls = append(self.calls, "Exit") +} + +func (self *fakeReporter) EndStory() { + self.calls = append(self.calls, "End") +} + +func (self *fakeReporter) Write(content []byte) (int, error) { + return len(content), nil // no-op +} + +func (self *fakeReporter) wholeStory() string { + return strings.Join(self.calls, "|") +} + +//////////////////////////////// + +type fakeGoTest struct{} + +func (self *fakeGoTest) Fail() {} +func (self *fakeGoTest) Fatalf(format string, args ...interface{}) {} + +var test t = new(fakeGoTest) diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/story_conventions_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/story_conventions_test.go new file mode 100644 index 0000000000..7bdd398653 --- /dev/null +++ 
b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/convey/story_conventions_test.go @@ -0,0 +1,175 @@ +package convey + +import ( + "reflect" + "testing" +) + +func expectPanic(t *testing.T, f string) interface{} { + r := recover() + if r != nil { + if cp, ok := r.(*conveyErr); ok { + if cp.fmt != f { + t.Error("Incorrect panic message.") + } + } else { + t.Errorf("Incorrect panic type. %s", reflect.TypeOf(r)) + } + } else { + t.Error("Expected panic but none occurred") + } + return r +} + +func TestMissingTopLevelGoTestReferenceCausesPanic(t *testing.T) { + output := map[string]bool{} + + defer expectEqual(t, false, output["good"]) + defer expectPanic(t, missingGoTest) + + Convey("Hi", func() { + output["bad"] = true // this shouldn't happen + }) +} + +func TestMissingTopLevelGoTestReferenceAfterGoodExample(t *testing.T) { + output := map[string]bool{} + + defer func() { + expectEqual(t, true, output["good"]) + expectEqual(t, false, output["bad"]) + }() + defer expectPanic(t, missingGoTest) + + Convey("Good example", t, func() { + output["good"] = true + }) + + Convey("Bad example", func() { + output["bad"] = true // shouldn't happen + }) +} + +func TestExtraReferencePanics(t *testing.T) { + output := map[string]bool{} + + defer expectEqual(t, false, output["bad"]) + defer expectPanic(t, extraGoTest) + + Convey("Good example", t, func() { + Convey("Bad example - passing in *testing.T a second time!", t, func() { + output["bad"] = true // shouldn't happen + }) + }) +} + +func TestParseRegistrationMissingRequiredElements(t *testing.T) { + defer expectPanic(t, parseError) + + Convey() +} + +func TestParseRegistration_MissingNameString(t *testing.T) { + defer expectPanic(t, parseError) + + Convey(func() {}) +} + +func TestParseRegistration_MissingActionFunc(t *testing.T) { + defer expectPanic(t, parseError) + + Convey("Hi there", 12345) +} + +func TestFailureModeNoContext(t *testing.T) { + Convey("Foo", t, func() { + done := make(chan 
int, 1) + go func() { + defer func() { done <- 1 }() + defer expectPanic(t, noStackContext) + So(len("I have no context"), ShouldBeGreaterThan, 0) + }() + <-done + }) +} + +func TestFailureModeDuplicateSuite(t *testing.T) { + Convey("cool", t, func() { + defer expectPanic(t, multipleIdenticalConvey) + + Convey("dup", nil) + Convey("dup", nil) + }) +} + +func TestFailureModeIndeterminentSuiteNames(t *testing.T) { + defer expectPanic(t, differentConveySituations) + + name := "bob" + Convey("cool", t, func() { + for i := 0; i < 3; i++ { + Convey(name, func() {}) + name += "bob" + } + }) +} + +func TestFailureModeNestedIndeterminentSuiteNames(t *testing.T) { + defer expectPanic(t, differentConveySituations) + + name := "bob" + Convey("cool", t, func() { + Convey("inner", func() { + for i := 0; i < 3; i++ { + Convey(name, func() {}) + name += "bob" + } + }) + }) +} + +func TestFailureModeParameterButMissing(t *testing.T) { + defer expectPanic(t, parseError) + + prepare() + + Convey("Foobar", t, FailureHalts) +} + +func TestFailureModeParameterWithAction(t *testing.T) { + prepare() + + Convey("Foobar", t, FailureHalts, func() {}) +} + +func TestExtraConveyParameters(t *testing.T) { + defer expectPanic(t, parseError) + + prepare() + + Convey("Foobar", t, FailureHalts, func() {}, "This is not supposed to be here") +} + +func TestExtraConveyParameters2(t *testing.T) { + defer expectPanic(t, parseError) + + prepare() + + Convey("Foobar", t, func() {}, "This is not supposed to be here") +} + +func TestExtraConveyParameters3(t *testing.T) { + defer expectPanic(t, parseError) + + output := prepare() + + Convey("A", t, func() { + output += "A " + + Convey("B", func() { + output += "B " + }, "This is not supposed to be here") + }) + + expectEqual(t, "A ", output) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/dependencies.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/dependencies.go new file 
mode 100644 index 0000000000..0839e27fdf --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/dependencies.go @@ -0,0 +1,4 @@ +package main + +import _ "github.com/jtolds/gls" +import _ "github.com/smartystreets/assertions" diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/examples/assertion_examples_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/examples/assertion_examples_test.go new file mode 100644 index 0000000000..54af874250 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/examples/assertion_examples_test.go @@ -0,0 +1,126 @@ +package examples + +import ( + "bytes" + "io" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" +) + +func TestAssertionsAreAvailableFromConveyPackage(t *testing.T) { + SetDefaultFailureMode(FailureContinues) + defer SetDefaultFailureMode(FailureHalts) + + Convey("Equality assertions should be accessible", t, func() { + thing1a := thing{a: "asdf"} + thing1b := thing{a: "asdf"} + thing2 := thing{a: "qwer"} + + So(1, ShouldEqual, 1) + So(1, ShouldNotEqual, 2) + So(1, ShouldAlmostEqual, 1.000000000000001) + So(1, ShouldNotAlmostEqual, 2, 0.5) + So(thing1a, ShouldResemble, thing1b) + So(thing1a, ShouldNotResemble, thing2) + So(&thing1a, ShouldPointTo, &thing1a) + So(&thing1a, ShouldNotPointTo, &thing1b) + So(nil, ShouldBeNil) + So(1, ShouldNotBeNil) + So(true, ShouldBeTrue) + So(false, ShouldBeFalse) + So(0, ShouldBeZeroValue) + So(1, ShouldNotBeZeroValue) + }) + + Convey("Numeric comparison assertions should be accessible", t, func() { + So(1, ShouldBeGreaterThan, 0) + So(1, ShouldBeGreaterThanOrEqualTo, 1) + So(1, ShouldBeLessThan, 2) + So(1, ShouldBeLessThanOrEqualTo, 1) + So(1, ShouldBeBetween, 0, 2) + So(1, ShouldNotBeBetween, 2, 4) + So(1, ShouldBeBetweenOrEqual, 1, 2) + So(1, ShouldNotBeBetweenOrEqual, 2, 4) + }) + + Convey("Container 
assertions should be accessible", t, func() { + So([]int{1, 2, 3}, ShouldContain, 2) + So([]int{1, 2, 3}, ShouldNotContain, 4) + So(map[int]int{1: 1, 2: 2, 3: 3}, ShouldContainKey, 2) + So(map[int]int{1: 1, 2: 2, 3: 3}, ShouldNotContainKey, 4) + So(1, ShouldBeIn, []int{1, 2, 3}) + So(4, ShouldNotBeIn, []int{1, 2, 3}) + So([]int{}, ShouldBeEmpty) + So([]int{1}, ShouldNotBeEmpty) + So([]int{1, 2}, ShouldHaveLength, 2) + }) + + Convey("String assertions should be accessible", t, func() { + So("asdf", ShouldStartWith, "a") + So("asdf", ShouldNotStartWith, "z") + So("asdf", ShouldEndWith, "df") + So("asdf", ShouldNotEndWith, "as") + So("", ShouldBeBlank) + So("asdf", ShouldNotBeBlank) + So("asdf", ShouldContainSubstring, "sd") + So("asdf", ShouldNotContainSubstring, "af") + }) + + Convey("Panic recovery assertions should be accessible", t, func() { + So(panics, ShouldPanic) + So(func() {}, ShouldNotPanic) + So(panics, ShouldPanicWith, "Goofy Gophers!") + So(panics, ShouldNotPanicWith, "Guileless Gophers!") + }) + + Convey("Type-checking assertions should be accessible", t, func() { + + // NOTE: Values or pointers may be checked. If a value is passed, + // it will be cast as a pointer to the value to avoid cases where + // the struct being tested takes pointer receivers. Go allows values + // or pointers to be passed as receivers on methods with a value + // receiver, but only pointers on methods with pointer receivers. 
+ // See: + // http://golang.org/doc/effective_go.html#pointers_vs_values + // http://golang.org/doc/effective_go.html#blank_implements + // http://blog.golang.org/laws-of-reflection + + So(1, ShouldHaveSameTypeAs, 0) + So(1, ShouldNotHaveSameTypeAs, "1") + + So(bytes.NewBufferString(""), ShouldImplement, (*io.Reader)(nil)) + So("string", ShouldNotImplement, (*io.Reader)(nil)) + }) + + Convey("Time assertions should be accessible", t, func() { + january1, _ := time.Parse(timeLayout, "2013-01-01 00:00") + january2, _ := time.Parse(timeLayout, "2013-01-02 00:00") + january3, _ := time.Parse(timeLayout, "2013-01-03 00:00") + january4, _ := time.Parse(timeLayout, "2013-01-04 00:00") + january5, _ := time.Parse(timeLayout, "2013-01-05 00:00") + oneDay, _ := time.ParseDuration("24h0m0s") + + So(january1, ShouldHappenBefore, january4) + So(january1, ShouldHappenOnOrBefore, january1) + So(january2, ShouldHappenAfter, january1) + So(january2, ShouldHappenOnOrAfter, january2) + So(january3, ShouldHappenBetween, january2, january5) + So(january3, ShouldHappenOnOrBetween, january3, january5) + So(january1, ShouldNotHappenOnOrBetween, january2, january5) + So(january2, ShouldHappenWithin, oneDay, january3) + So(january5, ShouldNotHappenWithin, oneDay, january1) + So([]time.Time{january1, january2}, ShouldBeChronological) + }) +} + +type thing struct { + a string +} + +func panics() { + panic("Goofy Gophers!") +} + +const timeLayout = "2006-01-02 15:04" diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/examples/bowling_game.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/examples/bowling_game.go new file mode 100644 index 0000000000..547bf93d1c --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/examples/bowling_game.go @@ -0,0 +1,75 @@ +package examples + +// Game contains the state of a bowling game. 
+type Game struct { + rolls []int + current int +} + +// NewGame allocates and starts a new game of bowling. +func NewGame() *Game { + game := new(Game) + game.rolls = make([]int, maxThrowsPerGame) + return game +} + +// Roll rolls the ball and knocks down the number of pins specified by pins. +func (self *Game) Roll(pins int) { + self.rolls[self.current] = pins + self.current++ +} + +// Score calculates and returns the player's current score. +func (self *Game) Score() (sum int) { + for throw, frame := 0, 0; frame < framesPerGame; frame++ { + if self.isStrike(throw) { + sum += self.strikeBonusFor(throw) + throw += 1 + } else if self.isSpare(throw) { + sum += self.spareBonusFor(throw) + throw += 2 + } else { + sum += self.framePointsAt(throw) + throw += 2 + } + } + return sum +} + +// isStrike determines if a given throw is a strike or not. A strike is knocking +// down all pins in one throw. +func (self *Game) isStrike(throw int) bool { + return self.rolls[throw] == allPins +} + +// strikeBonusFor calculates and returns the strike bonus for a throw. +func (self *Game) strikeBonusFor(throw int) int { + return allPins + self.framePointsAt(throw+1) +} + +// isSpare determines if a given frame is a spare or not. A spare is knocking +// down all pins in one frame with two throws. +func (self *Game) isSpare(throw int) bool { + return self.framePointsAt(throw) == allPins +} + +// spareBonusFor calculates and returns the spare bonus for a throw. +func (self *Game) spareBonusFor(throw int) int { + return allPins + self.rolls[throw+2] +} + +// framePointsAt computes and returns the score in a frame specified by throw. +func (self *Game) framePointsAt(throw int) int { + return self.rolls[throw] + self.rolls[throw+1] +} + +const ( + // allPins is the number of pins allocated per fresh throw. + allPins = 10 + + // framesPerGame is the number of frames per bowling game. + framesPerGame = 10 + + // maxThrowsPerGame is the maximum number of throws possible in a single game. 
+ maxThrowsPerGame = 21 +) diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/examples/bowling_game_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/examples/bowling_game_test.go new file mode 100644 index 0000000000..18e997d44a --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/examples/bowling_game_test.go @@ -0,0 +1,80 @@ +/* + +Reference: http://butunclebob.com/ArticleS.UncleBob.TheBowlingGameKata + +See the very first link (which happens to be the very first word of +the first paragraph) on the page for a tutorial. + +*/ + +package examples + +import ( + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func TestBowlingGameScoring(t *testing.T) { + Convey("Given a fresh score card", t, func() { + game := NewGame() + + Convey("When all gutter balls are thrown", func() { + game.rollMany(20, 0) + + Convey("The score should be zero", func() { + So(game.Score(), ShouldEqual, 0) + }) + }) + + Convey("When all throws knock down only one pin", func() { + game.rollMany(20, 1) + + Convey("The score should be 20", func() { + So(game.Score(), ShouldEqual, 20) + }) + }) + + Convey("When a spare is thrown", func() { + game.rollSpare() + game.Roll(3) + game.rollMany(17, 0) + + Convey("The score should include a spare bonus.", func() { + So(game.Score(), ShouldEqual, 16) + }) + }) + + Convey("When a strike is thrown", func() { + game.rollStrike() + game.Roll(3) + game.Roll(4) + game.rollMany(16, 0) + + Convey("The score should include a strike bonus.", func() { + So(game.Score(), ShouldEqual, 24) + }) + }) + + Convey("When all strikes are thrown", func() { + game.rollMany(21, 10) + + Convey("The score should be 300.", func() { + So(game.Score(), ShouldEqual, 300) + }) + }) + }) +} + +func (self *Game) rollMany(times, pins int) { + for x := 0; x < times; x++ { + self.Roll(pins) + } +} +func (self *Game) rollSpare() { + self.Roll(5) + 
self.Roll(5) +} +func (self *Game) rollStrike() { + self.Roll(10) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/examples/doc.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/examples/doc.go new file mode 100644 index 0000000000..dae661e18d --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/examples/doc.go @@ -0,0 +1,5 @@ +// Package examples contains, well, examples of how to use goconvey to +// specify behavior of a system under test. It contains a well-known example +// by Robert C. Martin called "Bowling Game Kata" as well as another very +// trivial example that demonstrates Reset() and some of the assertions. +package examples diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/examples/examples.goconvey b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/examples/examples.goconvey new file mode 100644 index 0000000000..b5c805fbf4 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/examples/examples.goconvey @@ -0,0 +1,12 @@ +// Uncomment the next line to disable the package when running the GoConvey UI: +//IGNORE + +// Uncomment the next line to limit testing to the specified test function name pattern: +//-run=TestAssertionsAreAvailableFromConveyPackage + +// Uncomment the next line to limit testing to those tests that don't bail when testing.Short() is true: +//-short + +// include any additional `go test` flags or application-specific flags below: + +-timeout=1s diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/examples/simple_example_test.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/examples/simple_example_test.go new file mode 100644 index 0000000000..dadfd8136a --- /dev/null +++ 
b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/examples/simple_example_test.go @@ -0,0 +1,36 @@ +package examples + +import ( + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func TestIntegerManipulation(t *testing.T) { + t.Parallel() + + Convey("Given a starting integer value", t, func() { + x := 42 + + Convey("When incremented", func() { + x++ + + Convey("The value should be greater by one", func() { + So(x, ShouldEqual, 43) + }) + Convey("The value should NOT be what it used to be", func() { + So(x, ShouldNotEqual, 42) + }) + }) + Convey("When decremented", func() { + x-- + + Convey("The value should be lesser by one", func() { + So(x, ShouldEqual, 41) + }) + Convey("The value should NOT be what it used to be", func() { + So(x, ShouldNotEqual, 42) + }) + }) + }) +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/go.mod b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/go.mod new file mode 100644 index 0000000000..5764a0a35e --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/go.mod @@ -0,0 +1,8 @@ +module github.com/smartystreets/goconvey + +require ( + github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 // indirect + github.com/jtolds/gls v4.20.0+incompatible + github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d + golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 +) diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/go.sum b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/go.sum new file mode 100644 index 0000000000..4267185d8e --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/go.sum @@ -0,0 +1,12 @@ +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= 
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 h1:TFlARGu6Czu1z7q93HTxcP1P+/ZFC/IKythI5RzrnRg= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/goconvey.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/goconvey.go new file mode 100644 index 0000000000..2e1be6740b --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/goconvey.go @@ -0,0 +1,293 @@ +// This executable provides an HTTP server that watches for file system changes +// to .go files within the working directory (and all nested go packages). +// Navigating to the configured host and port in a web browser will display the +// latest results of running `go test` in each go package. 
+package main + +import ( + "flag" + "fmt" + "log" + "net" + "net/http" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "time" + + "github.com/smartystreets/goconvey/web/server/api" + "github.com/smartystreets/goconvey/web/server/contract" + "github.com/smartystreets/goconvey/web/server/executor" + "github.com/smartystreets/goconvey/web/server/messaging" + "github.com/smartystreets/goconvey/web/server/parser" + "github.com/smartystreets/goconvey/web/server/system" + "github.com/smartystreets/goconvey/web/server/watch" +) + +func init() { + flags() + folders() +} +func flags() { + flag.IntVar(&port, "port", 8080, "The port at which to serve http.") + flag.StringVar(&host, "host", "127.0.0.1", "The host at which to serve http.") + flag.DurationVar(&nap, "poll", quarterSecond, "The interval to wait between polling the file system for changes.") + flag.IntVar(¶llelPackages, "packages", 10, "The number of packages to test in parallel. Higher == faster but more costly in terms of computing.") + flag.StringVar(&gobin, "gobin", "go", "The path to the 'go' binary (default: search on the PATH).") + flag.BoolVar(&cover, "cover", true, "Enable package-level coverage statistics. Requires Go 1.2+ and the go cover tool.") + flag.IntVar(&depth, "depth", -1, "The directory scanning depth. If -1, scan infinitely deep directory structures. 0: scan working directory. 
1+: Scan into nested directories, limited to value.") + flag.StringVar(&timeout, "timeout", "0", "The test execution timeout if none is specified in the *.goconvey file (default is '0', which is the same as not providing this option).") + flag.StringVar(&watchedSuffixes, "watchedSuffixes", ".go", "A comma separated list of file suffixes to watch for modifications.") + flag.StringVar(&excludedDirs, "excludedDirs", "vendor,node_modules", "A comma separated list of directories that will be excluded from being watched") + flag.StringVar(&workDir, "workDir", "", "set goconvey working directory (default current directory)") + flag.BoolVar(&autoLaunchBrowser, "launchBrowser", true, "toggle auto launching of browser (default: true)") + + log.SetOutput(os.Stdout) + log.SetFlags(log.LstdFlags | log.Lshortfile) +} +func folders() { + _, file, _, _ := runtime.Caller(0) + here := filepath.Dir(file) + static = filepath.Join(here, "/web/client") + reports = filepath.Join(static, "reports") +} + +func main() { + flag.Parse() + log.Printf(initialConfiguration, host, port, nap, cover) + + working := getWorkDir() + cover = coverageEnabled(cover, reports) + shell := system.NewShell(gobin, reports, cover, timeout) + + watcherInput := make(chan messaging.WatcherCommand) + watcherOutput := make(chan messaging.Folders) + excludedDirItems := strings.Split(excludedDirs, `,`) + watcher := watch.NewWatcher(working, depth, nap, watcherInput, watcherOutput, watchedSuffixes, excludedDirItems) + + parser := parser.NewParser(parser.ParsePackageResults) + tester := executor.NewConcurrentTester(shell) + tester.SetBatchSize(parallelPackages) + + longpollChan := make(chan chan string) + executor := executor.NewExecutor(tester, parser, longpollChan) + server := api.NewHTTPServer(working, watcherInput, executor, longpollChan) + listener := createListener() + go runTestOnUpdates(watcherOutput, executor, server) + go watcher.Listen() + if autoLaunchBrowser { + go launchBrowser(listener.Addr().String()) + 
} + serveHTTP(server, listener) +} + +func browserCmd() (string, bool) { + browser := map[string]string{ + "darwin": "open", + "linux": "xdg-open", + "windows": "start", + } + cmd, ok := browser[runtime.GOOS] + return cmd, ok +} + +func launchBrowser(addr string) { + browser, ok := browserCmd() + if !ok { + log.Printf("Skipped launching browser for this OS: %s", runtime.GOOS) + return + } + + log.Printf("Launching browser on %s", addr) + url := fmt.Sprintf("http://%s", addr) + cmd := exec.Command(browser, url) + + output, err := cmd.CombinedOutput() + if err != nil { + log.Println(err) + } + log.Println(string(output)) +} + +func runTestOnUpdates(queue chan messaging.Folders, executor contract.Executor, server contract.Server) { + for update := range queue { + log.Println("Received request from watcher to execute tests...") + packages := extractPackages(update) + output := executor.ExecuteTests(packages) + root := extractRoot(update, packages) + server.ReceiveUpdate(root, output) + } +} + +func extractPackages(folderList messaging.Folders) []*contract.Package { + packageList := []*contract.Package{} + for _, folder := range folderList { + hasImportCycle := testFilesImportTheirOwnPackage(folder.Path) + packageName := resolvePackageName(folder.Path) + packageList = append( + packageList, + contract.NewPackage(folder, packageName, hasImportCycle), + ) + } + return packageList +} + +func extractRoot(folderList messaging.Folders, packageList []*contract.Package) string { + path := packageList[0].Path + folder := folderList[path] + return folder.Root +} + +func createListener() net.Listener { + l, err := net.Listen("tcp", fmt.Sprintf("%s:%d", host, port)) + if err != nil { + log.Println(err) + } + if l == nil { + os.Exit(1) + } + return l +} + +func serveHTTP(server contract.Server, listener net.Listener) { + serveStaticResources() + serveAjaxMethods(server) + activateServer(listener) +} + +func serveStaticResources() { + http.Handle("/", 
http.FileServer(http.Dir(static))) +} + +func serveAjaxMethods(server contract.Server) { + http.HandleFunc("/watch", server.Watch) + http.HandleFunc("/ignore", server.Ignore) + http.HandleFunc("/reinstate", server.Reinstate) + http.HandleFunc("/latest", server.Results) + http.HandleFunc("/execute", server.Execute) + http.HandleFunc("/status", server.Status) + http.HandleFunc("/status/poll", server.LongPollStatus) + http.HandleFunc("/pause", server.TogglePause) +} + +func activateServer(listener net.Listener) { + log.Printf("Serving HTTP at: http://%s\n", listener.Addr()) + err := http.Serve(listener, nil) + if err != nil { + log.Println(err) + } +} + +func coverageEnabled(cover bool, reports string) bool { + return (cover && + goMinVersion(1, 2) && + coverToolInstalled() && + ensureReportDirectoryExists(reports)) +} +func goMinVersion(wanted ...int) bool { + version := runtime.Version() // 'go1.2....' + s := regexp.MustCompile(`go([\d]+)\.([\d]+)\.?([\d]+)?`).FindAllStringSubmatch(version, 1) + if len(s) == 0 { + log.Printf("Cannot determine if newer than go1.2, disabling coverage.") + return false + } + for idx, str := range s[0][1:] { + if len(wanted) == idx { + break + } + if v, _ := strconv.Atoi(str); v < wanted[idx] { + log.Printf(pleaseUpgradeGoVersion, version) + return false + } + } + return true +} +func coverToolInstalled() bool { + working := getWorkDir() + command := system.NewCommand(working, "go", "tool", "cover").Execute() + installed := strings.Contains(command.Output, "Usage of 'go tool cover':") + if !installed { + log.Print(coverToolMissing) + return false + } + return true +} +func ensureReportDirectoryExists(reports string) bool { + result, err := exists(reports) + if err != nil { + log.Fatal(err) + } + if result { + return true + } + + if err := os.Mkdir(reports, 0755); err == nil { + return true + } + + log.Printf(reportDirectoryUnavailable, reports) + return false +} +func exists(path string) (bool, error) { + _, err := os.Stat(path) + if 
err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} +func getWorkDir() string { + working := "" + var err error + if workDir != "" { + working = workDir + } else { + working, err = os.Getwd() + if err != nil { + log.Fatal(err) + } + } + result, err := exists(working) + if err != nil { + log.Fatal(err) + } + if !result { + log.Fatalf("Path:%s does not exists", working) + } + return working +} + +var ( + port int + host string + gobin string + nap time.Duration + parallelPackages int + cover bool + depth int + timeout string + watchedSuffixes string + excludedDirs string + autoLaunchBrowser bool + + static string + reports string + + quarterSecond = time.Millisecond * 250 + workDir string +) + +const ( + initialConfiguration = "Initial configuration: [host: %s] [port: %d] [poll: %v] [cover: %v]\n" + pleaseUpgradeGoVersion = "Go version is less that 1.2 (%s), please upgrade to the latest stable version to enable coverage reporting.\n" + coverToolMissing = "Go cover tool is not installed or not accessible: for Go < 1.5 run`go get golang.org/x/tools/cmd/cover`\n For >= Go 1.5 run `go install $GOROOT/src/cmd/cover`\n" + reportDirectoryUnavailable = "Could not find or create the coverage report directory (at: '%s'). You probably won't see any coverage statistics...\n" + separator = string(filepath.Separator) + endGoPath = separator + "src" + separator +) diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/goconvey_1_8.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/goconvey_1_8.go new file mode 100644 index 0000000000..a40694c26e --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/goconvey_1_8.go @@ -0,0 +1,42 @@ +// +build !go1.9 + +// To work correctly with out of GOPATH modules, some functions needed to +// switch from using go/build to golang.org/x/tools/go/packages. 
But that +// package depends on changes to go/types that were introduced in Go 1.9. Since +// modules weren't introduced until Go 1.11, users of Go 1.8 or below can't be +// using modules, so they can continue to use go/build. + +package main + +import ( + "go/build" + "strings" +) + +// This method exists because of a bug in the go cover tool that +// causes an infinite loop when you try to run `go test -cover` +// on a package that has an import cycle defined in one of it's +// test files. Yuck. +func testFilesImportTheirOwnPackage(packagePath string) bool { + meta, err := build.ImportDir(packagePath, build.AllowBinary) + if err != nil { + return false + } + + for _, dependency := range meta.TestImports { + if dependency == meta.ImportPath { + return true + } + } + return false +} + +func resolvePackageName(path string) string { + pkg, err := build.ImportDir(path, build.FindOnly) + if err == nil { + return pkg.ImportPath + } + + nameArr := strings.Split(path, endGoPath) + return nameArr[len(nameArr)-1] +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/goconvey_1_9.go b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/goconvey_1_9.go new file mode 100644 index 0000000000..9a5c0bd9a1 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/goconvey_1_9.go @@ -0,0 +1,64 @@ +// +build go1.9 + +// To work correctly with out of GOPATH modules, some functions needed to +// switch from using go/build to golang.org/x/tools/go/packages. But that +// package depends on changes to go/types that were introduced in Go 1.9. Since +// modules weren't introduced until Go 1.11, using +// golang.org/x/tools/go/packages can safely be restricted to users of Go 1.9 +// or above. 
+package main + +import ( + "fmt" + "strings" + + "golang.org/x/tools/go/packages" +) + +// This method exists because of a bug in the go cover tool that +// causes an infinite loop when you try to run `go test -cover` +// on a package that has an import cycle defined in one of it's +// test files. Yuck. +func testFilesImportTheirOwnPackage(packagePath string) bool { + meta, err := packages.Load( + &packages.Config{ + Mode: packages.NeedName | packages.NeedImports, + Tests: true, + }, + packagePath, + ) + if err != nil { + return false + } + + testPackageID := fmt.Sprintf("%s [%s.test]", meta[0], meta[0]) + + for _, testPackage := range meta[1:] { + if testPackage.ID != testPackageID { + continue + } + + for dependency := range testPackage.Imports { + if dependency == meta[0].PkgPath { + return true + } + } + break + } + return false +} + +func resolvePackageName(path string) string { + pkg, err := packages.Load( + &packages.Config{ + Mode: packages.NeedName, + }, + path, + ) + if err == nil { + return pkg[0].PkgPath + } + + nameArr := strings.Split(path, endGoPath) + return nameArr[len(nameArr)-1] +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/composer.html b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/composer.html new file mode 100644 index 0000000000..48ee57e90a --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/composer.html @@ -0,0 +1,35 @@ + + + + + GoConvey Composer + + + + + + + +
+

+ + +

+
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/favicon.ico b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/favicon.ico new file mode 100644 index 0000000000..bb3df78c2a Binary files /dev/null and b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/favicon.ico differ diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/index.html b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/index.html new file mode 100644 index 0000000000..490e4cb516 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/index.html @@ -0,0 +1,516 @@ + + + + GoConvey + + + + + + + + + + + + + + + + + + + + + + +
+
+
PASS
+
+ +
+ Controls +
+ +
+
+ + +
+ +
+ +
    +
  • +
  • +
  • +
  • +
  • +
  • +
+
+
+ NOTICE: + +
+ +
+ + + + + + + + + +
+
+ + + + + + +
+ + +
+
+ Coverage +
+
+ + + + +
+ Ignored +
+
+ + + +
+ No Test Functions +
+
+ + + +
+ No Test Files +
+
+ + + + +
+ No Go Files +
+
+ +
+ + + + + + + + + + + +
+ +
+ Build Failures +
+
+ + + + +
+ Panics +
+
+ + + + + +
+ Failures +
+
+ + + + + +
+ Stories +
+
+ + +
+ + + +
+
+ LOG +
+
+ +
+ +
+ +
+
+ + + Last test + + + + + + + + : + / + / + + +
+
+ + + LIVE + + + REPLAY + + + PAUSED + + + + + + +
+
+ + + diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/css/common.css b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/css/common.css new file mode 100644 index 0000000000..86a595acad --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/css/common.css @@ -0,0 +1,962 @@ +/* Eric Meyer's Reset CSS v2.0 */ +html,body,div,span,applet,object,iframe,h1,h2,h3,h4,h5,h6,p,blockquote,pre,a,abbr,acronym,address,big,cite,code,del,dfn,em,img,ins,kbd,q,s,samp,small,strike,strong,sub,sup,tt,var,b,u,i,center,dl,dt,dd,ol,ul,li,fieldset,form,label,legend,table,caption,tbody,tfoot,thead,tr,th,td,article,aside,canvas,details,embed,figure,figcaption,footer,header,hgroup,menu,nav,output,ruby,section,summary,time,mark,audio,video{border:0;font-size:100%;font:inherit;vertical-align:baseline;margin:0;padding:0}article,aside,details,figcaption,figure,footer,header,hgroup,menu,nav,section{display:block}body{line-height:1}ol,ul{list-style:none}blockquote,q{quotes:none}blockquote:before,blockquote:after,q:before,q:after{content:none}table{border-collapse:collapse;border-spacing:0} + +@font-face { + font-family: 'Open Sans'; + src: local("Open Sans"), url("../fonts/Open_Sans/OpenSans-Regular.ttf"); +} +@font-face { + font-family: 'Orbitron'; + src: local("Orbitron"), url("../fonts/Orbitron/Orbitron-Regular.ttf"); +} +@font-face { + font-family: 'Oswald'; + src: local("Oswald"), url("../fonts/Oswald/Oswald-Regular.ttf"); +} + +::selection { + background: #87AFBC; + color: #FFF; + text-shadow: none; +} + +::-moz-selection { + background: #87AFBC; + color: #FFF; + text-shadow: none; +} + +::-webkit-input-placeholder { + font-style: italic; +} +:-moz-placeholder { + font-style: italic; +} +::-moz-placeholder { + font-style: italic; +} +:-ms-input-placeholder { + font-style: italic; +} + + + +html, body { + height: 
100%; + min-height: 100%; +} + +body { + -webkit-transform: translate3d(0, 0, 0); /* attempts to fix Chrome glitching on Mac */ + background-position: fixed; + background-repeat: no-repeat; + font-family: Menlo, Monaco, 'Courier New', monospace; + line-height: 1.5em; + font-size: 14px; + overflow: hidden; + display: none; +} + +a { + text-decoration: none; +} + +a:hover { + text-decoration: underline; +} + +a.fa { + text-decoration: none; +} + +b { + font-weight: bold; +} + +i { + font-style: italic; +} + +hr { + border: 0; + background: 0; + height: 0; + margin: 0; + padding: 0; +} + +input[type=text] { + -webkit-box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; + + background: none; + border: none; + border-bottom-width: 1px; + border-bottom-style: solid; + outline: none; + padding-bottom: .1em; + font: 300 18px/1.5em 'Open Sans', sans-serif; +} + +.overall { + padding: 30px 0 15px; + position: relative; + z-index: 50; +} + +.status { + line-height: 1em; + font-family: 'Orbitron', monospace; + text-align: center; +} + +.overall .status { + font-size: 46px; + letter-spacing: 5px; + text-transform: uppercase; + white-space: nowrap; +} + +.toggler { + font-size: 10px; + padding: 3px 5px; + text-decoration: none; + text-transform: uppercase; + cursor: pointer; + line-height: 1.5em; +} + +.toggler.narrow { + display: none; +} + +.togglable { + overflow-x: auto; +} + +.controls { + font-size: 18px; + line-height: 1em; +} + +.controls li { + text-decoration: none; + display: block; + float: left; + padding: .75em; + cursor: pointer; +} + +.server-down { + display: none; + text-align: center; + padding: 10px 0; +} + +footer .server-down { + padding: 8px 15px; + text-transform: uppercase; +} + +#logo { + font-family: 'Oswald', 'Impact', 'Arial Black', sans-serif; +} + +#path-container { + margin-top: .4em; +} + +#path { + width: 100%; + text-align: center; + border-bottom-width: 0; +} + +#path:hover, +#path:focus { + border-bottom-width: 
1px; +} + +.expandable { + -webkit-box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; + + border-top-width: 1px; + border-top-style: solid; + overflow-y: hidden; + overflow-x: auto; + text-align: center; + white-space: nowrap; + display: none; +} + +.settings { + white-space: normal; + overflow-x: auto; + white-space: nowrap; +} + +.settings .setting-meta, +.settings .setting-val { + display: inline-block; +} + +.settings .container { + padding: 15px 0; +} + +.settings .setting { + font-size: 13px; + display: inline-block; + margin-right: 5%; +} + +.settings .setting:first-child { + margin-left: 5%; +} + +.settings .setting .setting-meta { + text-align: right; + padding-right: 1em; + vertical-align: middle; + max-width: 150px; +} + +.settings .setting .setting-meta small { + font-size: 8px; + text-transform: uppercase; + display: block; + line-height: 1.25em; +} + +.history .container { + padding: 15px 0 15px 25%; +} + +.history .item { + -webkit-box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; + + transition: all .1s linear; + -moz-transition: all .1s linear; + -webkit-transition: all .1s linear; + -o-transition: all .1s linear; + + display: inline-block; + text-align: left; + margin: 0 20px; + padding: 20px; + height: 100%; + width: 175px; + opacity: .7; + cursor: pointer; +} + +.history .item:hover { + opacity: 1; +} + +.history .item:nth-child(odd):hover { + -webkit-transform: scale(1.1) rotate(5deg); + -moz-transform: scale(1.1) rotate(5deg); +} + +.history .item:nth-child(even):hover { + -webkit-transform: scale(1.1) rotate(-5deg); + -moz-transform: scale(1.1) rotate(-5deg); +} + +.history .item .summary { + font: 14px/1.5em 'Monaco', 'Menlo', 'Courier New', monospace; +} + +.history .item.selected { + opacity: 1; +} + +.history .status { + font-size: 13px; +} + + + + + + +.frame { + position: relative; + z-index: 0; + width: 100%; +} + +.frame .col { + -webkit-box-sizing: border-box; + 
-moz-box-sizing: border-box; + box-sizing: border-box; + + border-right-width: 1px; + border-right-style: solid; + float: left; + height: 100%; + overflow-y: auto; +} + +.frame .col:first-child { + border-left: none; +} + +.frame .col:last-child { + border-right: none; +} + + +#col-1 { + width: 15%; +} + +#col-2 { + width: 60%; +} + +#col-3 { + width: 25%; +} + +#coverage { + font-size: 10px; + white-space: nowrap; +} + +#coverage-color-template { + display: none; +} + +.rtl { + direction: rtl; +} + +.pkg-cover { + position: relative; +} + +.pkg-cover a { + color: inherit !important; + text-decoration: none; +} + +.pkg-cover-bar { + position: absolute; + top: 0; + left: 0; + height: 100%; + z-index: 1; +} + +.pkg-cover-name { + position: relative; + z-index: 2; +} + +.pkg-cover-name, +.pkg-list { + font-family: 'Menlo', monospace; + font-size: 10px; + padding-right: 2%; + white-space: nowrap; +} + +.buildfail-pkg, +.panic-pkg, +.failure-pkg { + padding: 5px 10px; + font: 14px 'Open Sans', sans-serif; +} + +.buildfail-output, +.panic-output, +.failure-output { + padding: 10px; + font-size: 12px; + line-height: 1.25em; + overflow-y: auto; + white-space: pre-wrap; + font-family: 'Menlo', monospace; +} + +.panic-story, +.failure-story { + font-size: 10px; + line-height: 1.25em; + font-family: 'Open Sans', sans-serif; +} + +.panic-summary { + font-size: 14px; + font-weight: bold; + line-height: 1.5em; +} + +.panic-file, +.failure-file { + font-size: 13px; + line-height: 1.5em; +} + +.diffviewer { + border-collapse: collapse; + width: 100%; +} + +.diffviewer td { + border-bottom-width: 1px; + border-bottom-style: solid; + padding: 2px 5px; + font-size: 14px; +} + +.diffviewer .original, +.diffviewer .changed, +.diffviewer .diff { + white-space: pre-wrap; +} + +.diffviewer tr:first-child td { + border-top-width: 1px; + border-top-style: solid; +} + +.diffviewer td:first-child { + width: 65px; + font-size: 10px; + border-right-width: 1px; + border-right-style: solid; + 
text-transform: uppercase; +} + +.diff ins { + text-decoration: none; +} + + + +#stories table { + width: 100%; +} + + +.story-pkg { + cursor: pointer; +} + +.story-pkg td { + font: 16px 'Open Sans', sans-serif; + white-space: nowrap; + padding: 10px; +} + +.story-pkg td:first-child { + width: 1em; +} + +.story-line { + font: 12px 'Open Sans', sans-serif; + cursor: default; +} + +.story-line td { + padding-top: 7px; + padding-bottom: 7px; +} + +.pkg-toggle-container { + position: relative; + display: inline-block; +} + +.toggle-all-pkg { + font-size: 10px; + text-transform: uppercase; + position: absolute; + padding: 5px; + font-family: 'Menlo', 'Open Sans', sans-serif; + display: none; +} + +.story-line-summary-container { + padding: 0 10px 0 10px; + white-space: nowrap; + width: 35px; + text-align: center; +} + +.story-line-status { + width: 6px; + min-width: 6px; + height: 100%; +} + +.story-line-desc { + padding: 5px; +} + +.story-line-desc .message { + font-family: 'Menlo', monospace; + white-space: pre-wrap; +} + +.statusicon { + font: 14px 'Open Sans', sans-serif; +} + +.statusicon.skip { + font-size: 16px; +} + + +.depth-0 { padding-left: 1.5em !important; } +.depth-1 { padding-left: 3em !important; } +.depth-2 { padding-left: 4.5em !important; } +.depth-3 { padding-left: 6em !important; } +.depth-4 { padding-left: 7.5em !important; } +.depth-5 { padding-left: 9em !important; } +.depth-6 { padding-left: 10.5em !important; } +.depth-7 { padding-left: 11em !important; } + + +.log { + font-size: 11px; + line-height: 1.5em; + padding: 5px; + padding-bottom: .5em; +} + +.log .line { + white-space: pre-wrap; + padding-left: 2em; + text-indent: -2em; +} + + + + + +footer { + -webkit-box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; + + position: absolute; + bottom: 0; + left: 0; + padding: 5px 15px; + width: 100%; + border-top-width: 1px; + border-top-style: solid; + font-size: 12px; +} + +footer section { + float: left; +} + +footer 
section:first-child { + width: 80%; +} + +footer section:last-child { + text-align: right; + width: 20%; +} + +footer .info { + padding: 0 10px; +} + +footer .info:first-child { + padding-left: 0; +} + +#narrow-summary { + display: none; +} + +footer .replay, +footer .paused { + display: none; +} + +footer .replay { + cursor: pointer; +} + +footer .server-down .notice-message { + font-size: 10px; +} + + + + +.rel { + position: relative; +} + +.text-right { + text-align: right; +} + +.text-center { + text-align: center; +} + +.text-left { + text-align: left; +} + +.float-left { + float: left; +} + +.float-right { + float: right; +} + +.clear { + clear: both; +} + +.nowrap { + white-space: nowrap; +} + +.clr-blue { + color: #2B597F; +} + +.show { + display: block; +} + +.hide { + display: none; +} + +.enum { + cursor: pointer; + display: inline-block; + font-size: 12px; + border-width: 1px; + border-style: solid; + border-radius: 9px; + vertical-align: middle; +} + +.enum > li { + display: block; + float: left; + padding: 5px 12px; + border-left-width: 1px; + border-left-style: solid; +} + +.enum > li:first-child { + border-left: 0px; + border-top-left-radius: 8px; + border-bottom-left-radius: 8px; +} + +.enum > li:last-child { + border-top-right-radius: 8px; + border-bottom-right-radius: 8px; +} + + + + + + + + +.disabled { + cursor: default !important; + background: transparent !important; +} + +.spin-once { + -webkit-animation: fa-spin 0.5s 1 ease; + animation: fa-spin 0.5s 1 ease; +} + +.spin-slowly { + -webkit-animation: fa-spin .75s infinite linear; + animation: fa-spin .75s infinite linear; +} + +.throb { + -webkit-animation: throb 2.5s ease-in-out infinite; + -moz-animation: throb 2.5s ease-in-out infinite; + -o-animation: throb 2.5s ease-in-out infinite; + animation: throb 2.5s ease-in-out infinite; +} + +.flash { + -webkit-animation: flash 4s linear infinite; + -moz-animation: flash 4s linear infinite; + -o-animation: flash 4s linear infinite; + animation: 
flash 4s linear infinite; +} + + + + + +/* Clearfix */ +.cf:before, +.cf:after { + content: " "; + display: table; +} +.cf:after { + clear: both; +} + + + + + + +@media (max-width: 1099px) { + #col-1 { + width: 25%; + } + + #col-2 { + width: 75%; + border-right: none; + } + + #col-3 { + display: none; + } + + footer #duration { + display: none; + } +} + +@media (max-width: 900px) { + footer #last-test-container { + display: none; + } +} + +@media (min-width: 850px) and (max-width: 1220px) { + #path { + font-size: 14px; + margin-top: 5px; + } +} + +@media (min-width: 700px) and (max-width: 849px) { + #path { + font-size: 12px; + margin-top: 8px; + } +} + +@media (max-width: 799px) { + #col-1 { + display: none; + } + + #col-2 { + width: 100%; + } + + #stories .story-pkg-name { + font-size: 14px; + } + + #stories .story-pkg-watch-td { + display: none; + } +} + +@media (max-width: 700px) { + #path-container { + display: none; + } + + footer #time { + display: none; + } + + footer .info { + padding: 0 5px; + } + + footer .server-down .notice-message { + display: none; + } +} + +@media (max-width: 499px) { + .toggler.narrow { + display: block; + } + + #show-gen { + display: none; + } + + .hide-narrow { + display: none; + } + + .show-narrow { + display: block; + } + + .overall .status { + font-size: 28px; + letter-spacing: 1px; + } + + .toggler { + display: block; + } + + .controls ul { + text-align: center; + float: none; + } + + .controls li { + display: inline-block; + float: none; + } + + .enum > li { + float: left; + display: block; + } + + #logo { + display: none; + } + + .history .item { + margin: 0 5px; + } + + .history .item .summary { + display: none; + } + + .server-down { + font-size: 14px; + } + + #stories .story-pkg-name { + font-size: 16px; + } + + #stories .not-pkg-name { + display: none; + } + + footer #duration { + display: none; + } + + footer #summary { + display: none; + } + + footer #narrow-summary { + display: inline; + } +} + + + + +/** + Custom 
CSS Animations +**/ + + + +@-webkit-keyframes throb { + 0% { opacity: 1; } + 50% { opacity: .35; } + 100% { opacity: 1; } +} +@-moz-keyframes throb { + 0% { opacity: 1; } + 50% { opacity: .35; } + 100% { opacity: 1; } +} +@-o-keyframes throb { + 0% { opacity: 1; } + 50% { opacity: .35; } + 100% { opacity: 1; } +} +@keyframes throb { + 0% { opacity: 1; } + 50% { opacity: .35; } + 100% { opacity: 1; } +} + + +@-webkit-keyframes flash { + 70% { opacity: 1; } + 90% { opacity: 0; } + 98% { opacity: 0; } + 100% { opacity: 1; } +} +@-moz-keyframes flash { + 70% { opacity: 1; } + 90% { opacity: 0; } + 98% { opacity: 0; } + 100% { opacity: 1; } +} +@-o-keyframes flash { + 70% { opacity: 1; } + 90% { opacity: 0; } + 98% { opacity: 0; } + 100% { opacity: 1; } +} +@keyframes flash { + 70% { opacity: 1; } + 90% { opacity: 0; } + 98% { opacity: 0; } + 100% { opacity: 1; } +} + + + + + + + + + + + +/* +#coverage { + perspective: 1000; +} + +#coverage .pkg-cover { + -webkit-transition: .7s; + transform-style: preserve-3d; + position: relative; +} + +#coverage:hover .pkg-cover { + -webkit-transform: rotateX(180deg); +}*/ diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/css/composer.css b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/css/composer.css new file mode 100644 index 0000000000..6dd344ba5c --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/css/composer.css @@ -0,0 +1,65 @@ +/* Eric Meyer's Reset CSS v2.0 */ 
+html,body,div,span,applet,object,iframe,h1,h2,h3,h4,h5,h6,p,blockquote,pre,a,abbr,acronym,address,big,cite,code,del,dfn,em,img,ins,kbd,q,s,samp,small,strike,strong,sub,sup,tt,var,b,u,i,center,dl,dt,dd,ol,ul,li,fieldset,form,label,legend,table,caption,tbody,tfoot,thead,tr,th,td,article,aside,canvas,details,embed,figure,figcaption,footer,header,hgroup,menu,nav,output,ruby,section,summary,time,mark,audio,video{border:0;font-size:100%;font:inherit;vertical-align:baseline;margin:0;padding:0}article,aside,details,figcaption,figure,footer,header,hgroup,menu,nav,section{display:block}body{line-height:1}ol,ul{list-style:none}blockquote,q{quotes:none}blockquote:before,blockquote:after,q:before,q:after{content:none}table{border-collapse:collapse;border-spacing:0} + +@font-face { + font-family: 'Open Sans'; + src: local("Open Sans"), url("../fonts/Open_Sans/OpenSans-Regular.ttf"); +} +@font-face { + font-family: 'Oswald'; + src: local("Oswald"), url("../fonts/Oswald/Oswald-Regular.ttf"); +} + +body { + font-family: 'Open Sans', 'Helvetica Neue', sans-serif; + font-size: 16px; +} + +header { + background: #2C3F49; + padding: 10px; +} + +.logo { + font-family: Oswald, sans-serif; + font-size: 24px; + margin-right: 5px; + color: #DDD; +} + +.afterlogo { + font-size: 12px; + text-transform: uppercase; + position: relative; + top: -3px; + color: #999; +} + +#input, +#output { + -webkit-box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; + + padding: 15px; + height: 80%; + float: left; + overflow: auto; +} + +#input { + border: 0; + font: 300 18px/1.5em 'Open Sans'; + resize: none; + outline: none; + width: 50%; +} + +#output { + width: 50%; + display: inline-block; + background: #F0F0F0; + font: 14px/1.25em 'Menlo', 'Monaco', 'Courier New', monospace; + border-left: 1px solid #CCC; + white-space: pre-wrap; +} \ No newline at end of file diff --git 
a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/dark-bigtext.css b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/dark-bigtext.css new file mode 100644 index 0000000000..38d7134020 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/dark-bigtext.css @@ -0,0 +1,400 @@ +/* This is a fork of the dark.css theme. The only changes from dark.css are near the very end. */ + +::-webkit-scrollbar { + width: 10px; + height: 10px; +} + +::-webkit-scrollbar-corner { + background: transparent; +} + +::-webkit-scrollbar-thumb { + background-color: rgba(255, 255, 255, .35); + border-radius: 10px; +} + +body { + color: #D0D0D0; + background: fixed #040607; + background: fixed -moz-linear-gradient(top, hsl(200,27%,2%) 0%, hsl(203,29%,26%) 100%); + background: fixed -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(200,27%,2%)), color-stop(100%,hsl(203,29%,26%))); + background: fixed -webkit-linear-gradient(top, hsl(200,27%,2%) 0%,hsl(203,29%,26%) 100%); + background: fixed -o-linear-gradient(top, hsl(200,27%,2%) 0%,hsl(203,29%,26%) 100%); + background: fixed -ms-linear-gradient(top, hsl(200,27%,2%) 0%,hsl(203,29%,26%) 100%); + background: fixed linear-gradient(to bottom, hsl(200,27%,2%) 0%,hsl(203,29%,26%) 100%); + filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#040607', endColorstr='#2f4756',GradientType=0 ); +} + +a, +.toggle-all-pkg { + color: #247D9E; +} + +a:hover, +.toggle-all-pkg:hover { + color: #33B5E5; +} + +input[type=text] { + border-bottom-color: #33B5E5; + color: #BBB; +} + +::-webkit-input-placeholder { + color: #555; +} +:-moz-placeholder { + color: #555; +} +::-moz-placeholder { + color: #555; +} +:-ms-input-placeholder { + color: #555; +} + +.overall { + /* + Using box-shadow here is not very performant but allows us + to 
animate the change of the background color much more easily. + This box-shadow is an ALTERNATIVE, not supplement, to using gradients + in this case. + */ + box-shadow: inset 0 150px 100px -110px rgba(0, 0, 0, .5); +} + +.overall.ok { + background: #688E00; +} + +.overall.fail { + background: #DB8700; +} + +.overall.panic { + background: #A80000; +} + +.overall.buildfail { + background: #A4A8AA; +} + +.overall .status { + color: #EEE; +} + +.server-down { + background: rgba(255, 45, 45, 0.55); + color: #FFF; +} + +.toggler { + background: #132535; +} + +.toggler:hover { + background: #1C374F; +} + +.controls { + border-bottom: 1px solid #33B5E5; +} + +.controls li { + color: #2A5A84; +} + +.controls li:hover { + background: #132535; + color: #33B5E5; +} + +.sel { + background: #33B5E5 !important; + color: #FFF !important; +} + +.pkg-cover-name { + text-shadow: 1px 1px 0px #000; +} + +.pkg-cover-name b, +.story-pkg-name b { + color: #FFF; + font-weight: bold; +} + +.pkg-cover:hover, +.pkg-cover:hover b { + color: #FFF; +} + +.expandable { + border-top-color: #33B5E5; +} + +.expandable { + background: rgba(0, 0, 0, .2); +} + +.history .item.ok { + background: #3f5400; + background: -moz-linear-gradient(top, hsl(75,100%,16%) 0%, hsl(76,100%,28%) 100%); + background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(75,100%,16%)), color-stop(100%,hsl(76,100%,28%))); + background: -webkit-linear-gradient(top, hsl(75,100%,16%) 0%,hsl(76,100%,28%) 100%); + background: -o-linear-gradient(top, hsl(75,100%,16%) 0%,hsl(76,100%,28%) 100%); + background: -ms-linear-gradient(top, hsl(75,100%,16%) 0%,hsl(76,100%,28%) 100%); + background: linear-gradient(to bottom, hsl(75,100%,16%) 0%,hsl(76,100%,28%) 100%); + filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#3f5400', endColorstr='#698f00',GradientType=0 ); +} + +.history .item.fail { + background: #7f4e00; + background: -moz-linear-gradient(top, hsl(37,100%,25%) 0%, hsl(37,100%,43%) 100%); + 
background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(37,100%,25%)), color-stop(100%,hsl(37,100%,43%))); + background: -webkit-linear-gradient(top, hsl(37,100%,25%) 0%,hsl(37,100%,43%) 100%); + background: -o-linear-gradient(top, hsl(37,100%,25%) 0%,hsl(37,100%,43%) 100%); + background: -ms-linear-gradient(top, hsl(37,100%,25%) 0%,hsl(37,100%,43%) 100%); + background: linear-gradient(to bottom, hsl(37,100%,25%) 0%,hsl(37,100%,43%) 100%); + filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#7f4e00', endColorstr='#db8700',GradientType=0 ); +} + +.history .item.panic { + background: #660000; + background: -moz-linear-gradient(top, hsl(0,100%,20%) 0%, hsl(0,100%,33%) 100%); + background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(0,100%,20%)), color-stop(100%,hsl(0,100%,33%))); + background: -webkit-linear-gradient(top, hsl(0,100%,20%) 0%,hsl(0,100%,33%) 100%); + background: -o-linear-gradient(top, hsl(0,100%,20%) 0%,hsl(0,100%,33%) 100%); + background: -ms-linear-gradient(top, hsl(0,100%,20%) 0%,hsl(0,100%,33%) 100%); + background: linear-gradient(to bottom, hsl(0,100%,20%) 0%,hsl(0,100%,33%) 100%); + filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#660000', endColorstr='#a80000',GradientType=0 ); +} + +.history .item.buildfail { + background: #282f33; + background: -moz-linear-gradient(top, hsl(202,12%,18%) 0%, hsl(208,5%,48%) 100%); + background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(202,12%,18%)), color-stop(100%,hsl(208,5%,48%))); + background: -webkit-linear-gradient(top, hsl(202,12%,18%) 0%,hsl(208,5%,48%) 100%); + background: -o-linear-gradient(top, hsl(202,12%,18%) 0%,hsl(208,5%,48%) 100%); + background: -ms-linear-gradient(top, hsl(202,12%,18%) 0%,hsl(208,5%,48%) 100%); + background: linear-gradient(to bottom, hsl(202,12%,18%) 0%,hsl(208,5%,48%) 100%); + filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#282f33', 
endColorstr='#757c82',GradientType=0 ); +} + +.enum { + border-color: #2B597F; +} + +.enum > li { + border-left-color: #2B597F; +} + +.enum > li:hover { + background: rgba(55, 114, 163, .25); +} + +.group { + background: -moz-linear-gradient(top, rgba(16,59,71,0) 0%, rgba(16,59,71,1) 100%); + background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,rgba(16,59,71,0)), color-stop(100%,rgba(16,59,71,1))); + background: -webkit-linear-gradient(top, rgba(16,59,71,0) 0%,rgba(16,59,71,1) 100%); + background: -o-linear-gradient(top, rgba(16,59,71,0) 0%,rgba(16,59,71,1) 100%); + background: -ms-linear-gradient(top, rgba(16,59,71,0) 0%,rgba(16,59,71,1) 100%); + background: linear-gradient(to top, rgba(16,59,71,0) 0%,rgba(16,59,71,1) 100%); + filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#00103b47', endColorstr='#103b47',GradientType=0 ); +} + +.stats { + color: #FFF; +} + +.error { + color: #F58888 !important; + background: rgba(255, 45, 45, 0.35) !important; +} + +.spin-slowly, +.spin-once { + color: #33B5E5 !important; +} + +.frame .col, +footer { + border-color: #33B5E5; +} + +footer { + background: rgba(0, 0, 0, .5); +} + +footer .recording .fa { + color: #CC0000; +} + +footer .replay .fa { + color: #33B5E5; +} + +footer .paused .fa { + color: #AAA; +} + +footer .recording.replay .fa { + color: #33B5E5; +} + + + +.buildfail-pkg { + background: rgba(255, 255, 255, .1); +} +.buildfail-output { + background: rgba(255, 255, 255, .2); +} + + + +.panic-pkg { + background: rgba(255, 0, 0, .3); +} +.panic-story { + padding: 10px; + background: rgba(255, 0, 0, .1); +} +.panic-story a, +.panic-summary { + color: #E94A4A; +} +.panic-output { + color: #FF8181; +} + + + +.failure-pkg { + background: rgba(255, 153, 0, .42); +} +.failure-story { + padding: 10px; + background: rgba(255, 153, 0, .1); +} +.failure-story a { + color: #FFB518; +} +.failure-output { + color: #FFBD47; +} +.failure-file { + color: #FFF; +} + + +.diffviewer td { + 
border-color: rgba(0, 0, 0, .3); +} + +/* prettyTextDiff expected/deleted colors */ +.diffviewer .exp, +.diff del { + background: rgba(131, 252, 131, 0.22); +} + +/* prettyTextDiff actual/inserted colors */ +.diffviewer .act, +.diff ins { + background: rgba(255, 52, 52, 0.33); +} + + + +.story-links a, +.test-name-link a { + color: inherit; +} + + + +.story-pkg { + background: rgba(0, 0, 0, .4); +} + +.story-pkg:hover { + background: rgba(255, 255, 255, .05); +} + +.story-line + .story-line { + border-top: 1px dashed rgba(255, 255, 255, .08); +} + +.story-line-desc .message { + color: #999; +} + +.story-line-summary-container { + border-right: 1px dashed #333; +} + +.story-line.ok .story-line-status { background: #008000; } +.story-line.ok:hover, .story-line.ok.story-line-sel { background: rgba(0, 128, 0, .1); } + +.story-line.fail .story-line-status { background: #EA9C4D; } +.story-line.fail:hover, .story-line.fail.story-line-sel { background: rgba(234, 156, 77, .1); } + +.story-line.panic .story-line-status { background: #FF3232; } +.story-line.panic:hover, .story-line.panic.story-line-sel { background: rgba(255, 50, 50, .1); } + +.story-line.skip .story-line-status { background: #AAA; } +.story-line.skip:hover, .story-line.skip.story-line-sel { background: rgba(255, 255, 255, .1); } + +.statusicon.ok { color: #76C13C; } +.statusicon.fail, .fail-clr { color: #EA9C4D; } +.statusicon.panic, .statusicon.panic .fa, .panic-clr { color: #FF3232; } +.statusicon.skip, .skip-clr { color: #888; } + + +.log .timestamp { + color: #999; +} + + +.clr-red { + color: #FF2222; +} + + +.tipsy-inner { + background-color: #FAFAFA; + color: #222; +} + +.tipsy-arrow { + border: 8px dashed #FAFAFA; +} + +.tipsy-arrow-n, +.tipsy-arrow-s, +.tipsy-arrow-e, +.tipsy-arrow-w, +{ + border-color: #FAFAFA; +} + +/***************************************************************/ +/*************************** Tweaks ****************************/ 
+/***************************************************************/ + + +/* More space for stories */ +div#col-3 { display: none; } /* hides the log */ +div#col-2 { width: 85%; } /* fill it in with stories */ + +/* Bigger Text */ +.story-line { font-size: 16px; } +.story-line b { font-size: 20px; } +td.story-pkg-name { font-size: 24px; } + +/* Smaller Header */ +div.overall { padding: 10px 0 0px; } +.overall .status { font-size: 36px; } + +/***************************************************************/ diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/dark.css b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/dark.css new file mode 100644 index 0000000000..60a5d0ccdb --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/dark.css @@ -0,0 +1,386 @@ +::-webkit-scrollbar { + width: 10px; + height: 10px; +} + +::-webkit-scrollbar-corner { + background: transparent; +} + +::-webkit-scrollbar-thumb { + background-color: rgba(255, 255, 255, .35); + border-radius: 10px; +} + +body { + color: #D0D0D0; + background: fixed #040607; + background: fixed -moz-linear-gradient(top, hsl(200,27%,2%) 0%, hsl(203,29%,26%) 100%); + background: fixed -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(200,27%,2%)), color-stop(100%,hsl(203,29%,26%))); + background: fixed -webkit-linear-gradient(top, hsl(200,27%,2%) 0%,hsl(203,29%,26%) 100%); + background: fixed -o-linear-gradient(top, hsl(200,27%,2%) 0%,hsl(203,29%,26%) 100%); + background: fixed -ms-linear-gradient(top, hsl(200,27%,2%) 0%,hsl(203,29%,26%) 100%); + background: fixed linear-gradient(to bottom, hsl(200,27%,2%) 0%,hsl(203,29%,26%) 100%); + filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#040607', endColorstr='#2f4756',GradientType=0 ); +} + +a, +.toggle-all-pkg { + color: #247D9E; 
+} + +a:hover, +.toggle-all-pkg:hover { + color: #33B5E5; +} + +input[type=text] { + border-bottom-color: #33B5E5; + color: #BBB; +} + +::-webkit-input-placeholder { + color: #555; +} +:-moz-placeholder { + color: #555; +} +::-moz-placeholder { + color: #555; +} +:-ms-input-placeholder { + color: #555; +} + +.overall { + /* + Using box-shadow here is not very performant but allows us + to animate the change of the background color much more easily. + This box-shadow is an ALTERNATIVE, not supplement, to using gradients + in this case. + */ + box-shadow: inset 0 150px 100px -110px rgba(0, 0, 0, .5); +} + +.overall.ok { + background: #688E00; +} + +.overall.fail { + background: #DB8700; +} + +.overall.panic { + background: #A80000; +} + +.overall.buildfail { + background: #A4A8AA; +} + +.overall .status { + color: #EEE; +} + +.server-down { + background: rgba(255, 45, 45, 0.55); + color: #FFF; +} + +.toggler { + background: #132535; +} + +.toggler:hover { + background: #1C374F; +} + +.controls { + border-bottom: 1px solid #33B5E5; +} + +.controls li { + color: #2A5A84; +} + +.controls li:hover { + background: #132535; + color: #33B5E5; +} + +.sel { + background: #33B5E5 !important; + color: #FFF !important; +} + +.pkg-cover-name { + text-shadow: 1px 1px 0px #000; +} + +.pkg-cover-name b, +.story-pkg-name b { + color: #FFF; + font-weight: bold; +} + +.pkg-cover:hover, +.pkg-cover:hover b { + color: #FFF; +} + +.expandable { + border-top-color: #33B5E5; +} + +.expandable { + background: rgba(0, 0, 0, .2); +} + +.history .item.ok { + background: #3f5400; + background: -moz-linear-gradient(top, hsl(75,100%,16%) 0%, hsl(76,100%,28%) 100%); + background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(75,100%,16%)), color-stop(100%,hsl(76,100%,28%))); + background: -webkit-linear-gradient(top, hsl(75,100%,16%) 0%,hsl(76,100%,28%) 100%); + background: -o-linear-gradient(top, hsl(75,100%,16%) 0%,hsl(76,100%,28%) 100%); + background: -ms-linear-gradient(top, 
hsl(75,100%,16%) 0%,hsl(76,100%,28%) 100%); + background: linear-gradient(to bottom, hsl(75,100%,16%) 0%,hsl(76,100%,28%) 100%); + filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#3f5400', endColorstr='#698f00',GradientType=0 ); +} + +.history .item.fail { + background: #7f4e00; + background: -moz-linear-gradient(top, hsl(37,100%,25%) 0%, hsl(37,100%,43%) 100%); + background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(37,100%,25%)), color-stop(100%,hsl(37,100%,43%))); + background: -webkit-linear-gradient(top, hsl(37,100%,25%) 0%,hsl(37,100%,43%) 100%); + background: -o-linear-gradient(top, hsl(37,100%,25%) 0%,hsl(37,100%,43%) 100%); + background: -ms-linear-gradient(top, hsl(37,100%,25%) 0%,hsl(37,100%,43%) 100%); + background: linear-gradient(to bottom, hsl(37,100%,25%) 0%,hsl(37,100%,43%) 100%); + filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#7f4e00', endColorstr='#db8700',GradientType=0 ); +} + +.history .item.panic { + background: #660000; + background: -moz-linear-gradient(top, hsl(0,100%,20%) 0%, hsl(0,100%,33%) 100%); + background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(0,100%,20%)), color-stop(100%,hsl(0,100%,33%))); + background: -webkit-linear-gradient(top, hsl(0,100%,20%) 0%,hsl(0,100%,33%) 100%); + background: -o-linear-gradient(top, hsl(0,100%,20%) 0%,hsl(0,100%,33%) 100%); + background: -ms-linear-gradient(top, hsl(0,100%,20%) 0%,hsl(0,100%,33%) 100%); + background: linear-gradient(to bottom, hsl(0,100%,20%) 0%,hsl(0,100%,33%) 100%); + filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#660000', endColorstr='#a80000',GradientType=0 ); +} + +.history .item.buildfail { + background: #282f33; + background: -moz-linear-gradient(top, hsl(202,12%,18%) 0%, hsl(208,5%,48%) 100%); + background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(202,12%,18%)), color-stop(100%,hsl(208,5%,48%))); + background: -webkit-linear-gradient(top, 
hsl(202,12%,18%) 0%,hsl(208,5%,48%) 100%); + background: -o-linear-gradient(top, hsl(202,12%,18%) 0%,hsl(208,5%,48%) 100%); + background: -ms-linear-gradient(top, hsl(202,12%,18%) 0%,hsl(208,5%,48%) 100%); + background: linear-gradient(to bottom, hsl(202,12%,18%) 0%,hsl(208,5%,48%) 100%); + filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#282f33', endColorstr='#757c82',GradientType=0 ); +} + +.enum { + border-color: #2B597F; +} + +.enum > li { + border-left-color: #2B597F; +} + +.enum > li:hover { + background: rgba(55, 114, 163, .25); +} + +.group { + background: -moz-linear-gradient(top, rgba(16,59,71,0) 0%, rgba(16,59,71,1) 100%); + background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,rgba(16,59,71,0)), color-stop(100%,rgba(16,59,71,1))); + background: -webkit-linear-gradient(top, rgba(16,59,71,0) 0%,rgba(16,59,71,1) 100%); + background: -o-linear-gradient(top, rgba(16,59,71,0) 0%,rgba(16,59,71,1) 100%); + background: -ms-linear-gradient(top, rgba(16,59,71,0) 0%,rgba(16,59,71,1) 100%); + background: linear-gradient(to top, rgba(16,59,71,0) 0%,rgba(16,59,71,1) 100%); + filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#00103b47', endColorstr='#103b47',GradientType=0 ); +} + +.stats { + color: #FFF; +} + +.error { + color: #F58888 !important; + background: rgba(255, 45, 45, 0.35) !important; +} + +.spin-slowly, +.spin-once { + color: #33B5E5 !important; +} + +.frame .col, +footer { + border-color: #33B5E5; +} + +footer { + background: rgba(0, 0, 0, .5); +} + +footer .recording .fa { + color: #CC0000; +} + +footer .replay .fa { + color: #33B5E5; +} + +footer .paused .fa { + color: #AAA; +} + +footer .recording.replay .fa { + color: #33B5E5; +} + + + +.buildfail-pkg { + background: rgba(255, 255, 255, .1); +} +.buildfail-output { + background: rgba(255, 255, 255, .2); +} + + + +.panic-pkg { + background: rgba(255, 0, 0, .3); +} +.panic-story { + padding: 10px; + background: rgba(255, 0, 0, .1); +} 
+.panic-story a, +.panic-summary { + color: #E94A4A; +} +.panic-output { + color: #FF8181; +} + + + +.failure-pkg { + background: rgba(255, 153, 0, .42); +} +.failure-story { + padding: 10px; + background: rgba(255, 153, 0, .1); +} +.failure-story a { + color: #FFB518; +} +.failure-output { + color: #FFBD47; +} +.failure-file { + color: #FFF; +} + + +.diffviewer td { + border-color: rgba(0, 0, 0, .3); +} + +/* prettyTextDiff expected/deleted colors */ +.diffviewer .exp, +.diff del { + background: rgba(131, 252, 131, 0.22); +} + +/* prettyTextDiff actual/inserted colors */ +.diffviewer .act, +.diff ins { + background: rgba(255, 52, 52, 0.33); +} + + + +.story-links a, +.test-name-link a { + color: inherit; +} + + + +.story-pkg { + background: rgba(0, 0, 0, .4); +} + +.story-pkg:hover { + background: rgba(255, 255, 255, .05); +} + +.story-line + .story-line { + border-top: 1px dashed rgba(255, 255, 255, .08); +} + +.story-line-desc .message { + color: #999; +} + +.story-line-summary-container { + border-right: 1px dashed #333; +} + +.story-line.ok .story-line-status { background: #008000; } +.story-line.ok:hover, .story-line.ok.story-line-sel { background: rgba(0, 128, 0, .1); } + +.story-line.fail .story-line-status { background: #EA9C4D; } +.story-line.fail:hover, .story-line.fail.story-line-sel { background: rgba(234, 156, 77, .1); } + +.story-line.panic .story-line-status { background: #FF3232; } +.story-line.panic:hover, .story-line.panic.story-line-sel { background: rgba(255, 50, 50, .1); } + +.story-line.skip .story-line-status { background: #AAA; } +.story-line.skip:hover, .story-line.skip.story-line-sel { background: rgba(255, 255, 255, .1); } + +.statusicon.ok { color: #76C13C; } +.statusicon.fail, .fail-clr { color: #EA9C4D; } +.statusicon.panic, .statusicon.panic .fa, .panic-clr { color: #FF3232; } +.statusicon.skip, .skip-clr { color: #888; } + +.ansi-green { color: #76C13C; } +.ansi-yellow { color: #EA9C4D; } +.ansi-red { color: #FF3232; } +.ansi-black 
{ color: #000000; } +.ansi-blue { color: #FF3232; } +.ansi-purple { color: #C646C6; } +.ansi-cyan { color: #00CDCD; } +.ansi-white { color: #FFFFFF; } + +.log .timestamp { + color: #999; +} + + +.clr-red { + color: #FF2222; +} + + +.tipsy-inner { + background-color: #FAFAFA; + color: #222; +} + +.tipsy-arrow { + border: 8px dashed #FAFAFA; +} + +.tipsy-arrow-n, +.tipsy-arrow-s, +.tipsy-arrow-e, +.tipsy-arrow-w, +{ + border-color: #FAFAFA; +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/light.css b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/light.css new file mode 100644 index 0000000000..decfc7f413 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/light.css @@ -0,0 +1,328 @@ +::-webkit-scrollbar-thumb { + background-color: rgba(0, 0, 0, .35); + border-radius: 10px; +} + +::-webkit-input-placeholder { + color: #CCC; +} +:-moz-placeholder { + color: #CCC; +} +::-moz-placeholder { + color: #CCC; +} +:-ms-input-placeholder { + color: #CCC; +} + +body { + color: #444; + background: #F4F4F4; +} + +a { + color: #247D9E; +} + +a:hover { + color: #33B5E5; +} + +.overall.ok, +.history .item.ok { + background: #8CB700; /* Can't decide: #5AA02C */ +} + +.overall.fail, +.history .item.fail { + background: #E79C07; +} + +.overall.panic, +.history .item.panic { + background: #BB0000; +} + +.overall.buildfail, +.history .item.buildfail { + background: #828c95; +} + +.overall .status { + color: #EEE; +} + +.server-down { + background: #BB0000; + color: #FFF; +} + +.toggler { + background: #6887A3; + color: #FFF; +} + +.toggler:hover { + background: #465B6D; +} + +.toggler .fa { + color: #FFF; +} + +#logo { + color: #6887A3; +} + +.controls { + border-bottom: 1px solid #33B5E5; +} + +li.fa, +a.fa, +.toggle-all-pkg { + color: #6887A3; +} + +li.fa:hover, 
+a.fa:hover, +.toggle-all-pkg:hover { + color: #465B6D; +} + +li.fa:active, +a.fa:active, +.toggle-all-pkg:active { + color: #33B5E5; +} + +.controls li, +.enum > li { + border-left-color: #33B5E5; +} + +.controls li:hover, +.enum > li:hover { + background: #CFE6F9; +} + +.enum { + border-color: #33B5E5; +} + +.sel { + background: #33B5E5 !important; + color: #FFF !important; +} + +.pkg-cover-name b, +.story-pkg-name b { + color: #000; + font-weight: bold; +} + +.expandable { + background: rgba(0, 0, 0, .1); + border-top-color: #33B5E5; +} + +.history .item { + color: #FFF; +} + +.spin-slowly, +.spin-once { + color: #33B5E5 !important; +} + + +input[type=text] { + border-bottom-color: #33B5E5; + color: #333; +} + +.error { + color: #CC0000 !important; + background: #FFD2D2 !important; +} + + +footer { + background: #F4F4F4; +} + +.frame .col, +footer { + border-color: #33B5E5; +} + +footer .recording .fa { + color: #CC0000; +} + +footer .replay .fa { + color: #33B5E5; +} + +footer .paused .fa { + color: #333; +} + + +.buildfail-pkg { + background: #CCC; +} +.buildfail-output { + background: #EEE; +} + + + +.panic-pkg { + background: #E94D4D; + color: #FFF; +} +.panics .panic-details { + border: 5px solid #E94D4D; + border-top: 0; + border-bottom: 0; +} +.panic-details { + color: #CC0000; +} +.panics .panic:last-child .panic-details { + border-bottom: 5px solid #E94D4D; +} +.panic-story { + padding: 10px; +} +.panics .panic-output { + background: #FFF; +} + + + + +.failure-pkg { + background: #FFA300; + color: #FFF; +} +.failures .failure-details { + border: 5px solid #FFA300; + border-top: 0; + border-bottom: 0; +} +.failures .failure:last-child .failure-details { + border-bottom: 5px solid #FFA300; +} +.failure-story { + padding: 10px; + color: #A87A00; +} +.stories .failure-output { + color: #EA9C4D; +} +.failures .failure-output { + background: #FFF; +} +.failure-file { + color: #000; +} + +.diffviewer td { + border-color: #CCC; + background: #FFF; +} + +/* 
prettyTextDiff expected/deleted colors */ +.diffviewer .exp, +.diff del { + background: #ADFFAD; +} + +/* prettyTextDiff actual/inserted colors */ +.diffviewer .act, +.diff ins { + background: #FFC0C0; +} + + + +.story-links a, +.test-name-link a { + color: inherit; +} + + + +.story-pkg { + background: #E8E8E8; +} + +.story-pkg:hover { + background: #DFDFDF; +} + +.story-line { + background: #FFF; +} + +.story-line-desc .message { + color: #888; +} + +.story-line + .story-line { + border-top: 1px dashed #DDD; +} + +.story-line-summary-container { + border-right: 1px dashed #DDD; +} + +.story-line.ok .story-line-status { background: #8CB700; } +.story-line.ok:hover, .story-line.ok.story-line-sel { background: #F4FFD8; } + +.story-line.fail .story-line-status { background: #E79C07; } +.story-line.fail:hover, .story-line.fail.story-line-sel { background: #FFF1DB; } + +.story-line.panic .story-line-status { background: #DD0606; } +.story-line.panic:hover, .story-line.panic.story-line-sel { background: #FFE8E8; } + +.story-line.skip .story-line-status { background: #4E4E4E; } +.story-line.skip:hover, .story-line.skip.story-line-sel { background: #F2F2F2; } + +.statusicon.ok { color: #76C13C; } +.statusicon.fail, .fail-clr { color: #EA9C4D; } +.statusicon.panic, .statusicon.panic .fa, .panic-clr { color: #FF3232; } +.statusicon.skip, .skip-clr { color: #AAA; } + +.ansi-green { color: #76C13C; } +.ansi-yellow { color: #EA9C4D; } + +.log .timestamp { + color: #999; +} + +.clr-red, +a.clr-red { + color: #CC0000; +} + + +.tipsy-inner { + background-color: #000; + color: #FFF; +} + +.tipsy-arrow { + border: 8px dashed #000; +} + +.tipsy-arrow-n, +.tipsy-arrow-s, +.tipsy-arrow-e, +.tipsy-arrow-w, +{ + border-color: #000; +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/css/tipsy.css b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/css/tipsy.css new file mode 
100644 index 0000000000..25d261a4ff --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/css/tipsy.css @@ -0,0 +1,97 @@ +.tipsy { + font-size: 12px; + position: absolute; + padding: 8px; + z-index: 100000; + font-family: 'Open Sans'; + line-height: 1.25em; +} + +.tipsy-inner { + max-width: 200px; + padding: 5px 7px; + text-align: center; +} + +/* Rounded corners */ +/*.tipsy-inner { border-radius: 3px; -moz-border-radius: 3px; -webkit-border-radius: 3px; }*/ + +/* Shadow */ +/*.tipsy-inner { box-shadow: 0 0 5px #000000; -webkit-box-shadow: 0 0 5px #000000; -moz-box-shadow: 0 0 5px #000000; }*/ + +.tipsy-arrow { + position: absolute; + width: 0; + height: 0; + line-height: 0; +} + +.tipsy-n .tipsy-arrow, +.tipsy-nw .tipsy-arrow, +.tipsy-ne .tipsy-arrow { + border-bottom-style: solid; + border-top: none; + border-left-color: transparent; + border-right-color: transparent; +} + + +.tipsy-n .tipsy-arrow { + top: 0px; + left: 50%; + margin-left: -7px; +} +.tipsy-nw .tipsy-arrow { + top: 0; + left: 10px; +} +.tipsy-ne .tipsy-arrow { + top: 0; + right: 10px; +} + +.tipsy-s .tipsy-arrow, +.tipsy-sw .tipsy-arrow, +.tipsy-se .tipsy-arrow { + border-top-style: solid; + border-bottom: none; + border-left-color: transparent; + border-right-color: transparent; +} + + +.tipsy-s .tipsy-arrow { + bottom: 0; + left: 50%; + margin-left: -7px; +} + +.tipsy-sw .tipsy-arrow { + bottom: 0; + left: 10px; +} + +.tipsy-se .tipsy-arrow { + bottom: 0; + right: 10px; +} + +.tipsy-e .tipsy-arrow { + right: 0; + top: 50%; + margin-top: -7px; + border-left-style: solid; + border-right: none; + border-top-color: transparent; + border-bottom-color: transparent; +} + +.tipsy-w .tipsy-arrow { + left: 0; + top: 50%; + margin-top: -7px; + border-right-style: solid; + border-left: none; + border-top-color: transparent; + border-bottom-color: transparent; +} \ No newline at end of file diff --git 
a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/README.md b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/README.md new file mode 100644 index 0000000000..abe2489f38 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/README.md @@ -0,0 +1,100 @@ +#[Font Awesome v4.5.0](http://fontawesome.io) +###The iconic font and CSS framework + +Font Awesome is a full suite of 605 pictographic icons for easy scalable vector graphics on websites, +created and maintained by [Dave Gandy](http://twitter.com/davegandy). +Stay up to date with the latest release and announcements on Twitter: +[@fontawesome](http://twitter.com/fontawesome). + +Get started at http://fontawesome.io! + +##License +- The Font Awesome font is licensed under the SIL OFL 1.1: + - http://scripts.sil.org/OFL +- Font Awesome CSS, LESS, and Sass files are licensed under the MIT License: + - http://opensource.org/licenses/mit-license.html +- The Font Awesome documentation is licensed under the CC BY 3.0 License: + - http://creativecommons.org/licenses/by/3.0/ +- Attribution is no longer required as of Font Awesome 3.0, but much appreciated: + - `Font Awesome by Dave Gandy - http://fontawesome.io` +- Full details: http://fontawesome.io/license + +##Changelog +- [v4.5.0 GitHub milestones](https://github.com/FortAwesome/Font-Awesome/issues?q=milestone%3A4.5.0+is%3Aclosed) +- [v4.4.0 GitHub milestones](https://github.com/FortAwesome/Font-Awesome/issues?q=milestone%3A4.4.0+is%3Aclosed) +- [v4.3.0 GitHub milestones](https://github.com/FortAwesome/Font-Awesome/issues?q=milestone%3A4.3.0+is%3Aclosed) +- [v4.2.0 GitHub milestones](https://github.com/FortAwesome/Font-Awesome/issues?milestone=12&page=1&state=closed) +- [v4.1.0 GitHub 
milestones](https://github.com/FortAwesome/Font-Awesome/issues?milestone=6&page=1&state=closed) +- [v4.0.3 GitHub milestones](https://github.com/FortAwesome/Font-Awesome/issues?milestone=9&page=1&state=closed) +- [v4.0.2 GitHub milestones](https://github.com/FortAwesome/Font-Awesome/issues?milestone=8&page=1&state=closed) +- [v4.0.1 GitHub milestones](https://github.com/FortAwesome/Font-Awesome/issues?milestone=7&page=1&state=closed) +- [v4.0.0 GitHub milestones](https://github.com/FortAwesome/Font-Awesome/issues?milestone=2&page=1&state=closed) +- [v3.2.1 GitHub milestones](https://github.com/FortAwesome/Font-Awesome/issues?milestone=5&page=1&state=closed) +- [v3.2.0 GitHub milestones](https://github.com/FortAwesome/Font-Awesome/issues?milestone=3&page=1&state=closed) +- [v3.1.1 GitHub milestones](https://github.com/FortAwesome/Font-Awesome/issues?milestone=4&page=1&state=closed) +- v3.1.0 - Added 54 icons, icon stacking styles, flipping and rotating icons, removed Sass support +- v3.0.2 - much improved rendering and alignment in IE7 +- v3.0.1 - much improved rendering in webkit, various bug fixes +- v3.0.0 - all icons redesigned from scratch, optimized for Bootstrap's 14px default + +## Contributing + +Please read through our [contributing guidelines](https://github.com/FortAwesome/Font-Awesome/blob/master/CONTRIBUTING.md). +Included are directions for opening issues, coding standards, and notes on development. + +##Versioning + +Font Awesome will be maintained under the Semantic Versioning guidelines as much as possible. Releases will be numbered +with the following format: + +`..` + +And constructed with the following guidelines: + +* Breaking backward compatibility bumps the major (and resets the minor and patch) +* New additions, including new icons, without breaking backward compatibility bumps the minor (and resets the patch) +* Bug fixes and misc changes bumps the patch + +For more information on SemVer, please visit http://semver.org. 
+ +##Author +- Email: dave@fontawesome.io +- Twitter: http://twitter.com/davegandy +- GitHub: https://github.com/davegandy + +##Component +To include as a [component](http://github.com/component/component), just run + + $ component install FortAwesome/Font-Awesome + +Or add + + "FortAwesome/Font-Awesome": "*" + +to the `dependencies` in your `component.json`. + +## Hacking on Font Awesome + +**Before you can build the project**, you must first have the following installed: + +- [Ruby](https://www.ruby-lang.org/en/) +- Ruby Development Headers + - **Ubuntu:** `sudo apt-get install ruby-dev` *(Only if you're __NOT__ using `rbenv` or `rvm`)* + - **Windows:** [DevKit](http://rubyinstaller.org/) +- [Bundler](http://bundler.io/) (Run `gem install bundler` to install). +- [Node Package Manager (AKA NPM)](https://docs.npmjs.com/getting-started/installing-node) +- [Less](http://lesscss.org/) (Run `npm install -g less` to install). +- [Less Plugin: Clean CSS](https://github.com/less/less-plugin-clean-css) (Run `npm install -g less-plugin-clean-css` to install). + +From the root of the repository, install the tools used to develop. + + $ bundle install + $ npm install + +Build the project and documentation: + + $ bundle exec jekyll build + +Or serve it on a local server on http://localhost:7998/Font-Awesome/: + + $ bundle exec jekyll -w serve diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/css/font-awesome.css b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/css/font-awesome.css new file mode 100644 index 0000000000..b2a5fe2f25 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/css/font-awesome.css @@ -0,0 +1,2086 @@ +/*! 
+ * Font Awesome 4.5.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */ +/* FONT PATH + * -------------------------- */ +@font-face { + font-family: 'FontAwesome'; + src: url('../fonts/fontawesome-webfont.eot?v=4.5.0'); + src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.5.0') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff2?v=4.5.0') format('woff2'), url('../fonts/fontawesome-webfont.woff?v=4.5.0') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.5.0') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.5.0#fontawesomeregular') format('svg'); + font-weight: normal; + font-style: normal; +} +.fa { + display: inline-block; + font: normal normal normal 14px/1 FontAwesome; + font-size: inherit; + text-rendering: auto; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} +/* makes the font 33% larger relative to the icon container */ +.fa-lg { + font-size: 1.33333333em; + line-height: 0.75em; + vertical-align: -15%; +} +.fa-2x { + font-size: 2em; +} +.fa-3x { + font-size: 3em; +} +.fa-4x { + font-size: 4em; +} +.fa-5x { + font-size: 5em; +} +.fa-fw { + width: 1.28571429em; + text-align: center; +} +.fa-ul { + padding-left: 0; + margin-left: 2.14285714em; + list-style-type: none; +} +.fa-ul > li { + position: relative; +} +.fa-li { + position: absolute; + left: -2.14285714em; + width: 2.14285714em; + top: 0.14285714em; + text-align: center; +} +.fa-li.fa-lg { + left: -1.85714286em; +} +.fa-border { + padding: .2em .25em .15em; + border: solid 0.08em #eeeeee; + border-radius: .1em; +} +.fa-pull-left { + float: left; +} +.fa-pull-right { + float: right; +} +.fa.fa-pull-left { + margin-right: .3em; +} +.fa.fa-pull-right { + margin-left: .3em; +} +/* Deprecated as of 4.4.0 */ +.pull-right { + float: right; +} +.pull-left { + float: left; +} +.fa.pull-left { + margin-right: .3em; +} +.fa.pull-right { + margin-left: 
.3em; +} +.fa-spin { + -webkit-animation: fa-spin 2s infinite linear; + animation: fa-spin 2s infinite linear; +} +.fa-pulse { + -webkit-animation: fa-spin 1s infinite steps(8); + animation: fa-spin 1s infinite steps(8); +} +@-webkit-keyframes fa-spin { + 0% { + -webkit-transform: rotate(0deg); + transform: rotate(0deg); + } + 100% { + -webkit-transform: rotate(359deg); + transform: rotate(359deg); + } +} +@keyframes fa-spin { + 0% { + -webkit-transform: rotate(0deg); + transform: rotate(0deg); + } + 100% { + -webkit-transform: rotate(359deg); + transform: rotate(359deg); + } +} +.fa-rotate-90 { + filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=1); + -webkit-transform: rotate(90deg); + -ms-transform: rotate(90deg); + transform: rotate(90deg); +} +.fa-rotate-180 { + filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2); + -webkit-transform: rotate(180deg); + -ms-transform: rotate(180deg); + transform: rotate(180deg); +} +.fa-rotate-270 { + filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=3); + -webkit-transform: rotate(270deg); + -ms-transform: rotate(270deg); + transform: rotate(270deg); +} +.fa-flip-horizontal { + filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1); + -webkit-transform: scale(-1, 1); + -ms-transform: scale(-1, 1); + transform: scale(-1, 1); +} +.fa-flip-vertical { + filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1); + -webkit-transform: scale(1, -1); + -ms-transform: scale(1, -1); + transform: scale(1, -1); +} +:root .fa-rotate-90, +:root .fa-rotate-180, +:root .fa-rotate-270, +:root .fa-flip-horizontal, +:root .fa-flip-vertical { + filter: none; +} +.fa-stack { + position: relative; + display: inline-block; + width: 2em; + height: 2em; + line-height: 2em; + vertical-align: middle; +} +.fa-stack-1x, +.fa-stack-2x { + position: absolute; + left: 0; + width: 100%; + text-align: center; +} +.fa-stack-1x { + line-height: inherit; +} +.fa-stack-2x { + font-size: 2em; 
+} +.fa-inverse { + color: #ffffff; +} +/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen + readers do not read off random characters that represent icons */ +.fa-glass:before { + content: "\f000"; +} +.fa-music:before { + content: "\f001"; +} +.fa-search:before { + content: "\f002"; +} +.fa-envelope-o:before { + content: "\f003"; +} +.fa-heart:before { + content: "\f004"; +} +.fa-star:before { + content: "\f005"; +} +.fa-star-o:before { + content: "\f006"; +} +.fa-user:before { + content: "\f007"; +} +.fa-film:before { + content: "\f008"; +} +.fa-th-large:before { + content: "\f009"; +} +.fa-th:before { + content: "\f00a"; +} +.fa-th-list:before { + content: "\f00b"; +} +.fa-check:before { + content: "\f00c"; +} +.fa-remove:before, +.fa-close:before, +.fa-times:before { + content: "\f00d"; +} +.fa-search-plus:before { + content: "\f00e"; +} +.fa-search-minus:before { + content: "\f010"; +} +.fa-power-off:before { + content: "\f011"; +} +.fa-signal:before { + content: "\f012"; +} +.fa-gear:before, +.fa-cog:before { + content: "\f013"; +} +.fa-trash-o:before { + content: "\f014"; +} +.fa-home:before { + content: "\f015"; +} +.fa-file-o:before { + content: "\f016"; +} +.fa-clock-o:before { + content: "\f017"; +} +.fa-road:before { + content: "\f018"; +} +.fa-download:before { + content: "\f019"; +} +.fa-arrow-circle-o-down:before { + content: "\f01a"; +} +.fa-arrow-circle-o-up:before { + content: "\f01b"; +} +.fa-inbox:before { + content: "\f01c"; +} +.fa-play-circle-o:before { + content: "\f01d"; +} +.fa-rotate-right:before, +.fa-repeat:before { + content: "\f01e"; +} +.fa-refresh:before { + content: "\f021"; +} +.fa-list-alt:before { + content: "\f022"; +} +.fa-lock:before { + content: "\f023"; +} +.fa-flag:before { + content: "\f024"; +} +.fa-headphones:before { + content: "\f025"; +} +.fa-volume-off:before { + content: "\f026"; +} +.fa-volume-down:before { + content: "\f027"; +} +.fa-volume-up:before { + content: "\f028"; +} 
+.fa-qrcode:before { + content: "\f029"; +} +.fa-barcode:before { + content: "\f02a"; +} +.fa-tag:before { + content: "\f02b"; +} +.fa-tags:before { + content: "\f02c"; +} +.fa-book:before { + content: "\f02d"; +} +.fa-bookmark:before { + content: "\f02e"; +} +.fa-print:before { + content: "\f02f"; +} +.fa-camera:before { + content: "\f030"; +} +.fa-font:before { + content: "\f031"; +} +.fa-bold:before { + content: "\f032"; +} +.fa-italic:before { + content: "\f033"; +} +.fa-text-height:before { + content: "\f034"; +} +.fa-text-width:before { + content: "\f035"; +} +.fa-align-left:before { + content: "\f036"; +} +.fa-align-center:before { + content: "\f037"; +} +.fa-align-right:before { + content: "\f038"; +} +.fa-align-justify:before { + content: "\f039"; +} +.fa-list:before { + content: "\f03a"; +} +.fa-dedent:before, +.fa-outdent:before { + content: "\f03b"; +} +.fa-indent:before { + content: "\f03c"; +} +.fa-video-camera:before { + content: "\f03d"; +} +.fa-photo:before, +.fa-image:before, +.fa-picture-o:before { + content: "\f03e"; +} +.fa-pencil:before { + content: "\f040"; +} +.fa-map-marker:before { + content: "\f041"; +} +.fa-adjust:before { + content: "\f042"; +} +.fa-tint:before { + content: "\f043"; +} +.fa-edit:before, +.fa-pencil-square-o:before { + content: "\f044"; +} +.fa-share-square-o:before { + content: "\f045"; +} +.fa-check-square-o:before { + content: "\f046"; +} +.fa-arrows:before { + content: "\f047"; +} +.fa-step-backward:before { + content: "\f048"; +} +.fa-fast-backward:before { + content: "\f049"; +} +.fa-backward:before { + content: "\f04a"; +} +.fa-play:before { + content: "\f04b"; +} +.fa-pause:before { + content: "\f04c"; +} +.fa-stop:before { + content: "\f04d"; +} +.fa-forward:before { + content: "\f04e"; +} +.fa-fast-forward:before { + content: "\f050"; +} +.fa-step-forward:before { + content: "\f051"; +} +.fa-eject:before { + content: "\f052"; +} +.fa-chevron-left:before { + content: "\f053"; +} +.fa-chevron-right:before { + 
content: "\f054"; +} +.fa-plus-circle:before { + content: "\f055"; +} +.fa-minus-circle:before { + content: "\f056"; +} +.fa-times-circle:before { + content: "\f057"; +} +.fa-check-circle:before { + content: "\f058"; +} +.fa-question-circle:before { + content: "\f059"; +} +.fa-info-circle:before { + content: "\f05a"; +} +.fa-crosshairs:before { + content: "\f05b"; +} +.fa-times-circle-o:before { + content: "\f05c"; +} +.fa-check-circle-o:before { + content: "\f05d"; +} +.fa-ban:before { + content: "\f05e"; +} +.fa-arrow-left:before { + content: "\f060"; +} +.fa-arrow-right:before { + content: "\f061"; +} +.fa-arrow-up:before { + content: "\f062"; +} +.fa-arrow-down:before { + content: "\f063"; +} +.fa-mail-forward:before, +.fa-share:before { + content: "\f064"; +} +.fa-expand:before { + content: "\f065"; +} +.fa-compress:before { + content: "\f066"; +} +.fa-plus:before { + content: "\f067"; +} +.fa-minus:before { + content: "\f068"; +} +.fa-asterisk:before { + content: "\f069"; +} +.fa-exclamation-circle:before { + content: "\f06a"; +} +.fa-gift:before { + content: "\f06b"; +} +.fa-leaf:before { + content: "\f06c"; +} +.fa-fire:before { + content: "\f06d"; +} +.fa-eye:before { + content: "\f06e"; +} +.fa-eye-slash:before { + content: "\f070"; +} +.fa-warning:before, +.fa-exclamation-triangle:before { + content: "\f071"; +} +.fa-plane:before { + content: "\f072"; +} +.fa-calendar:before { + content: "\f073"; +} +.fa-random:before { + content: "\f074"; +} +.fa-comment:before { + content: "\f075"; +} +.fa-magnet:before { + content: "\f076"; +} +.fa-chevron-up:before { + content: "\f077"; +} +.fa-chevron-down:before { + content: "\f078"; +} +.fa-retweet:before { + content: "\f079"; +} +.fa-shopping-cart:before { + content: "\f07a"; +} +.fa-folder:before { + content: "\f07b"; +} +.fa-folder-open:before { + content: "\f07c"; +} +.fa-arrows-v:before { + content: "\f07d"; +} +.fa-arrows-h:before { + content: "\f07e"; +} +.fa-bar-chart-o:before, +.fa-bar-chart:before { + 
content: "\f080"; +} +.fa-twitter-square:before { + content: "\f081"; +} +.fa-facebook-square:before { + content: "\f082"; +} +.fa-camera-retro:before { + content: "\f083"; +} +.fa-key:before { + content: "\f084"; +} +.fa-gears:before, +.fa-cogs:before { + content: "\f085"; +} +.fa-comments:before { + content: "\f086"; +} +.fa-thumbs-o-up:before { + content: "\f087"; +} +.fa-thumbs-o-down:before { + content: "\f088"; +} +.fa-star-half:before { + content: "\f089"; +} +.fa-heart-o:before { + content: "\f08a"; +} +.fa-sign-out:before { + content: "\f08b"; +} +.fa-linkedin-square:before { + content: "\f08c"; +} +.fa-thumb-tack:before { + content: "\f08d"; +} +.fa-external-link:before { + content: "\f08e"; +} +.fa-sign-in:before { + content: "\f090"; +} +.fa-trophy:before { + content: "\f091"; +} +.fa-github-square:before { + content: "\f092"; +} +.fa-upload:before { + content: "\f093"; +} +.fa-lemon-o:before { + content: "\f094"; +} +.fa-phone:before { + content: "\f095"; +} +.fa-square-o:before { + content: "\f096"; +} +.fa-bookmark-o:before { + content: "\f097"; +} +.fa-phone-square:before { + content: "\f098"; +} +.fa-twitter:before { + content: "\f099"; +} +.fa-facebook-f:before, +.fa-facebook:before { + content: "\f09a"; +} +.fa-github:before { + content: "\f09b"; +} +.fa-unlock:before { + content: "\f09c"; +} +.fa-credit-card:before { + content: "\f09d"; +} +.fa-feed:before, +.fa-rss:before { + content: "\f09e"; +} +.fa-hdd-o:before { + content: "\f0a0"; +} +.fa-bullhorn:before { + content: "\f0a1"; +} +.fa-bell:before { + content: "\f0f3"; +} +.fa-certificate:before { + content: "\f0a3"; +} +.fa-hand-o-right:before { + content: "\f0a4"; +} +.fa-hand-o-left:before { + content: "\f0a5"; +} +.fa-hand-o-up:before { + content: "\f0a6"; +} +.fa-hand-o-down:before { + content: "\f0a7"; +} +.fa-arrow-circle-left:before { + content: "\f0a8"; +} +.fa-arrow-circle-right:before { + content: "\f0a9"; +} +.fa-arrow-circle-up:before { + content: "\f0aa"; +} 
+.fa-arrow-circle-down:before { + content: "\f0ab"; +} +.fa-globe:before { + content: "\f0ac"; +} +.fa-wrench:before { + content: "\f0ad"; +} +.fa-tasks:before { + content: "\f0ae"; +} +.fa-filter:before { + content: "\f0b0"; +} +.fa-briefcase:before { + content: "\f0b1"; +} +.fa-arrows-alt:before { + content: "\f0b2"; +} +.fa-group:before, +.fa-users:before { + content: "\f0c0"; +} +.fa-chain:before, +.fa-link:before { + content: "\f0c1"; +} +.fa-cloud:before { + content: "\f0c2"; +} +.fa-flask:before { + content: "\f0c3"; +} +.fa-cut:before, +.fa-scissors:before { + content: "\f0c4"; +} +.fa-copy:before, +.fa-files-o:before { + content: "\f0c5"; +} +.fa-paperclip:before { + content: "\f0c6"; +} +.fa-save:before, +.fa-floppy-o:before { + content: "\f0c7"; +} +.fa-square:before { + content: "\f0c8"; +} +.fa-navicon:before, +.fa-reorder:before, +.fa-bars:before { + content: "\f0c9"; +} +.fa-list-ul:before { + content: "\f0ca"; +} +.fa-list-ol:before { + content: "\f0cb"; +} +.fa-strikethrough:before { + content: "\f0cc"; +} +.fa-underline:before { + content: "\f0cd"; +} +.fa-table:before { + content: "\f0ce"; +} +.fa-magic:before { + content: "\f0d0"; +} +.fa-truck:before { + content: "\f0d1"; +} +.fa-pinterest:before { + content: "\f0d2"; +} +.fa-pinterest-square:before { + content: "\f0d3"; +} +.fa-google-plus-square:before { + content: "\f0d4"; +} +.fa-google-plus:before { + content: "\f0d5"; +} +.fa-money:before { + content: "\f0d6"; +} +.fa-caret-down:before { + content: "\f0d7"; +} +.fa-caret-up:before { + content: "\f0d8"; +} +.fa-caret-left:before { + content: "\f0d9"; +} +.fa-caret-right:before { + content: "\f0da"; +} +.fa-columns:before { + content: "\f0db"; +} +.fa-unsorted:before, +.fa-sort:before { + content: "\f0dc"; +} +.fa-sort-down:before, +.fa-sort-desc:before { + content: "\f0dd"; +} +.fa-sort-up:before, +.fa-sort-asc:before { + content: "\f0de"; +} +.fa-envelope:before { + content: "\f0e0"; +} +.fa-linkedin:before { + content: "\f0e1"; +} 
+.fa-rotate-left:before, +.fa-undo:before { + content: "\f0e2"; +} +.fa-legal:before, +.fa-gavel:before { + content: "\f0e3"; +} +.fa-dashboard:before, +.fa-tachometer:before { + content: "\f0e4"; +} +.fa-comment-o:before { + content: "\f0e5"; +} +.fa-comments-o:before { + content: "\f0e6"; +} +.fa-flash:before, +.fa-bolt:before { + content: "\f0e7"; +} +.fa-sitemap:before { + content: "\f0e8"; +} +.fa-umbrella:before { + content: "\f0e9"; +} +.fa-paste:before, +.fa-clipboard:before { + content: "\f0ea"; +} +.fa-lightbulb-o:before { + content: "\f0eb"; +} +.fa-exchange:before { + content: "\f0ec"; +} +.fa-cloud-download:before { + content: "\f0ed"; +} +.fa-cloud-upload:before { + content: "\f0ee"; +} +.fa-user-md:before { + content: "\f0f0"; +} +.fa-stethoscope:before { + content: "\f0f1"; +} +.fa-suitcase:before { + content: "\f0f2"; +} +.fa-bell-o:before { + content: "\f0a2"; +} +.fa-coffee:before { + content: "\f0f4"; +} +.fa-cutlery:before { + content: "\f0f5"; +} +.fa-file-text-o:before { + content: "\f0f6"; +} +.fa-building-o:before { + content: "\f0f7"; +} +.fa-hospital-o:before { + content: "\f0f8"; +} +.fa-ambulance:before { + content: "\f0f9"; +} +.fa-medkit:before { + content: "\f0fa"; +} +.fa-fighter-jet:before { + content: "\f0fb"; +} +.fa-beer:before { + content: "\f0fc"; +} +.fa-h-square:before { + content: "\f0fd"; +} +.fa-plus-square:before { + content: "\f0fe"; +} +.fa-angle-double-left:before { + content: "\f100"; +} +.fa-angle-double-right:before { + content: "\f101"; +} +.fa-angle-double-up:before { + content: "\f102"; +} +.fa-angle-double-down:before { + content: "\f103"; +} +.fa-angle-left:before { + content: "\f104"; +} +.fa-angle-right:before { + content: "\f105"; +} +.fa-angle-up:before { + content: "\f106"; +} +.fa-angle-down:before { + content: "\f107"; +} +.fa-desktop:before { + content: "\f108"; +} +.fa-laptop:before { + content: "\f109"; +} +.fa-tablet:before { + content: "\f10a"; +} +.fa-mobile-phone:before, +.fa-mobile:before { + 
content: "\f10b"; +} +.fa-circle-o:before { + content: "\f10c"; +} +.fa-quote-left:before { + content: "\f10d"; +} +.fa-quote-right:before { + content: "\f10e"; +} +.fa-spinner:before { + content: "\f110"; +} +.fa-circle:before { + content: "\f111"; +} +.fa-mail-reply:before, +.fa-reply:before { + content: "\f112"; +} +.fa-github-alt:before { + content: "\f113"; +} +.fa-folder-o:before { + content: "\f114"; +} +.fa-folder-open-o:before { + content: "\f115"; +} +.fa-smile-o:before { + content: "\f118"; +} +.fa-frown-o:before { + content: "\f119"; +} +.fa-meh-o:before { + content: "\f11a"; +} +.fa-gamepad:before { + content: "\f11b"; +} +.fa-keyboard-o:before { + content: "\f11c"; +} +.fa-flag-o:before { + content: "\f11d"; +} +.fa-flag-checkered:before { + content: "\f11e"; +} +.fa-terminal:before { + content: "\f120"; +} +.fa-code:before { + content: "\f121"; +} +.fa-mail-reply-all:before, +.fa-reply-all:before { + content: "\f122"; +} +.fa-star-half-empty:before, +.fa-star-half-full:before, +.fa-star-half-o:before { + content: "\f123"; +} +.fa-location-arrow:before { + content: "\f124"; +} +.fa-crop:before { + content: "\f125"; +} +.fa-code-fork:before { + content: "\f126"; +} +.fa-unlink:before, +.fa-chain-broken:before { + content: "\f127"; +} +.fa-question:before { + content: "\f128"; +} +.fa-info:before { + content: "\f129"; +} +.fa-exclamation:before { + content: "\f12a"; +} +.fa-superscript:before { + content: "\f12b"; +} +.fa-subscript:before { + content: "\f12c"; +} +.fa-eraser:before { + content: "\f12d"; +} +.fa-puzzle-piece:before { + content: "\f12e"; +} +.fa-microphone:before { + content: "\f130"; +} +.fa-microphone-slash:before { + content: "\f131"; +} +.fa-shield:before { + content: "\f132"; +} +.fa-calendar-o:before { + content: "\f133"; +} +.fa-fire-extinguisher:before { + content: "\f134"; +} +.fa-rocket:before { + content: "\f135"; +} +.fa-maxcdn:before { + content: "\f136"; +} +.fa-chevron-circle-left:before { + content: "\f137"; +} 
+.fa-chevron-circle-right:before { + content: "\f138"; +} +.fa-chevron-circle-up:before { + content: "\f139"; +} +.fa-chevron-circle-down:before { + content: "\f13a"; +} +.fa-html5:before { + content: "\f13b"; +} +.fa-css3:before { + content: "\f13c"; +} +.fa-anchor:before { + content: "\f13d"; +} +.fa-unlock-alt:before { + content: "\f13e"; +} +.fa-bullseye:before { + content: "\f140"; +} +.fa-ellipsis-h:before { + content: "\f141"; +} +.fa-ellipsis-v:before { + content: "\f142"; +} +.fa-rss-square:before { + content: "\f143"; +} +.fa-play-circle:before { + content: "\f144"; +} +.fa-ticket:before { + content: "\f145"; +} +.fa-minus-square:before { + content: "\f146"; +} +.fa-minus-square-o:before { + content: "\f147"; +} +.fa-level-up:before { + content: "\f148"; +} +.fa-level-down:before { + content: "\f149"; +} +.fa-check-square:before { + content: "\f14a"; +} +.fa-pencil-square:before { + content: "\f14b"; +} +.fa-external-link-square:before { + content: "\f14c"; +} +.fa-share-square:before { + content: "\f14d"; +} +.fa-compass:before { + content: "\f14e"; +} +.fa-toggle-down:before, +.fa-caret-square-o-down:before { + content: "\f150"; +} +.fa-toggle-up:before, +.fa-caret-square-o-up:before { + content: "\f151"; +} +.fa-toggle-right:before, +.fa-caret-square-o-right:before { + content: "\f152"; +} +.fa-euro:before, +.fa-eur:before { + content: "\f153"; +} +.fa-gbp:before { + content: "\f154"; +} +.fa-dollar:before, +.fa-usd:before { + content: "\f155"; +} +.fa-rupee:before, +.fa-inr:before { + content: "\f156"; +} +.fa-cny:before, +.fa-rmb:before, +.fa-yen:before, +.fa-jpy:before { + content: "\f157"; +} +.fa-ruble:before, +.fa-rouble:before, +.fa-rub:before { + content: "\f158"; +} +.fa-won:before, +.fa-krw:before { + content: "\f159"; +} +.fa-bitcoin:before, +.fa-btc:before { + content: "\f15a"; +} +.fa-file:before { + content: "\f15b"; +} +.fa-file-text:before { + content: "\f15c"; +} +.fa-sort-alpha-asc:before { + content: "\f15d"; +} 
+.fa-sort-alpha-desc:before { + content: "\f15e"; +} +.fa-sort-amount-asc:before { + content: "\f160"; +} +.fa-sort-amount-desc:before { + content: "\f161"; +} +.fa-sort-numeric-asc:before { + content: "\f162"; +} +.fa-sort-numeric-desc:before { + content: "\f163"; +} +.fa-thumbs-up:before { + content: "\f164"; +} +.fa-thumbs-down:before { + content: "\f165"; +} +.fa-youtube-square:before { + content: "\f166"; +} +.fa-youtube:before { + content: "\f167"; +} +.fa-xing:before { + content: "\f168"; +} +.fa-xing-square:before { + content: "\f169"; +} +.fa-youtube-play:before { + content: "\f16a"; +} +.fa-dropbox:before { + content: "\f16b"; +} +.fa-stack-overflow:before { + content: "\f16c"; +} +.fa-instagram:before { + content: "\f16d"; +} +.fa-flickr:before { + content: "\f16e"; +} +.fa-adn:before { + content: "\f170"; +} +.fa-bitbucket:before { + content: "\f171"; +} +.fa-bitbucket-square:before { + content: "\f172"; +} +.fa-tumblr:before { + content: "\f173"; +} +.fa-tumblr-square:before { + content: "\f174"; +} +.fa-long-arrow-down:before { + content: "\f175"; +} +.fa-long-arrow-up:before { + content: "\f176"; +} +.fa-long-arrow-left:before { + content: "\f177"; +} +.fa-long-arrow-right:before { + content: "\f178"; +} +.fa-apple:before { + content: "\f179"; +} +.fa-windows:before { + content: "\f17a"; +} +.fa-android:before { + content: "\f17b"; +} +.fa-linux:before { + content: "\f17c"; +} +.fa-dribbble:before { + content: "\f17d"; +} +.fa-skype:before { + content: "\f17e"; +} +.fa-foursquare:before { + content: "\f180"; +} +.fa-trello:before { + content: "\f181"; +} +.fa-female:before { + content: "\f182"; +} +.fa-male:before { + content: "\f183"; +} +.fa-gittip:before, +.fa-gratipay:before { + content: "\f184"; +} +.fa-sun-o:before { + content: "\f185"; +} +.fa-moon-o:before { + content: "\f186"; +} +.fa-archive:before { + content: "\f187"; +} +.fa-bug:before { + content: "\f188"; +} +.fa-vk:before { + content: "\f189"; +} +.fa-weibo:before { + content: 
"\f18a"; +} +.fa-renren:before { + content: "\f18b"; +} +.fa-pagelines:before { + content: "\f18c"; +} +.fa-stack-exchange:before { + content: "\f18d"; +} +.fa-arrow-circle-o-right:before { + content: "\f18e"; +} +.fa-arrow-circle-o-left:before { + content: "\f190"; +} +.fa-toggle-left:before, +.fa-caret-square-o-left:before { + content: "\f191"; +} +.fa-dot-circle-o:before { + content: "\f192"; +} +.fa-wheelchair:before { + content: "\f193"; +} +.fa-vimeo-square:before { + content: "\f194"; +} +.fa-turkish-lira:before, +.fa-try:before { + content: "\f195"; +} +.fa-plus-square-o:before { + content: "\f196"; +} +.fa-space-shuttle:before { + content: "\f197"; +} +.fa-slack:before { + content: "\f198"; +} +.fa-envelope-square:before { + content: "\f199"; +} +.fa-wordpress:before { + content: "\f19a"; +} +.fa-openid:before { + content: "\f19b"; +} +.fa-institution:before, +.fa-bank:before, +.fa-university:before { + content: "\f19c"; +} +.fa-mortar-board:before, +.fa-graduation-cap:before { + content: "\f19d"; +} +.fa-yahoo:before { + content: "\f19e"; +} +.fa-google:before { + content: "\f1a0"; +} +.fa-reddit:before { + content: "\f1a1"; +} +.fa-reddit-square:before { + content: "\f1a2"; +} +.fa-stumbleupon-circle:before { + content: "\f1a3"; +} +.fa-stumbleupon:before { + content: "\f1a4"; +} +.fa-delicious:before { + content: "\f1a5"; +} +.fa-digg:before { + content: "\f1a6"; +} +.fa-pied-piper:before { + content: "\f1a7"; +} +.fa-pied-piper-alt:before { + content: "\f1a8"; +} +.fa-drupal:before { + content: "\f1a9"; +} +.fa-joomla:before { + content: "\f1aa"; +} +.fa-language:before { + content: "\f1ab"; +} +.fa-fax:before { + content: "\f1ac"; +} +.fa-building:before { + content: "\f1ad"; +} +.fa-child:before { + content: "\f1ae"; +} +.fa-paw:before { + content: "\f1b0"; +} +.fa-spoon:before { + content: "\f1b1"; +} +.fa-cube:before { + content: "\f1b2"; +} +.fa-cubes:before { + content: "\f1b3"; +} +.fa-behance:before { + content: "\f1b4"; +} 
+.fa-behance-square:before { + content: "\f1b5"; +} +.fa-steam:before { + content: "\f1b6"; +} +.fa-steam-square:before { + content: "\f1b7"; +} +.fa-recycle:before { + content: "\f1b8"; +} +.fa-automobile:before, +.fa-car:before { + content: "\f1b9"; +} +.fa-cab:before, +.fa-taxi:before { + content: "\f1ba"; +} +.fa-tree:before { + content: "\f1bb"; +} +.fa-spotify:before { + content: "\f1bc"; +} +.fa-deviantart:before { + content: "\f1bd"; +} +.fa-soundcloud:before { + content: "\f1be"; +} +.fa-database:before { + content: "\f1c0"; +} +.fa-file-pdf-o:before { + content: "\f1c1"; +} +.fa-file-word-o:before { + content: "\f1c2"; +} +.fa-file-excel-o:before { + content: "\f1c3"; +} +.fa-file-powerpoint-o:before { + content: "\f1c4"; +} +.fa-file-photo-o:before, +.fa-file-picture-o:before, +.fa-file-image-o:before { + content: "\f1c5"; +} +.fa-file-zip-o:before, +.fa-file-archive-o:before { + content: "\f1c6"; +} +.fa-file-sound-o:before, +.fa-file-audio-o:before { + content: "\f1c7"; +} +.fa-file-movie-o:before, +.fa-file-video-o:before { + content: "\f1c8"; +} +.fa-file-code-o:before { + content: "\f1c9"; +} +.fa-vine:before { + content: "\f1ca"; +} +.fa-codepen:before { + content: "\f1cb"; +} +.fa-jsfiddle:before { + content: "\f1cc"; +} +.fa-life-bouy:before, +.fa-life-buoy:before, +.fa-life-saver:before, +.fa-support:before, +.fa-life-ring:before { + content: "\f1cd"; +} +.fa-circle-o-notch:before { + content: "\f1ce"; +} +.fa-ra:before, +.fa-rebel:before { + content: "\f1d0"; +} +.fa-ge:before, +.fa-empire:before { + content: "\f1d1"; +} +.fa-git-square:before { + content: "\f1d2"; +} +.fa-git:before { + content: "\f1d3"; +} +.fa-y-combinator-square:before, +.fa-yc-square:before, +.fa-hacker-news:before { + content: "\f1d4"; +} +.fa-tencent-weibo:before { + content: "\f1d5"; +} +.fa-qq:before { + content: "\f1d6"; +} +.fa-wechat:before, +.fa-weixin:before { + content: "\f1d7"; +} +.fa-send:before, +.fa-paper-plane:before { + content: "\f1d8"; +} 
+.fa-send-o:before, +.fa-paper-plane-o:before { + content: "\f1d9"; +} +.fa-history:before { + content: "\f1da"; +} +.fa-circle-thin:before { + content: "\f1db"; +} +.fa-header:before { + content: "\f1dc"; +} +.fa-paragraph:before { + content: "\f1dd"; +} +.fa-sliders:before { + content: "\f1de"; +} +.fa-share-alt:before { + content: "\f1e0"; +} +.fa-share-alt-square:before { + content: "\f1e1"; +} +.fa-bomb:before { + content: "\f1e2"; +} +.fa-soccer-ball-o:before, +.fa-futbol-o:before { + content: "\f1e3"; +} +.fa-tty:before { + content: "\f1e4"; +} +.fa-binoculars:before { + content: "\f1e5"; +} +.fa-plug:before { + content: "\f1e6"; +} +.fa-slideshare:before { + content: "\f1e7"; +} +.fa-twitch:before { + content: "\f1e8"; +} +.fa-yelp:before { + content: "\f1e9"; +} +.fa-newspaper-o:before { + content: "\f1ea"; +} +.fa-wifi:before { + content: "\f1eb"; +} +.fa-calculator:before { + content: "\f1ec"; +} +.fa-paypal:before { + content: "\f1ed"; +} +.fa-google-wallet:before { + content: "\f1ee"; +} +.fa-cc-visa:before { + content: "\f1f0"; +} +.fa-cc-mastercard:before { + content: "\f1f1"; +} +.fa-cc-discover:before { + content: "\f1f2"; +} +.fa-cc-amex:before { + content: "\f1f3"; +} +.fa-cc-paypal:before { + content: "\f1f4"; +} +.fa-cc-stripe:before { + content: "\f1f5"; +} +.fa-bell-slash:before { + content: "\f1f6"; +} +.fa-bell-slash-o:before { + content: "\f1f7"; +} +.fa-trash:before { + content: "\f1f8"; +} +.fa-copyright:before { + content: "\f1f9"; +} +.fa-at:before { + content: "\f1fa"; +} +.fa-eyedropper:before { + content: "\f1fb"; +} +.fa-paint-brush:before { + content: "\f1fc"; +} +.fa-birthday-cake:before { + content: "\f1fd"; +} +.fa-area-chart:before { + content: "\f1fe"; +} +.fa-pie-chart:before { + content: "\f200"; +} +.fa-line-chart:before { + content: "\f201"; +} +.fa-lastfm:before { + content: "\f202"; +} +.fa-lastfm-square:before { + content: "\f203"; +} +.fa-toggle-off:before { + content: "\f204"; +} +.fa-toggle-on:before { + content: 
"\f205"; +} +.fa-bicycle:before { + content: "\f206"; +} +.fa-bus:before { + content: "\f207"; +} +.fa-ioxhost:before { + content: "\f208"; +} +.fa-angellist:before { + content: "\f209"; +} +.fa-cc:before { + content: "\f20a"; +} +.fa-shekel:before, +.fa-sheqel:before, +.fa-ils:before { + content: "\f20b"; +} +.fa-meanpath:before { + content: "\f20c"; +} +.fa-buysellads:before { + content: "\f20d"; +} +.fa-connectdevelop:before { + content: "\f20e"; +} +.fa-dashcube:before { + content: "\f210"; +} +.fa-forumbee:before { + content: "\f211"; +} +.fa-leanpub:before { + content: "\f212"; +} +.fa-sellsy:before { + content: "\f213"; +} +.fa-shirtsinbulk:before { + content: "\f214"; +} +.fa-simplybuilt:before { + content: "\f215"; +} +.fa-skyatlas:before { + content: "\f216"; +} +.fa-cart-plus:before { + content: "\f217"; +} +.fa-cart-arrow-down:before { + content: "\f218"; +} +.fa-diamond:before { + content: "\f219"; +} +.fa-ship:before { + content: "\f21a"; +} +.fa-user-secret:before { + content: "\f21b"; +} +.fa-motorcycle:before { + content: "\f21c"; +} +.fa-street-view:before { + content: "\f21d"; +} +.fa-heartbeat:before { + content: "\f21e"; +} +.fa-venus:before { + content: "\f221"; +} +.fa-mars:before { + content: "\f222"; +} +.fa-mercury:before { + content: "\f223"; +} +.fa-intersex:before, +.fa-transgender:before { + content: "\f224"; +} +.fa-transgender-alt:before { + content: "\f225"; +} +.fa-venus-double:before { + content: "\f226"; +} +.fa-mars-double:before { + content: "\f227"; +} +.fa-venus-mars:before { + content: "\f228"; +} +.fa-mars-stroke:before { + content: "\f229"; +} +.fa-mars-stroke-v:before { + content: "\f22a"; +} +.fa-mars-stroke-h:before { + content: "\f22b"; +} +.fa-neuter:before { + content: "\f22c"; +} +.fa-genderless:before { + content: "\f22d"; +} +.fa-facebook-official:before { + content: "\f230"; +} +.fa-pinterest-p:before { + content: "\f231"; +} +.fa-whatsapp:before { + content: "\f232"; +} +.fa-server:before { + content: "\f233"; 
+} +.fa-user-plus:before { + content: "\f234"; +} +.fa-user-times:before { + content: "\f235"; +} +.fa-hotel:before, +.fa-bed:before { + content: "\f236"; +} +.fa-viacoin:before { + content: "\f237"; +} +.fa-train:before { + content: "\f238"; +} +.fa-subway:before { + content: "\f239"; +} +.fa-medium:before { + content: "\f23a"; +} +.fa-yc:before, +.fa-y-combinator:before { + content: "\f23b"; +} +.fa-optin-monster:before { + content: "\f23c"; +} +.fa-opencart:before { + content: "\f23d"; +} +.fa-expeditedssl:before { + content: "\f23e"; +} +.fa-battery-4:before, +.fa-battery-full:before { + content: "\f240"; +} +.fa-battery-3:before, +.fa-battery-three-quarters:before { + content: "\f241"; +} +.fa-battery-2:before, +.fa-battery-half:before { + content: "\f242"; +} +.fa-battery-1:before, +.fa-battery-quarter:before { + content: "\f243"; +} +.fa-battery-0:before, +.fa-battery-empty:before { + content: "\f244"; +} +.fa-mouse-pointer:before { + content: "\f245"; +} +.fa-i-cursor:before { + content: "\f246"; +} +.fa-object-group:before { + content: "\f247"; +} +.fa-object-ungroup:before { + content: "\f248"; +} +.fa-sticky-note:before { + content: "\f249"; +} +.fa-sticky-note-o:before { + content: "\f24a"; +} +.fa-cc-jcb:before { + content: "\f24b"; +} +.fa-cc-diners-club:before { + content: "\f24c"; +} +.fa-clone:before { + content: "\f24d"; +} +.fa-balance-scale:before { + content: "\f24e"; +} +.fa-hourglass-o:before { + content: "\f250"; +} +.fa-hourglass-1:before, +.fa-hourglass-start:before { + content: "\f251"; +} +.fa-hourglass-2:before, +.fa-hourglass-half:before { + content: "\f252"; +} +.fa-hourglass-3:before, +.fa-hourglass-end:before { + content: "\f253"; +} +.fa-hourglass:before { + content: "\f254"; +} +.fa-hand-grab-o:before, +.fa-hand-rock-o:before { + content: "\f255"; +} +.fa-hand-stop-o:before, +.fa-hand-paper-o:before { + content: "\f256"; +} +.fa-hand-scissors-o:before { + content: "\f257"; +} +.fa-hand-lizard-o:before { + content: "\f258"; +} 
+.fa-hand-spock-o:before { + content: "\f259"; +} +.fa-hand-pointer-o:before { + content: "\f25a"; +} +.fa-hand-peace-o:before { + content: "\f25b"; +} +.fa-trademark:before { + content: "\f25c"; +} +.fa-registered:before { + content: "\f25d"; +} +.fa-creative-commons:before { + content: "\f25e"; +} +.fa-gg:before { + content: "\f260"; +} +.fa-gg-circle:before { + content: "\f261"; +} +.fa-tripadvisor:before { + content: "\f262"; +} +.fa-odnoklassniki:before { + content: "\f263"; +} +.fa-odnoklassniki-square:before { + content: "\f264"; +} +.fa-get-pocket:before { + content: "\f265"; +} +.fa-wikipedia-w:before { + content: "\f266"; +} +.fa-safari:before { + content: "\f267"; +} +.fa-chrome:before { + content: "\f268"; +} +.fa-firefox:before { + content: "\f269"; +} +.fa-opera:before { + content: "\f26a"; +} +.fa-internet-explorer:before { + content: "\f26b"; +} +.fa-tv:before, +.fa-television:before { + content: "\f26c"; +} +.fa-contao:before { + content: "\f26d"; +} +.fa-500px:before { + content: "\f26e"; +} +.fa-amazon:before { + content: "\f270"; +} +.fa-calendar-plus-o:before { + content: "\f271"; +} +.fa-calendar-minus-o:before { + content: "\f272"; +} +.fa-calendar-times-o:before { + content: "\f273"; +} +.fa-calendar-check-o:before { + content: "\f274"; +} +.fa-industry:before { + content: "\f275"; +} +.fa-map-pin:before { + content: "\f276"; +} +.fa-map-signs:before { + content: "\f277"; +} +.fa-map-o:before { + content: "\f278"; +} +.fa-map:before { + content: "\f279"; +} +.fa-commenting:before { + content: "\f27a"; +} +.fa-commenting-o:before { + content: "\f27b"; +} +.fa-houzz:before { + content: "\f27c"; +} +.fa-vimeo:before { + content: "\f27d"; +} +.fa-black-tie:before { + content: "\f27e"; +} +.fa-fonticons:before { + content: "\f280"; +} +.fa-reddit-alien:before { + content: "\f281"; +} +.fa-edge:before { + content: "\f282"; +} +.fa-credit-card-alt:before { + content: "\f283"; +} +.fa-codiepie:before { + content: "\f284"; +} +.fa-modx:before { + 
content: "\f285"; +} +.fa-fort-awesome:before { + content: "\f286"; +} +.fa-usb:before { + content: "\f287"; +} +.fa-product-hunt:before { + content: "\f288"; +} +.fa-mixcloud:before { + content: "\f289"; +} +.fa-scribd:before { + content: "\f28a"; +} +.fa-pause-circle:before { + content: "\f28b"; +} +.fa-pause-circle-o:before { + content: "\f28c"; +} +.fa-stop-circle:before { + content: "\f28d"; +} +.fa-stop-circle-o:before { + content: "\f28e"; +} +.fa-shopping-bag:before { + content: "\f290"; +} +.fa-shopping-basket:before { + content: "\f291"; +} +.fa-hashtag:before { + content: "\f292"; +} +.fa-bluetooth:before { + content: "\f293"; +} +.fa-bluetooth-b:before { + content: "\f294"; +} +.fa-percent:before { + content: "\f295"; +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fonts/FontAwesome.otf b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fonts/FontAwesome.otf new file mode 100644 index 0000000000..3ed7f8b48a Binary files /dev/null and b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fonts/FontAwesome.otf differ diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fonts/fontawesome-webfont.eot b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fonts/fontawesome-webfont.eot new file mode 100644 index 0000000000..9b6afaedc0 Binary files /dev/null and b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fonts/fontawesome-webfont.eot differ diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fonts/fontawesome-webfont.svg 
b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fonts/fontawesome-webfont.svg new file mode 100644 index 0000000000..d05688e9e2 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fonts/fontawesome-webfont.svg @@ -0,0 +1,655 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fonts/fontawesome-webfont.ttf 
b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fonts/fontawesome-webfont.ttf new file mode 100644 index 0000000000..26dea7951a Binary files /dev/null and b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fonts/fontawesome-webfont.ttf differ diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fonts/fontawesome-webfont.woff b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fonts/fontawesome-webfont.woff new file mode 100644 index 0000000000..dc35ce3c2c Binary files /dev/null and b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fonts/fontawesome-webfont.woff differ diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fonts/fontawesome-webfont.woff2 b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fonts/fontawesome-webfont.woff2 new file mode 100644 index 0000000000..500e517253 Binary files /dev/null and b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fonts/fontawesome-webfont.woff2 differ diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/LICENSE.txt b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/LICENSE.txt new file mode 100755 index 0000000000..75b52484ea --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 
2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Bold.ttf b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Bold.ttf new file mode 100755 index 0000000000..fd79d43bea Binary files /dev/null and b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Bold.ttf differ diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Italic.ttf b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Italic.ttf new file mode 100755 index 0000000000..c90da48ff3 Binary files /dev/null and b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Italic.ttf differ diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Light.ttf b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Light.ttf new file mode 100755 index 0000000000..0d381897da Binary files /dev/null and b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Light.ttf differ diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-LightItalic.ttf b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-LightItalic.ttf new file mode 100755 index 0000000000..68299c4bc6 Binary files /dev/null and 
b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-LightItalic.ttf differ diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Regular.ttf b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Regular.ttf new file mode 100755 index 0000000000..db433349b7 Binary files /dev/null and b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Regular.ttf differ diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Orbitron/OFL.txt b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Orbitron/OFL.txt new file mode 100755 index 0000000000..3b859d9138 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Orbitron/OFL.txt @@ -0,0 +1,93 @@ +Copyright (c) 2009, Matt McInerney (matt@pixelspread.com), +with Reserved Font Name Orbitron. +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +http://scripts.sil.org/OFL + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. 
+ +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. 
These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. 
diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Orbitron/Orbitron-Regular.ttf b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Orbitron/Orbitron-Regular.ttf new file mode 100755 index 0000000000..42563d6b6e Binary files /dev/null and b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Orbitron/Orbitron-Regular.ttf differ diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Oswald/OFL.txt b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Oswald/OFL.txt new file mode 100755 index 0000000000..ff7febddcb --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Oswald/OFL.txt @@ -0,0 +1,92 @@ +Copyright (c) 2011-2012, Vernon Adams (vern@newtypography.co.uk), with Reserved Font Names 'Oswald' +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +http://scripts.sil.org/OFL + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. 
The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. 
These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. 
diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Oswald/Oswald-Regular.ttf b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Oswald/Oswald-Regular.ttf new file mode 100755 index 0000000000..0798e24195 Binary files /dev/null and b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Oswald/Oswald-Regular.ttf differ diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-buildfail.ico b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-buildfail.ico new file mode 100644 index 0000000000..8fdb76e344 Binary files /dev/null and b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-buildfail.ico differ diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-fail.ico b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-fail.ico new file mode 100644 index 0000000000..e028baefab Binary files /dev/null and b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-fail.ico differ diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-ok.ico b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-ok.ico new file mode 100644 index 0000000000..19f0e173de Binary files /dev/null and b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-ok.ico differ diff --git 
a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-panic.ico b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-panic.ico new file mode 100644 index 0000000000..46b1bd085a Binary files /dev/null and b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-panic.ico differ diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/composer.js b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/composer.js new file mode 100644 index 0000000000..7ddb0c8d01 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/composer.js @@ -0,0 +1,171 @@ +var composer = { + tab: "\t", + template: "", + isFunc: function(scope) + { + if (!scope.title || typeof scope.depth === 'undefined') + return false; + + return scope.title.indexOf("Test") === 0 && scope.depth === 0; + }, + discardLastKey: false +}; + + +$(function() +{ + // Begin layout sizing + var headerHeight = $('header').outerHeight(); + var padding = $('#input, #output').css('padding-top').replace("px", "") * 2 + 1; + var outputPlaceholder = $('#output').text(); + + $(window).resize(function() + { + $('#input, #output').height($(window).height() - headerHeight - padding); + }); + + $(window).resize(); + // End layout sizing + + + $('#input').keydown(function(e) + { + // 13=Enter, 16=Shift + composer.discardLastKey = e.keyCode === 13 + || e.keyCode === 16; + }).keyup(function(e) + { + if (!composer.discardLastKey) + generate($(this).val()); + }); + + composer.template = $('#tpl-convey').text(); + + tabOverride.set(document.getElementById('input')); + $('#input').focus(); +}); + + + +// Begin Markup.js custom pipes +Mark.pipes.recursivelyRender = function(val) +{ + 
return !val || val.length === 0 ? "\n" : Mark.up(composer.template, val); +} + +Mark.pipes.indent = function(val) +{ + return new Array(val + 1).join("\t"); +} + +Mark.pipes.notTestFunc = function(scope) +{ + return !composer.isFunc(scope); +} + +Mark.pipes.safeFunc = function(val) +{ + return val.replace(/[^a-z0-9_]/gi, ''); +} + +Mark.pipes.properCase = function(str) +{ + if (str.length === 0) + return ""; + + str = str.charAt(0).toUpperCase() + str.substr(1); + + if (str.length < 2) + return str; + + return str.replace(/[\s_][a-z]+/g, function(txt) + { + return txt.charAt(0) + + txt.charAt(1).toUpperCase() + + txt.substr(2).toLowerCase(); + }); +} + +Mark.pipes.showImports = function(item) +{ + console.log(item); + if (root.title === "(root)" && root.stories.length > 0) + return 'import (\n\t"testing"\n\t. "github.com/smartystreets/goconvey/convey"\n)\n'; + else + return ""; +} +// End Markup.js custom pipes + + +function generate(input) +{ + var root = parseInput(input); + $('#output').text(Mark.up(composer.template, root.stories)); + if (root.stories.length > 0 && root.stories[0].title.substr(0, 4) === "Test") + $('#output').prepend('import (\n\t"testing"\n\t. "github.com/smartystreets/goconvey/convey"\n)\n\n'); +} + +function parseInput(input) +{ + lines = input.split("\n"); + + if (!lines) + return; + + var root = { + title: "(root)", + stories: [] + }; + + for (i in lines) + { + line = lines[i]; + lineText = $.trim(line); + + if (!lineText) + continue; + + // Figure out how deep to put this story + indent = line.match(new RegExp("^" + composer.tab + "+")); + tabs = indent ? indent[0].length / composer.tab.length : 0; + + // Starting at root, traverse into the right spot in the arrays + var curScope = root, prevScope = root; + for (j = 0; j < tabs && curScope.stories.length > 0; j++) + { + curScope = curScope.stories[curScope.stories.length - 1]; + prevScope = curScope; + } + + // Don't go crazy, though! 
(avoid excessive indentation) + if (tabs > curScope.depth + 1) + tabs = curScope.depth + 1; + + // Only top-level Convey() calls need the *testing.T object passed in + var showT = composer.isFunc(prevScope) + || (!composer.isFunc(curScope) + && tabs === 0); + + // Save the story at this scope + curScope.stories.push({ + title: lineText.replace(/"/g, "\\\""), // escape quotes + stories: [], + depth: tabs, + showT: showT + }); + } + + return root; +} + +function suppress(event) +{ + if (!event) + return false; + if (event.preventDefault) + event.preventDefault(); + if (event.stopPropagation) + event.stopPropagation(); + event.cancelBubble = true; + return false; +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/config.js b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/config.js new file mode 100644 index 0000000000..0ca1e457bd --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/config.js @@ -0,0 +1,15 @@ +// Configure the GoConvey web UI client in here + +convey.config = { + + // Install new themes by adding them here; the first one will be default + themes: { + "dark": { name: "Dark", filename: "dark.css", coverage: "hsla({{hue}}, 75%, 30%, .5)" }, + "dark-bigtext": { name: "Dark-BigText", filename: "dark-bigtext.css", coverage: "hsla({{hue}}, 75%, 30%, .5)" }, + "light": { name: "Light", filename: "light.css", coverage: "hsla({{hue}}, 62%, 75%, 1)" } + }, + + // Path to the themes (end with forward-slash) + themePath: "/resources/css/themes/" + +}; diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/convey.js b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/convey.js new file mode 100644 index 0000000000..b4e6b525ec --- /dev/null +++ 
b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/convey.js @@ -0,0 +1,46 @@ +var convey = { + + // *** Don't edit in here unless you're brave *** + + statuses: { // contains some constants related to overall test status + pass: { class: 'ok', text: "Pass" }, // class name must also be that in the favicon file name + fail: { class: 'fail', text: "Fail" }, + panic: { class: 'panic', text: "Panic" }, + buildfail: { class: 'buildfail', text: "Build Failure" } + }, + frameCounter: 0, // gives each frame a unique ID + maxHistory: 20, // how many tests to keep in the history + notif: undefined, // the notification currently being displayed + notifTimer: undefined, // the timer that clears the notifications automatically + poller: new Poller(), // the server poller + status: "", // what the _server_ is currently doing (not overall test results) + overallClass: "", // class name of the "overall" status banner + theme: "", // theme currently being used + packageStates: {}, // packages manually collapsed or expanded during this page's lifetime + uiEffects: true, // whether visual effects are enabled + framesOnSamePath: 0, // number of consecutive frames on this same watch path + layout: { + selClass: "sel", // CSS class when an element is "selected" + header: undefined, // container element of the header area (overall, controls) + frame: undefined, // container element of the main body area (above footer) + footer: undefined // container element of the footer (stuck to bottom) + }, + history: [], // complete history of states (test results and aggregated data), including the current one + moments: {}, // elements that display time relative to the current time, keyed by ID, with the moment() as a value + intervals: {}, // ntervals that execute periodically + intervalFuncs: { // functions executed by each interval in convey.intervals + time: function() + { + var t = new Date(); + var h = zerofill(t.getHours(), 2); + var m 
= zerofill(t.getMinutes(), 2); + var s = zerofill(t.getSeconds(), 2); + $('#time').text(h + ":" + m + ":" + s); + }, + momentjs: function() + { + for (var id in convey.moments) + $('#'+id).html(convey.moments[id].fromNow()); + } + } +}; diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/goconvey.js b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/goconvey.js new file mode 100644 index 0000000000..fcd2e70c0f --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/goconvey.js @@ -0,0 +1,1373 @@ +$(init); + +$(window).load(function() +{ + // Things may shift after all the elements (images/fonts) are loaded + // In Chrome, calling reframe() doesn't work (maybe a quirk); we need to trigger resize + $(window).resize(); +}); + +function init() +{ + log("Welcome to GoConvey!"); + log("Initializing interface"); + convey.overall = emptyOverall(); + loadTheme(); + $('body').show(); + initPoller(); + wireup(); + latest(); +} + +function loadTheme(thmID) +{ + var defaultTheme = "dark"; + var linkTagId = "themeRef"; + + if (!thmID) + thmID = get('theme') || defaultTheme; + + log("Initializing theme: " + thmID); + + if (!convey.config.themes[thmID]) + { + replacement = Object.keys(convey.config.themes)[0] || defaultTheme; + log("NOTICE: Could not find '" + thmID + "' theme; defaulting to '" + replacement + "'"); + thmID = replacement; + } + + convey.theme = thmID; + save('theme', convey.theme); + + var linkTag = $('#'+linkTagId); + var fullPath = convey.config.themePath + + convey.config.themes[convey.theme].filename; + + if (linkTag.length === 0) + { + $('head').append(''); + } + else + linkTag.attr('href', fullPath); + + colorizeCoverageBars(); +} + +function initPoller() +{ + $(convey.poller).on('serverstarting', function(event) + { + log("Server is starting..."); + convey.status = 
"starting"; + showServerDown("Server starting"); + $('#run-tests').addClass('spin-slowly disabled'); + }); + + $(convey.poller).on('pollsuccess', function(event, data) + { + if (convey.status !== "starting") + hideServerDown(); + + // These two if statements determine if the server is now busy + // (and wasn't before) or is not busy (regardless of whether it was before) + if ((!convey.status || convey.status === "idle") + && data.status && data.status !== "idle") + $('#run-tests').addClass('spin-slowly disabled'); + else if (convey.status !== "idle" && data.status === "idle") + { + $('#run-tests').removeClass('spin-slowly disabled'); + } + + switch (data.status) + { + case "executing": + $(convey.poller).trigger('serverexec', data); + break; + case "idle": + $(convey.poller).trigger('serveridle', data); + break; + } + + convey.status = data.status; + }); + + $(convey.poller).on('pollfail', function(event, data) + { + log("Poll failed; server down"); + convey.status = "down"; + showServerDown("Server down"); + }); + + $(convey.poller).on('serverexec', function(event, data) + { + log("Server status: executing"); + $('.favicon').attr('href', '/favicon.ico'); // indicates running tests + }); + + $(convey.poller).on('serveridle', function(event, data) + { + log("Server status: idle"); + log("Tests have finished executing"); + latest(); + }); + + convey.poller.start(); +} + +function wireup() +{ + log("Wireup"); + + customMarkupPipes(); + + var themes = []; + for (var k in convey.config.themes) + themes.push({ id: k, name: convey.config.themes[k].name }); + $('#theme').html(render('tpl-theme-enum', themes)); + + enumSel("theme", convey.theme); + + loadSettingsFromStorage(); + + $('#stories').on('click', '.toggle-all-pkg', function(event) + { + if ($(this).closest('.story-pkg').data('pkg-state') === "expanded") + collapseAll(); + else + expandAll(); + return suppress(event); + }); + + // Wireup the settings switches + $('.enum#theme').on('click', 'li:not(.sel)', 
function() + { + loadTheme($(this).data('theme')); + }); + $('.enum#pkg-expand-collapse').on('click', 'li:not(.sel)', function() + { + var newSetting = $(this).data('pkg-expand-collapse'); + convey.packageStates = {}; + save('pkg-expand-collapse', newSetting); + if (newSetting === "expanded") + expandAll(); + else + collapseAll(); + }); + $('.enum#show-debug-output').on('click', 'li:not(.sel)', function() + { + var newSetting = $(this).data('show-debug-output'); + save('show-debug-output', newSetting); + setDebugOutputUI(newSetting); + }); + $('.enum#ui-effects').on('click', 'li:not(.sel)', function() + { + var newSetting = $(this).data('ui-effects'); + convey.uiEffects = newSetting; + save('ui-effects', newSetting); + }); + // End settings wireup + + //wireup the notification-settings switches + $('.enum#notification').on('click', 'li:not(.sel)', function() + { + var enabled = $(this).data('notification'); + log("Turning notifications " + enabled ? 'on' : 'off'); + save('notifications', enabled); + + if (notif() && 'Notification' in window) + { + if (Notification.permission !== 'denied') + { + Notification.requestPermission(function(per) + { + if (!('permission' in Notification)) + { + Notification.permission = per; + } + }); + } + else + log("Permission denied to show desktop notification"); + } + + setNotifUI() + }); + + $('.enum#notification-level').on('click', 'li:not(.sel)', function() + { + var level = $(this).data('notification-level'); + convey.notificationLevel = level; + save('notification-level', level); + }); + // End notification-settings + + convey.layout.header = $('header').first(); + convey.layout.frame = $('.frame').first(); + convey.layout.footer = $('footer').last(); + + updateWatchPath(); + + $('#path').change(function() + { + // Updates the watched directory with the server and makes sure it exists + var tb = $(this); + var newpath = encodeURIComponent($.trim(tb.val())); + $.post('/watch?root='+newpath) + .done(function() { 
tb.removeClass('error'); }) + .fail(function() { tb.addClass('error'); }); + convey.framesOnSamePath = 1; + }); + + $('#run-tests').click(function() + { + var self = $(this); + if (self.hasClass('spin-slowly') || self.hasClass('disabled')) + return; + log("Test run invoked from web UI"); + $.get("/execute"); + }); + + $('#play-pause').click(function() + { + $.get('/pause'); + + if ($(this).hasClass(convey.layout.selClass)) + { + // Un-pausing + if (!$('footer .replay').is(':visible')) + $('footer .recording').show(); + $('footer .paused').hide(); + log("Resuming auto-execution of tests"); + } + else + { + // Pausing + $('footer .recording').hide(); + $('footer .paused').show(); + log("Pausing auto-execution of tests"); + } + + $(this).toggleClass("throb " + convey.layout.selClass); + }); + + $('#toggle-notif').click(function() + { + toggle($('.settings-notification'), $(this)); + }); + + $('#show-history').click(function() + { + toggle($('.history'), $(this)); + }); + + $('#show-settings').click(function() + { + toggle($('.settings-general'), $(this)); + }); + + $('#show-gen').click(function() { + var writer = window.open("/composer.html"); + if (window.focus) + writer.focus(); + }); + + $('.toggler').not('.narrow').prepend(''); + $('.toggler.narrow').prepend(''); + + $('.toggler').not('.narrow').click(function() + { + var target = $('#' + $(this).data('toggle')); + $('.fa-angle-down, .fa-angle-up', this).toggleClass('fa-angle-down fa-angle-up'); + target.toggle(); + }); + + $('.toggler.narrow').click(function() + { + var target = $('#' + $(this).data('toggle')); + $('.fa-angle-down, .fa-angle-up', this).toggleClass('fa-angle-down fa-angle-up'); + target.toggleClass('hide-narrow show-narrow'); + }); + + // Enumerations are horizontal lists where one item can be selected at a time + $('.enum').on('click', 'li', enumSel); + + // Start ticking time + convey.intervals.time = setInterval(convey.intervalFuncs.time, 1000); + convey.intervals.momentjs = 
setInterval(convey.intervalFuncs.momentjs, 5000); + convey.intervalFuncs.time(); + + // Ignore/un-ignore package + $('#stories').on('click', '.fa.ignore', function(event) + { + var pkg = $(this).data('pkg'); + if ($(this).hasClass('disabled')) + return; + else if ($(this).hasClass('unwatch')) + $.get("/ignore", { paths: pkg }); + else + $.get("/reinstate", { paths: pkg }); + $(this).toggleClass('watch unwatch fa-eye fa-eye-slash clr-red'); + return suppress(event); + }); + + // Show "All" link when hovering the toggler on packages in the stories + $('#stories').on({ + mouseenter: function() { $('.toggle-all-pkg', this).stop().show('fast'); }, + mouseleave: function() { $('.toggle-all-pkg', this).stop().hide('fast'); } + }, '.pkg-toggle-container'); + + // Toggle a package in the stories when clicked + $('#stories').on('click', '.story-pkg', function(event) + { + togglePackage(this, true); + return suppress(event); + }); + + // Select a story line when it is clicked + $('#stories').on('click', '.story-line', function() + { + $('.story-line-sel').not(this).removeClass('story-line-sel'); + $(this).toggleClass('story-line-sel'); + }); + + // Render a frame from the history when clicked + $('.history .container').on('click', '.item', function(event) + { + var frame = getFrame($(this).data("frameid")); + changeStatus(frame.overall.status, true); + renderFrame(frame); + $(this).addClass('selected'); + + // Update current status down in the footer + if ($(this).is(':first-child')) + { + // Now on current frame + $('footer .replay').hide(); + + if ($('#play-pause').hasClass(convey.layout.selClass)) // Was/is paused + $('footer .paused').show(); + else + $('footer .recording').show(); // Was/is recording + } + else + { + $('footer .recording, footer .replay').hide(); + $('footer .replay').show(); + } + return suppress(event); + }); + + $('footer').on('click', '.replay', function() + { + // Clicking "REPLAY" in the corner should bring them back to the current frame + // and 
hide, if visible, the history panel for convenience + $('.history .item:first-child').click(); + if ($('#show-history').hasClass('sel')) + $('#show-history').click(); + }); + + // Keyboard shortcuts! + $(document).keydown(function(e) + { + if (e.ctrlKey || e.metaKey || e.shiftKey) + return; + + switch (e.keyCode) + { + case 67: // c + $('#show-gen').click(); + break; + case 82: // r + $('#run-tests').click(); + break; + case 78: // n + $('#toggle-notif').click(); + break; + case 87: // w + $('#path').focus(); + break; + case 80: // p + $('#play-pause').click(); + break; + } + + return suppress(e); + }); + $('body').on('keydown', 'input, textarea, select', function(e) + { + // If user is typing something, don't let this event bubble + // up to the document to annoyingly fire keyboard shortcuts + e.stopPropagation(); + }); + + // Wire-up the tipsy tooltips + setTooltips(); + + // Keep everything positioned and sized properly on window resize + reframe(); + $(window).resize(reframe); +} + +function setTooltips() +{ + var tips = { + '#path': { delayIn: 500 }, + '#logo': { gravity: 'w' }, + '.controls li, .pkg-cover-name': { live: false }, + 'footer .replay': { live: false, gravity: 'e' }, + '.ignore': { live: false, gravity: $.fn.tipsy.autoNS }, + '.disabled': { live: false, gravity: $.fn.tipsy.autoNS } + }; + + for (var key in tips) + { + $(key).each(function(el) + { + if(!$(this).tipsy(true)) + $(this).tipsy(tips[key]); + }); + } +} + +function setDebugOutputUI(newSetting){ + var $storyLine = $('.story-line'); + switch(newSetting) { + case 'hide': + $('.message', $storyLine).hide(); + break; + case 'fail': + $('.message', $storyLine.not('.fail, .panic')).hide(); + $('.message', $storyLine.filter('.fail, .panic')).show(); + break; + default: + $('.message', $storyLine).show(); + break; + } +} + +function setNotifUI() +{ + var $toggleNotif = $('#toggle-notif').addClass(notif() ? "fa-bell" : "fa-bell-o"); + $toggleNotif.removeClass(!notif() ? 
"fa-bell" : "fa-bell-o"); +} + +function expandAll() +{ + $('.story-pkg').each(function() { expandPackage($(this).data('pkg')); }); +} + +function collapseAll() +{ + $('.story-pkg').each(function() { collapsePackage($(this).data('pkg')); }); +} + +function expandPackage(pkgId) +{ + var pkg = $('.story-pkg.pkg-'+pkgId); + var rows = $('.story-line.pkg-'+pkgId); + + pkg.data('pkg-state', "expanded").addClass('expanded').removeClass('collapsed'); + + $('.pkg-toggle', pkg) + .addClass('fa-minus-square-o') + .removeClass('fa-plus-square-o'); + + rows.show(); +} + +function collapsePackage(pkgId) +{ + var pkg = $('.story-pkg.pkg-'+pkgId); + var rows = $('.story-line.pkg-'+pkgId); + + pkg.data('pkg-state', "collapsed").addClass('collapsed').removeClass('expanded'); + + $('.pkg-toggle', pkg) + .addClass('fa-plus-square-o') + .removeClass('fa-minus-square-o'); + + rows.hide(); +} + +function togglePackage(storyPkgElem) +{ + var pkgId = $(storyPkgElem).data('pkg'); + if ($(storyPkgElem).data('pkg-state') === "expanded") + { + collapsePackage(pkgId); + convey.packageStates[$(storyPkgElem).data('pkg-name')] = "collapsed"; + } + else + { + expandPackage(pkgId); + convey.packageStates[$(storyPkgElem).data('pkg-name')] = "expanded"; + } +} + +function loadSettingsFromStorage() +{ + var pkgExpCollapse = get("pkg-expand-collapse"); + if (!pkgExpCollapse) + { + pkgExpCollapse = "expanded"; + save("pkg-expand-collapse", pkgExpCollapse); + } + enumSel("pkg-expand-collapse", pkgExpCollapse); + + var showDebugOutput = get("show-debug-output"); + if (!showDebugOutput) + { + showDebugOutput = "show"; + save("show-debug-output", showDebugOutput); + } + enumSel("show-debug-output", showDebugOutput); + + var uiEffects = get("ui-effects"); + if (uiEffects === null) + uiEffects = "true"; + convey.uiEffects = uiEffects === "true"; + enumSel("ui-effects", uiEffects); + + enumSel("notification", ""+notif()); + var notifLevel = get("notification-level"); + if (notifLevel === null) + { + notifLevel 
= '.*'; + } + convey.notificationLevel = notifLevel; + enumSel("notification-level", notifLevel); + + setNotifUI(); +} + + + + + + + + + + + +function latest() +{ + log("Fetching latest test results"); + $.getJSON("/latest", process); +} + +function process(data, status, jqxhr) +{ + if (!data || !data.Revision) + { + log("No data received or revision timestamp was missing"); + return; + } + + if (data.Paused && !$('#play-pause').hasClass(convey.layout.selClass)) + { + $('footer .recording').hide(); + $('footer .paused').show(); + $('#play-pause').toggleClass("throb " + convey.layout.selClass); + } + + if (current() && data.Revision === current().results.Revision) + { + log("No changes"); + changeStatus(current().overall.status); // re-assures that status is unchanged + return; + } + + + // Put the new frame in the queue so we can use current() to get to it + convey.history.push(newFrame()); + convey.framesOnSamePath++; + + // Store the raw results in our frame + current().results = data; + + log("Updating watch path"); + updateWatchPath(); + + // Remove all templated items from the DOM as we'll + // replace them with new ones; also remove tipsy tooltips + // that may have lingered around + $('.templated, .tipsy').remove(); + + var uniqueID = 0; + var coverageAvgHelper = { countedPackages: 0, coverageSum: 0 }; + var packages = { + tested: [], + ignored: [], + coverage: {}, + nogofiles: [], + notestfiles: [], + notestfn: [] + }; + + log("Compiling package statistics"); + + // Look for failures and panics through the packages->tests->stories... 
+ for (var i in data.Packages) + { + pkg = makeContext(data.Packages[i]); + current().overall.duration += pkg.Elapsed; + pkg._id = uniqueID++; + + if (pkg.Outcome === "build failure") + { + current().overall.failedBuilds++; + current().failedBuilds.push(pkg); + continue; + } + + + if (pkg.Outcome === "no go code") + packages.nogofiles.push(pkg); + else if (pkg.Outcome === "no test files") + packages.notestfiles.push(pkg); + else if (pkg.Outcome === "no test functions") + packages.notestfn.push(pkg); + else if (pkg.Outcome === "ignored" || pkg.Outcome === "disabled") + packages.ignored.push(pkg); + else + { + if (pkg.Coverage >= 0) + coverageAvgHelper.coverageSum += pkg.Coverage; + coverageAvgHelper.countedPackages++; + packages.coverage[pkg.PackageName] = pkg.Coverage; + packages.tested.push(pkg); + } + + + for (var j in pkg.TestResults) + { + test = makeContext(pkg.TestResults[j]); + test._id = uniqueID++; + test._pkgid = pkg._id; + test._pkg = pkg.PackageName; + + if (test.Stories.length === 0) + { + // Here we've got ourselves a classic Go test, + // not a GoConvey test that has stories and assertions + // so we'll treat this whole test as a single assertion + current().overall.assertions++; + + if (test.Error) + { + test._status = convey.statuses.panic; + pkg._panicked++; + test._panicked++; + current().assertions.panicked.push(test); + } + else if (test.Passed === false) + { + test._status = convey.statuses.fail; + pkg._failed++; + test._failed++; + current().assertions.failed.push(test); + } + else if (test.Skipped) + { + test._status = convey.statuses.skipped; + pkg._skipped++; + test._skipped++; + current().assertions.skipped.push(test); + } + else + { + test._status = convey.statuses.pass; + pkg._passed++; + test._passed++; + current().assertions.passed.push(test); + } + } + else + test._status = convey.statuses.pass; + + var storyPath = [{ Depth: -1, Title: test.TestName, _id: test._id }]; // Maintains the current assertion's story as we iterate + + for 
(var k in test.Stories) + { + var story = makeContext(test.Stories[k]); + + story._id = uniqueID; + story._pkgid = pkg._id; + current().overall.assertions += story.Assertions.length; + + // Establish the current story path so we can report the context + // of failures and panicks more conveniently at the top of the page + if (storyPath.length > 0) + for (var x = storyPath[storyPath.length - 1].Depth; x >= test.Stories[k].Depth; x--) + storyPath.pop(); + storyPath.push({ Depth: test.Stories[k].Depth, Title: test.Stories[k].Title, _id: test.Stories[k]._id }); + + + for (var l in story.Assertions) + { + var assertion = story.Assertions[l]; + assertion._id = uniqueID; + assertion._pkg = pkg.PackageName; + assertion._pkgId = pkg._id; + assertion._failed = !!assertion.Failure; + assertion._panicked = !!assertion.Error; + assertion._maxDepth = storyPath[storyPath.length - 1].Depth; + $.extend(assertion._path = [], storyPath); + + if (assertion.Failure) + { + current().assertions.failed.push(assertion); + pkg._failed++; + test._failed++; + story._failed++; + } + if (assertion.Error) + { + current().assertions.panicked.push(assertion); + pkg._panicked++; + test._panicked++; + story._panicked++; + } + if (assertion.Skipped) + { + current().assertions.skipped.push(assertion); + pkg._skipped++; + test._skipped++; + story._skipped++; + } + if (!assertion.Failure && !assertion.Error && !assertion.Skipped) + { + current().assertions.passed.push(assertion); + pkg._passed++; + test._passed++; + story._passed++; + } + } + + assignStatus(story); + uniqueID++; + } + + if (!test.Passed && !test._failed && !test._panicked) + { + // Edge case: Developer is using the GoConvey DSL, but maybe + // in some cases is using t.Error() instead of So() assertions. + // This can be detected, assuming all child stories with + // assertions (in this test) are passing. 
+ test._status = convey.statuses.fail; + pkg._failed++; + test._failed++; + current().assertions.failed.push(test); + } + } + } + + current().overall.passed = current().assertions.passed.length; + current().overall.panics = current().assertions.panicked.length; + current().overall.failures = current().assertions.failed.length; + current().overall.skipped = current().assertions.skipped.length; + + current().overall.coverage = Math.round((coverageAvgHelper.coverageSum / (coverageAvgHelper.countedPackages || 1)) * 100) / 100; + current().overall.duration = Math.round(current().overall.duration * 1000) / 1000; + + // Compute the coverage delta (difference in overall coverage between now and last frame) + // Only compare coverage on the same watch path + var coverDelta = current().overall.coverage; + if (convey.framesOnSamePath > 2) + coverDelta = current().overall.coverage - convey.history[convey.history.length - 2].overall.coverage; + current().coverDelta = Math.round(coverDelta * 100) / 100; + + + // Build failures trump panics, + // Panics trump failures, + // Failures trump pass. + if (current().overall.failedBuilds) + changeStatus(convey.statuses.buildfail); + else if (current().overall.panics) + changeStatus(convey.statuses.panic); + else if (current().overall.failures) + changeStatus(convey.statuses.fail); + else + changeStatus(convey.statuses.pass); + + // Save our organized package lists + current().packages = packages; + + log(" Assertions: " + current().overall.assertions); + log(" Passed: " + current().overall.passed); + log(" Skipped: " + current().overall.skipped); + log(" Failures: " + current().overall.failures); + log(" Panics: " + current().overall.panics); + log("Build Failures: " + current().overall.failedBuilds); + log(" Coverage: " + current().overall.coverage + "% (" + showCoverDelta(current().coverDelta) + ")"); + + // Save timestamp when this test was executed + convey.moments['last-test'] = moment(); + + + + // Render... render ALL THE THINGS! 
(All model/state modifications are DONE!) + renderFrame(current()); + // Now, just finish up miscellaneous UI things + + + // Add this frame to the history pane + var framePiece = render('tpl-history', current()); + $('.history .container').prepend(framePiece); + $('.history .item:first-child').addClass('selected'); + convey.moments['frame-'+current().id] = moment(); + if (convey.history.length > convey.maxHistory) + { + // Delete the oldest frame out of the history pane if we have too many + convey.history.splice(0, 1); + $('.history .container .item').last().remove(); + } + + // Now add the momentjs time to the new frame in the history + convey.intervalFuncs.momentjs(); + + // Show notification, if enabled + var levelRegex = new RegExp("("+convey.notificationLevel+")", "i"); + if (notif() && current().overall.status.class.match(levelRegex)) + { + log("Showing notification"); + if (convey.notif) + { + clearTimeout(convey.notifTimer); + convey.notif.close(); + } + + var notifText = notifSummary(current()); + + convey.notif = new Notification(notifText.title, { + body: notifText.body, + icon: $('.favicon').attr('href') + }); + + convey.notif.onclick = function() { + window.focus(); + }; + + convey.notifTimer = setTimeout(function() { convey.notif.close(); }, 5000); + } + + // Update title in title bar + if (current().overall.passed === current().overall.assertions && current().overall.status.class === "ok") + $('title').text("GoConvey (ALL PASS)"); + else + $('title').text("GoConvey [" + current().overall.status.text + "] " + current().overall.passed + "/" + current().overall.assertions); + + setTooltips(); + + // All done! 
+ log("Processing complete"); +} + +// Updates the entire UI given a frame from the history +function renderFrame(frame) +{ + log("Rendering frame (id: " + frame.id + ")"); + + $('#coverage').html(render('tpl-coverage', frame.packages.tested.sort(sortPackages))); + $('#ignored').html(render('tpl-ignored', frame.packages.ignored.sort(sortPackages))); + $('#nogofiles').html(render('tpl-nogofiles', frame.packages.nogofiles.sort(sortPackages))); + $('#notestfiles').html(render('tpl-notestfiles', frame.packages.notestfiles.sort(sortPackages))); + $('#notestfn').html(render('tpl-notestfn', frame.packages.notestfn.sort(sortPackages))); + + if (frame.overall.failedBuilds) + { + $('.buildfailures').show(); + $('#buildfailures').html(render('tpl-buildfailures', frame.failedBuilds)); + } + else + $('.buildfailures').hide(); + + if (frame.overall.panics) + { + $('.panics').show(); + $('#panics').html(render('tpl-panics', frame.assertions.panicked)); + } + else + $('.panics').hide(); + + + if (frame.overall.failures) + { + $('.failures').show(); + $('#failures').html(render('tpl-failures', frame.assertions.failed)); + $(".failure").each(function() { + $(this).prettyTextDiff(); + }); + } + else + $('.failures').hide(); + + $('#stories').html(render('tpl-stories', frame.packages.tested.sort(sortPackages))); + $('#stories').append(render('tpl-stories', frame.packages.ignored.sort(sortPackages))); + + var pkgDefaultView = get('pkg-expand-collapse'); + $('.story-pkg.expanded').each(function() + { + if (pkgDefaultView === "collapsed" && convey.packageStates[$(this).data('pkg-name')] !== "expanded") + collapsePackage($(this).data('pkg')); + }); + + redrawCoverageBars(); + + $('#assert-count').html(""+frame.overall.assertions+" assertion" + + (frame.overall.assertions !== 1 ? 
"s" : "")); + $('#skip-count').html(""+frame.assertions.skipped.length + " skipped"); + $('#fail-count').html(""+frame.assertions.failed.length + " failed"); + $('#panic-count').html(""+frame.assertions.panicked.length + " panicked"); + $('#duration').html(""+frame.overall.duration + "s"); + + $('#narrow-assert-count').html(""+frame.overall.assertions+""); + $('#narrow-skip-count').html(""+frame.assertions.skipped.length + ""); + $('#narrow-fail-count').html(""+frame.assertions.failed.length + ""); + $('#narrow-panic-count').html(""+frame.assertions.panicked.length + ""); + + $('.history .item').removeClass('selected'); + + + setDebugOutputUI(get('show-debug-output')); + + log("Rendering finished"); +} + + + + + + + +function enumSel(id, val) +{ + if (typeof id === "string" && typeof val === "string") + { + $('.enum#'+id+' > li').each(function() + { + if ($(this).data(id).toString() === val) + { + $(this).addClass(convey.layout.selClass).siblings().removeClass(convey.layout.selClass); + return false; + } + }); + } + else + $(this).addClass(convey.layout.selClass).siblings().removeClass(convey.layout.selClass); +} + +function toggle(jqelem, switchelem) +{ + var speed = 250; + var transition = 'easeInOutQuart'; + var containerSel = '.container'; + + if (!jqelem.is(':visible')) + { + $(containerSel, jqelem).css('opacity', 0); + jqelem.stop().slideDown(speed, transition, function() + { + if (switchelem) + switchelem.toggleClass(convey.layout.selClass); + $(containerSel, jqelem).stop().animate({ + opacity: 1 + }, speed); + reframe(); + }); + } + else + { + $(containerSel, jqelem).stop().animate({ + opacity: 0 + }, speed, function() + { + if (switchelem) + switchelem.toggleClass(convey.layout.selClass); + jqelem.stop().slideUp(speed, transition, function() { reframe(); }); + }); + } +} + +function changeStatus(newStatus, isHistoricalFrame) +{ + if (!newStatus || !newStatus.class || !newStatus.text) + newStatus = convey.statuses.pass; + + var sameStatus = newStatus.class 
=== convey.overallClass; + + // The CSS class .flash and the jQuery UI 'pulsate' effect don't play well together. + // This series of callbacks does the flickering/pulsating as well as + // enabling/disabling flashing in the proper order so that they don't overlap. + // TODO: I suppose the pulsating could also be done with just CSS, maybe...? + + if (convey.uiEffects) + { + var times = sameStatus ? 3 : 2; + var duration = sameStatus ? 500 : 300; + + $('.overall .status').removeClass('flash').effect("pulsate", {times: times}, duration, function() + { + $(this).text(newStatus.text); + + if (newStatus !== convey.statuses.pass) // only flicker extra when not currently passing + { + $(this).effect("pulsate", {times: 1}, 300, function() + { + $(this).effect("pulsate", {times: 1}, 500, function() + { + if (newStatus === convey.statuses.panic + || newStatus === convey.statuses.buildfail) + $(this).addClass('flash'); + else + $(this).removeClass('flash'); + }); + }); + } + }); + } + else + $('.overall .status').text(newStatus.text); + + if (!sameStatus) // change the color + $('.overall').switchClass(convey.overallClass, newStatus.class, 1000); + + if (!isHistoricalFrame) + current().overall.status = newStatus; + convey.overallClass = newStatus.class; + $('.favicon').attr('href', '/resources/ico/goconvey-'+newStatus.class+'.ico'); +} + +function updateWatchPath() +{ + $.get("/watch", function(data) + { + var newPath = $.trim(data); + if (newPath !== $('#path').val()) + convey.framesOnSamePath = 1; + $('#path').val(newPath); + }); +} + +function notifSummary(frame) +{ + var body = frame.overall.passed + " passed, "; + + if (frame.overall.failedBuilds) + body += frame.overall.failedBuilds + " build" + (frame.overall.failedBuilds !== 1 ? 
"s" : "") + " failed, "; + if (frame.overall.failures) + body += frame.overall.failures + " failed, "; + if (frame.overall.panics) + body += frame.overall.panics + " panicked, "; + body += frame.overall.skipped + " skipped"; + + body += "\r\n" + frame.overall.duration + "s"; + + if (frame.coverDelta > 0) + body += "\r\n↑ coverage (" + showCoverDelta(frame.coverDelta) + ")"; + else if (frame.coverDelta < 0) + body += "\r\n↓ coverage (" + showCoverDelta(frame.coverDelta) + ")"; + + return { + title: frame.overall.status.text.toUpperCase(), + body: body + }; +} + +function redrawCoverageBars() +{ + $('.pkg-cover-bar').each(function() + { + var pkgName = $(this).data("pkg"); + var hue = $(this).data("width"); + var hueDiff = hue; + + if (convey.history.length > 1) + { + var oldHue = convey.history[convey.history.length - 2].packages.coverage[pkgName] || 0; + $(this).width(oldHue + "%"); + hueDiff = hue - oldHue; + } + + $(this).animate({ + width: "+=" + hueDiff + "%" + }, 1250); + }); + + colorizeCoverageBars(); +} + +function colorizeCoverageBars() +{ + var colorTpl = convey.config.themes[convey.theme].coverage + || "hsla({{hue}}, 75%, 30%, .3)"; //default color template + + $('.pkg-cover-bar').each(function() + { + var hue = $(this).data("width"); + $(this).css({ + background: colorTpl.replace("{{hue}}", hue) + }); + }); +} + + +function getFrame(id) +{ + for (var i in convey.history) + if (convey.history[i].id === id) + return convey.history[i]; +} + +function render(templateID, context) +{ + var tpl = $('#' + templateID).text(); + return $($.trim(Mark.up(tpl, context))); +} + +function reframe() +{ + var heightBelowHeader = $(window).height() - convey.layout.header.outerHeight(); + var middleHeight = heightBelowHeader - convey.layout.footer.outerHeight(); + convey.layout.frame.height(middleHeight); + + var pathWidth = $(window).width() - $('#logo').outerWidth() - $('#control-buttons').outerWidth() - 10; + $('#path-container').width(pathWidth); +} + +function 
notif() +{ + return get('notifications') === "true"; // stored as strings +} + +function showServerDown(message) +{ + $('.server-down .notice-message').text(message); + $('.server-down').show(); + $('.server-not-down').hide(); + reframe(); +} + +function hideServerDown() +{ + $('.server-down').hide(); + $('.server-not-down').show(); + reframe(); +} + +function log(msg) +{ + var jqLog = $('#log'); + if (jqLog.length > 0) + { + var t = new Date(); + var h = zerofill(t.getHours(), 2); + var m = zerofill(t.getMinutes(), 2); + var s = zerofill(t.getSeconds(), 2); + var ms = zerofill(t.getMilliseconds(), 3); + date = h + ":" + m + ":" + s + "." + ms; + + $(jqLog).append(render('tpl-log-line', { time: date, msg: msg })); + $(jqLog).parent('.col').scrollTop(jqLog[0].scrollHeight); + } + else + console.log(msg); +} + +function zerofill(val, count) +{ + // Cheers to http://stackoverflow.com/a/9744576/1048862 + var pad = new Array(1 + count).join('0'); + return (pad + val).slice(-pad.length); +} + +// Sorts packages ascending by only the last part of their name +// Can be passed into Array.sort() +function sortPackages(a, b) +{ + var aPkg = splitPathName(a.PackageName); + var bPkg = splitPathName(b.PackageName); + + if (aPkg.length === 0 || bPkg.length === 0) + return 0; + + var aName = aPkg.parts[aPkg.parts.length - 1].toLowerCase(); + var bName = bPkg.parts[bPkg.parts.length - 1].toLowerCase(); + + if (aName < bName) + return -1; + else if (aName > bName) + return 1; + else + return 0; + + /* + MEMO: Use to sort by entire package name: + if (a.PackageName < b.PackageName) return -1; + else if (a.PackageName > b.PackageName) return 1; + else return 0; + */ +} + +function get(key) +{ + var val = localStorage.getItem(key); + if (val && (val[0] === '[' || val[0] === '{')) + return JSON.parse(val); + else + return val; +} + +function save(key, val) +{ + if (typeof val === 'object') + val = JSON.stringify(val); + else if (typeof val === 'number' || typeof val === 'boolean') + val 
= val.toString(); + localStorage.setItem(key, val); +} + +function splitPathName(str) +{ + var delim = str.indexOf('\\') > -1 ? '\\' : '/'; + return { delim: delim, parts: str.split(delim) }; +} + +function newFrame() +{ + return { + results: {}, // response from server (with some of our own context info) + packages: {}, // packages organized into statuses for convenience (like with coverage) + overall: emptyOverall(), // overall status info, compiled from server's response + assertions: emptyAssertions(), // lists of assertions, compiled from server's response + failedBuilds: [], // list of packages that failed to build + timestamp: moment(), // the timestamp of this "freeze-state" + id: convey.frameCounter++, // unique ID for this frame + coverDelta: 0 // difference in total coverage from the last frame to this one + }; +} + +function emptyOverall() +{ + return { + status: {}, + duration: 0, + assertions: 0, + passed: 0, + panics: 0, + failures: 0, + skipped: 0, + failedBuilds: 0, + coverage: 0 + }; +} + +function emptyAssertions() +{ + return { + passed: [], + failed: [], + panicked: [], + skipped: [] + }; +} + +function makeContext(obj) +{ + obj._passed = 0; + obj._failed = 0; + obj._panicked = 0; + obj._skipped = 0; + obj._status = ''; + return obj; +} + +function current() +{ + return convey.history[convey.history.length - 1]; +} + +function assignStatus(obj) +{ + if (obj._skipped) + obj._status = 'skip'; + else if (obj.Outcome === "ignored") + obj._status = convey.statuses.ignored; + else if (obj._panicked) + obj._status = convey.statuses.panic; + else if (obj._failed || obj.Outcome === "failed") + obj._status = convey.statuses.fail; + else + obj._status = convey.statuses.pass; +} + +function showCoverDelta(delta) +{ + if (delta > 0) + return "+" + delta + "%"; + else if (delta === 0) + return "±" + delta + "%"; + else + return delta + "%"; +} + +function customMarkupPipes() +{ + // MARKUP.JS custom pipes + Mark.pipes.relativePath = function(str) + { + 
basePath = new RegExp($('#path').val()+'[\\/]', 'gi'); + return str.replace(basePath, ''); + }; + Mark.pipes.htmlSafe = function(str) + { + return str.replace(//g, ">"); + }; + Mark.pipes.ansiColours = ansispan; + Mark.pipes.boldPkgName = function(str) + { + var pkg = splitPathName(str); + pkg.parts[0] = '' + pkg.parts[0]; + pkg.parts[pkg.parts.length - 1] = "" + pkg.parts[pkg.parts.length - 1] + ""; + return pkg.parts.join(pkg.delim); + }; + Mark.pipes.needsDiff = function(test) + { + return !!test.Failure && (test.Expected !== "" || test.Actual !== ""); + }; + Mark.pipes.coveragePct = function(str) + { + // Expected input: 75% to be represented as: "75.0" + var num = parseInt(str); // we only need int precision + if (num < 0) + return "0"; + else if (num <= 5) + return "5"; // Still shows low coverage + else if (num > 100) + str = "100"; + return str; + }; + Mark.pipes.coverageDisplay = function(str) + { + var num = parseFloat(str); + return num < 0 ? "" : num + "% coverage"; + }; + Mark.pipes.coverageReportName = function(str) + { + return str.replace(/\//g, "-"); + }; +} + +function suppress(event) +{ + if (!event) + return false; + if (event.preventDefault) + event.preventDefault(); + if (event.stopPropagation) + event.stopPropagation(); + event.cancelBubble = true; + return false; +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/ansispan.js b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/ansispan.js new file mode 100644 index 0000000000..3d8603a6d1 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/ansispan.js @@ -0,0 +1,67 @@ +/* +Copyright (C) 2011 by Maciej Małecki + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without 
restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ + +var ansispan = function (str) { + Object.keys(ansispan.foregroundColors).forEach(function (ansi) { + var span = ''; + + // + // `\033[Xm` == `\033[0;Xm` sets foreground color to `X`. 
+ // + + str = str.replace( + new RegExp('\033\\[' + ansi + 'm', 'g'), + span + ).replace( + new RegExp('\033\\[0;' + ansi + 'm', 'g'), + span + ); + }); + // + // `\033[1m` enables bold font, `\033[22m` disables it + // + str = str.replace(/\033\[1m/g, '').replace(/\033\[22m/g, ''); + + // + // `\033[3m` enables italics font, `\033[23m` disables it + // + str = str.replace(/\033\[3m/g, '').replace(/\033\[23m/g, ''); + + str = str.replace(/\033\[m/g, ''); + str = str.replace(/\033\[0m/g, ''); + return str.replace(/\033\[39m/g, ''); +}; + +ansispan.foregroundColors = { + '30': 'black', + '31': 'red', + '32': 'green', + '33': 'yellow', + '34': 'blue', + '35': 'purple', + '36': 'cyan', + '37': 'white' +}; + +if (typeof module !== 'undefined' && module.exports) { + module.exports = ansispan; +} diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/diff_match_patch.js b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/diff_match_patch.js new file mode 100644 index 0000000000..112130e097 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/diff_match_patch.js @@ -0,0 +1,2193 @@ +/** + * Diff Match and Patch + * + * Copyright 2006 Google Inc. + * http://code.google.com/p/google-diff-match-patch/ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @fileoverview Computes the difference between two texts to create a patch. + * Applies the patch onto another text, allowing for errors. + * @author fraser@google.com (Neil Fraser) + */ + +/** + * Class containing the diff, match and patch methods. + * @constructor + */ +function diff_match_patch() { + + // Defaults. + // Redefine these in your program to override the defaults. + + // Number of seconds to map a diff before giving up (0 for infinity). + this.Diff_Timeout = 1.0; + // Cost of an empty edit operation in terms of edit characters. + this.Diff_EditCost = 4; + // At what point is no match declared (0.0 = perfection, 1.0 = very loose). + this.Match_Threshold = 0.5; + // How far to search for a match (0 = exact location, 1000+ = broad match). + // A match this many characters away from the expected location will add + // 1.0 to the score (0.0 is a perfect match). + this.Match_Distance = 1000; + // When deleting a large block of text (over ~64 characters), how close do + // the contents have to be to match the expected contents. (0.0 = perfection, + // 1.0 = very loose). Note that Match_Threshold controls how closely the + // end points of a delete need to match. + this.Patch_DeleteThreshold = 0.5; + // Chunk size for context length. + this.Patch_Margin = 4; + + // The number of bits in an int. + this.Match_MaxBits = 32; +} + + +// DIFF FUNCTIONS + + +/** + * The data structure representing a diff is an array of tuples: + * [[DIFF_DELETE, 'Hello'], [DIFF_INSERT, 'Goodbye'], [DIFF_EQUAL, ' world.']] + * which means: delete 'Hello', add 'Goodbye' and keep ' world.' + */ +var DIFF_DELETE = -1; +var DIFF_INSERT = 1; +var DIFF_EQUAL = 0; + +/** @typedef {{0: number, 1: string}} */ +diff_match_patch.Diff; + + +/** + * Find the differences between two texts. Simplifies the problem by stripping + * any common prefix or suffix off the texts before diffing. + * @param {string} text1 Old string to be diffed. 
+ * @param {string} text2 New string to be diffed. + * @param {boolean=} opt_checklines Optional speedup flag. If present and false, + * then don't run a line-level diff first to identify the changed areas. + * Defaults to true, which does a faster, slightly less optimal diff. + * @param {number} opt_deadline Optional time when the diff should be complete + * by. Used internally for recursive calls. Users should set DiffTimeout + * instead. + * @return {!Array.} Array of diff tuples. + */ +diff_match_patch.prototype.diff_main = function(text1, text2, opt_checklines, + opt_deadline) { + // Set a deadline by which time the diff must be complete. + if (typeof opt_deadline == 'undefined') { + if (this.Diff_Timeout <= 0) { + opt_deadline = Number.MAX_VALUE; + } else { + opt_deadline = (new Date).getTime() + this.Diff_Timeout * 1000; + } + } + var deadline = opt_deadline; + + // Check for null inputs. + if (text1 == null || text2 == null) { + throw new Error('Null input. (diff_main)'); + } + + // Check for equality (speedup). + if (text1 == text2) { + if (text1) { + return [[DIFF_EQUAL, text1]]; + } + return []; + } + + if (typeof opt_checklines == 'undefined') { + opt_checklines = true; + } + var checklines = opt_checklines; + + // Trim off common prefix (speedup). + var commonlength = this.diff_commonPrefix(text1, text2); + var commonprefix = text1.substring(0, commonlength); + text1 = text1.substring(commonlength); + text2 = text2.substring(commonlength); + + // Trim off common suffix (speedup). + commonlength = this.diff_commonSuffix(text1, text2); + var commonsuffix = text1.substring(text1.length - commonlength); + text1 = text1.substring(0, text1.length - commonlength); + text2 = text2.substring(0, text2.length - commonlength); + + // Compute the diff on the middle block. + var diffs = this.diff_compute_(text1, text2, checklines, deadline); + + // Restore the prefix and suffix. 
+ if (commonprefix) { + diffs.unshift([DIFF_EQUAL, commonprefix]); + } + if (commonsuffix) { + diffs.push([DIFF_EQUAL, commonsuffix]); + } + this.diff_cleanupMerge(diffs); + return diffs; +}; + + +/** + * Find the differences between two texts. Assumes that the texts do not + * have any common prefix or suffix. + * @param {string} text1 Old string to be diffed. + * @param {string} text2 New string to be diffed. + * @param {boolean} checklines Speedup flag. If false, then don't run a + * line-level diff first to identify the changed areas. + * If true, then run a faster, slightly less optimal diff. + * @param {number} deadline Time when the diff should be complete by. + * @return {!Array.} Array of diff tuples. + * @private + */ +diff_match_patch.prototype.diff_compute_ = function(text1, text2, checklines, + deadline) { + var diffs; + + if (!text1) { + // Just add some text (speedup). + return [[DIFF_INSERT, text2]]; + } + + if (!text2) { + // Just delete some text (speedup). + return [[DIFF_DELETE, text1]]; + } + + var longtext = text1.length > text2.length ? text1 : text2; + var shorttext = text1.length > text2.length ? text2 : text1; + var i = longtext.indexOf(shorttext); + if (i != -1) { + // Shorter text is inside the longer text (speedup). + diffs = [[DIFF_INSERT, longtext.substring(0, i)], + [DIFF_EQUAL, shorttext], + [DIFF_INSERT, longtext.substring(i + shorttext.length)]]; + // Swap insertions for deletions if diff is reversed. + if (text1.length > text2.length) { + diffs[0][0] = diffs[2][0] = DIFF_DELETE; + } + return diffs; + } + + if (shorttext.length == 1) { + // Single character string. + // After the previous speedup, the character can't be an equality. + return [[DIFF_DELETE, text1], [DIFF_INSERT, text2]]; + } + + // Check to see if the problem can be split in two. + var hm = this.diff_halfMatch_(text1, text2); + if (hm) { + // A half-match was found, sort out the return data. 
+ var text1_a = hm[0]; + var text1_b = hm[1]; + var text2_a = hm[2]; + var text2_b = hm[3]; + var mid_common = hm[4]; + // Send both pairs off for separate processing. + var diffs_a = this.diff_main(text1_a, text2_a, checklines, deadline); + var diffs_b = this.diff_main(text1_b, text2_b, checklines, deadline); + // Merge the results. + return diffs_a.concat([[DIFF_EQUAL, mid_common]], diffs_b); + } + + if (checklines && text1.length > 100 && text2.length > 100) { + return this.diff_lineMode_(text1, text2, deadline); + } + + return this.diff_bisect_(text1, text2, deadline); +}; + + +/** + * Do a quick line-level diff on both strings, then rediff the parts for + * greater accuracy. + * This speedup can produce non-minimal diffs. + * @param {string} text1 Old string to be diffed. + * @param {string} text2 New string to be diffed. + * @param {number} deadline Time when the diff should be complete by. + * @return {!Array.} Array of diff tuples. + * @private + */ +diff_match_patch.prototype.diff_lineMode_ = function(text1, text2, deadline) { + // Scan the text on a line-by-line basis first. + var a = this.diff_linesToChars_(text1, text2); + text1 = a.chars1; + text2 = a.chars2; + var linearray = a.lineArray; + + var diffs = this.diff_main(text1, text2, false, deadline); + + // Convert the diff back to original text. + this.diff_charsToLines_(diffs, linearray); + // Eliminate freak matches (e.g. blank lines) + this.diff_cleanupSemantic(diffs); + + // Rediff any replacement blocks, this time character-by-character. + // Add a dummy entry at the end. 
+ diffs.push([DIFF_EQUAL, '']); + var pointer = 0; + var count_delete = 0; + var count_insert = 0; + var text_delete = ''; + var text_insert = ''; + while (pointer < diffs.length) { + switch (diffs[pointer][0]) { + case DIFF_INSERT: + count_insert++; + text_insert += diffs[pointer][1]; + break; + case DIFF_DELETE: + count_delete++; + text_delete += diffs[pointer][1]; + break; + case DIFF_EQUAL: + // Upon reaching an equality, check for prior redundancies. + if (count_delete >= 1 && count_insert >= 1) { + // Delete the offending records and add the merged ones. + diffs.splice(pointer - count_delete - count_insert, + count_delete + count_insert); + pointer = pointer - count_delete - count_insert; + var a = this.diff_main(text_delete, text_insert, false, deadline); + for (var j = a.length - 1; j >= 0; j--) { + diffs.splice(pointer, 0, a[j]); + } + pointer = pointer + a.length; + } + count_insert = 0; + count_delete = 0; + text_delete = ''; + text_insert = ''; + break; + } + pointer++; + } + diffs.pop(); // Remove the dummy entry at the end. + + return diffs; +}; + + +/** + * Find the 'middle snake' of a diff, split the problem in two + * and return the recursively constructed diff. + * See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations. + * @param {string} text1 Old string to be diffed. + * @param {string} text2 New string to be diffed. + * @param {number} deadline Time at which to bail if not yet complete. + * @return {!Array.} Array of diff tuples. + * @private + */ +diff_match_patch.prototype.diff_bisect_ = function(text1, text2, deadline) { + // Cache the text lengths to prevent multiple calls. + var text1_length = text1.length; + var text2_length = text2.length; + var max_d = Math.ceil((text1_length + text2_length) / 2); + var v_offset = max_d; + var v_length = 2 * max_d; + var v1 = new Array(v_length); + var v2 = new Array(v_length); + // Setting all elements to -1 is faster in Chrome & Firefox than mixing + // integers and undefined. 
+ for (var x = 0; x < v_length; x++) { + v1[x] = -1; + v2[x] = -1; + } + v1[v_offset + 1] = 0; + v2[v_offset + 1] = 0; + var delta = text1_length - text2_length; + // If the total number of characters is odd, then the front path will collide + // with the reverse path. + var front = (delta % 2 != 0); + // Offsets for start and end of k loop. + // Prevents mapping of space beyond the grid. + var k1start = 0; + var k1end = 0; + var k2start = 0; + var k2end = 0; + for (var d = 0; d < max_d; d++) { + // Bail out if deadline is reached. + if ((new Date()).getTime() > deadline) { + break; + } + + // Walk the front path one step. + for (var k1 = -d + k1start; k1 <= d - k1end; k1 += 2) { + var k1_offset = v_offset + k1; + var x1; + if (k1 == -d || (k1 != d && v1[k1_offset - 1] < v1[k1_offset + 1])) { + x1 = v1[k1_offset + 1]; + } else { + x1 = v1[k1_offset - 1] + 1; + } + var y1 = x1 - k1; + while (x1 < text1_length && y1 < text2_length && + text1.charAt(x1) == text2.charAt(y1)) { + x1++; + y1++; + } + v1[k1_offset] = x1; + if (x1 > text1_length) { + // Ran off the right of the graph. + k1end += 2; + } else if (y1 > text2_length) { + // Ran off the bottom of the graph. + k1start += 2; + } else if (front) { + var k2_offset = v_offset + delta - k1; + if (k2_offset >= 0 && k2_offset < v_length && v2[k2_offset] != -1) { + // Mirror x2 onto top-left coordinate system. + var x2 = text1_length - v2[k2_offset]; + if (x1 >= x2) { + // Overlap detected. + return this.diff_bisectSplit_(text1, text2, x1, y1, deadline); + } + } + } + } + + // Walk the reverse path one step. 
+ for (var k2 = -d + k2start; k2 <= d - k2end; k2 += 2) { + var k2_offset = v_offset + k2; + var x2; + if (k2 == -d || (k2 != d && v2[k2_offset - 1] < v2[k2_offset + 1])) { + x2 = v2[k2_offset + 1]; + } else { + x2 = v2[k2_offset - 1] + 1; + } + var y2 = x2 - k2; + while (x2 < text1_length && y2 < text2_length && + text1.charAt(text1_length - x2 - 1) == + text2.charAt(text2_length - y2 - 1)) { + x2++; + y2++; + } + v2[k2_offset] = x2; + if (x2 > text1_length) { + // Ran off the left of the graph. + k2end += 2; + } else if (y2 > text2_length) { + // Ran off the top of the graph. + k2start += 2; + } else if (!front) { + var k1_offset = v_offset + delta - k2; + if (k1_offset >= 0 && k1_offset < v_length && v1[k1_offset] != -1) { + var x1 = v1[k1_offset]; + var y1 = v_offset + x1 - k1_offset; + // Mirror x2 onto top-left coordinate system. + x2 = text1_length - x2; + if (x1 >= x2) { + // Overlap detected. + return this.diff_bisectSplit_(text1, text2, x1, y1, deadline); + } + } + } + } + } + // Diff took too long and hit the deadline or + // number of diffs equals number of characters, no commonality at all. + return [[DIFF_DELETE, text1], [DIFF_INSERT, text2]]; +}; + + +/** + * Given the location of the 'middle snake', split the diff in two parts + * and recurse. + * @param {string} text1 Old string to be diffed. + * @param {string} text2 New string to be diffed. + * @param {number} x Index of split point in text1. + * @param {number} y Index of split point in text2. + * @param {number} deadline Time at which to bail if not yet complete. + * @return {!Array.} Array of diff tuples. + * @private + */ +diff_match_patch.prototype.diff_bisectSplit_ = function(text1, text2, x, y, + deadline) { + var text1a = text1.substring(0, x); + var text2a = text2.substring(0, y); + var text1b = text1.substring(x); + var text2b = text2.substring(y); + + // Compute both diffs serially. 
+ var diffs = this.diff_main(text1a, text2a, false, deadline); + var diffsb = this.diff_main(text1b, text2b, false, deadline); + + return diffs.concat(diffsb); +}; + + +/** + * Split two texts into an array of strings. Reduce the texts to a string of + * hashes where each Unicode character represents one line. + * @param {string} text1 First string. + * @param {string} text2 Second string. + * @return {{chars1: string, chars2: string, lineArray: !Array.}} + * An object containing the encoded text1, the encoded text2 and + * the array of unique strings. + * The zeroth element of the array of unique strings is intentionally blank. + * @private + */ +diff_match_patch.prototype.diff_linesToChars_ = function(text1, text2) { + var lineArray = []; // e.g. lineArray[4] == 'Hello\n' + var lineHash = {}; // e.g. lineHash['Hello\n'] == 4 + + // '\x00' is a valid character, but various debuggers don't like it. + // So we'll insert a junk entry to avoid generating a null character. + lineArray[0] = ''; + + /** + * Split a text into an array of strings. Reduce the texts to a string of + * hashes where each Unicode character represents one line. + * Modifies linearray and linehash through being a closure. + * @param {string} text String to encode. + * @return {string} Encoded string. + * @private + */ + function diff_linesToCharsMunge_(text) { + var chars = ''; + // Walk the text, pulling out a substring for each line. + // text.split('\n') would would temporarily double our memory footprint. + // Modifying text would create many large strings to garbage collect. + var lineStart = 0; + var lineEnd = -1; + // Keeping our own length variable is faster than looking it up. + var lineArrayLength = lineArray.length; + while (lineEnd < text.length - 1) { + lineEnd = text.indexOf('\n', lineStart); + if (lineEnd == -1) { + lineEnd = text.length - 1; + } + var line = text.substring(lineStart, lineEnd + 1); + lineStart = lineEnd + 1; + + if (lineHash.hasOwnProperty ? 
lineHash.hasOwnProperty(line) : + (lineHash[line] !== undefined)) { + chars += String.fromCharCode(lineHash[line]); + } else { + chars += String.fromCharCode(lineArrayLength); + lineHash[line] = lineArrayLength; + lineArray[lineArrayLength++] = line; + } + } + return chars; + } + + var chars1 = diff_linesToCharsMunge_(text1); + var chars2 = diff_linesToCharsMunge_(text2); + return {chars1: chars1, chars2: chars2, lineArray: lineArray}; +}; + + +/** + * Rehydrate the text in a diff from a string of line hashes to real lines of + * text. + * @param {!Array.} diffs Array of diff tuples. + * @param {!Array.} lineArray Array of unique strings. + * @private + */ +diff_match_patch.prototype.diff_charsToLines_ = function(diffs, lineArray) { + for (var x = 0; x < diffs.length; x++) { + var chars = diffs[x][1]; + var text = []; + for (var y = 0; y < chars.length; y++) { + text[y] = lineArray[chars.charCodeAt(y)]; + } + diffs[x][1] = text.join(''); + } +}; + + +/** + * Determine the common prefix of two strings. + * @param {string} text1 First string. + * @param {string} text2 Second string. + * @return {number} The number of characters common to the start of each + * string. + */ +diff_match_patch.prototype.diff_commonPrefix = function(text1, text2) { + // Quick check for common null cases. + if (!text1 || !text2 || text1.charAt(0) != text2.charAt(0)) { + return 0; + } + // Binary search. + // Performance analysis: http://neil.fraser.name/news/2007/10/09/ + var pointermin = 0; + var pointermax = Math.min(text1.length, text2.length); + var pointermid = pointermax; + var pointerstart = 0; + while (pointermin < pointermid) { + if (text1.substring(pointerstart, pointermid) == + text2.substring(pointerstart, pointermid)) { + pointermin = pointermid; + pointerstart = pointermin; + } else { + pointermax = pointermid; + } + pointermid = Math.floor((pointermax - pointermin) / 2 + pointermin); + } + return pointermid; +}; + + +/** + * Determine the common suffix of two strings. 
+ * @param {string} text1 First string. + * @param {string} text2 Second string. + * @return {number} The number of characters common to the end of each string. + */ +diff_match_patch.prototype.diff_commonSuffix = function(text1, text2) { + // Quick check for common null cases. + if (!text1 || !text2 || + text1.charAt(text1.length - 1) != text2.charAt(text2.length - 1)) { + return 0; + } + // Binary search. + // Performance analysis: http://neil.fraser.name/news/2007/10/09/ + var pointermin = 0; + var pointermax = Math.min(text1.length, text2.length); + var pointermid = pointermax; + var pointerend = 0; + while (pointermin < pointermid) { + if (text1.substring(text1.length - pointermid, text1.length - pointerend) == + text2.substring(text2.length - pointermid, text2.length - pointerend)) { + pointermin = pointermid; + pointerend = pointermin; + } else { + pointermax = pointermid; + } + pointermid = Math.floor((pointermax - pointermin) / 2 + pointermin); + } + return pointermid; +}; + + +/** + * Determine if the suffix of one string is the prefix of another. + * @param {string} text1 First string. + * @param {string} text2 Second string. + * @return {number} The number of characters common to the end of the first + * string and the start of the second string. + * @private + */ +diff_match_patch.prototype.diff_commonOverlap_ = function(text1, text2) { + // Cache the text lengths to prevent multiple calls. + var text1_length = text1.length; + var text2_length = text2.length; + // Eliminate the null case. + if (text1_length == 0 || text2_length == 0) { + return 0; + } + // Truncate the longer string. + if (text1_length > text2_length) { + text1 = text1.substring(text1_length - text2_length); + } else if (text1_length < text2_length) { + text2 = text2.substring(0, text1_length); + } + var text_length = Math.min(text1_length, text2_length); + // Quick check for the worst case. 
+ if (text1 == text2) { + return text_length; + } + + // Start by looking for a single character match + // and increase length until no match is found. + // Performance analysis: http://neil.fraser.name/news/2010/11/04/ + var best = 0; + var length = 1; + while (true) { + var pattern = text1.substring(text_length - length); + var found = text2.indexOf(pattern); + if (found == -1) { + return best; + } + length += found; + if (found == 0 || text1.substring(text_length - length) == + text2.substring(0, length)) { + best = length; + length++; + } + } +}; + + +/** + * Do the two texts share a substring which is at least half the length of the + * longer text? + * This speedup can produce non-minimal diffs. + * @param {string} text1 First string. + * @param {string} text2 Second string. + * @return {Array.} Five element Array, containing the prefix of + * text1, the suffix of text1, the prefix of text2, the suffix of + * text2 and the common middle. Or null if there was no match. + * @private + */ +diff_match_patch.prototype.diff_halfMatch_ = function(text1, text2) { + if (this.Diff_Timeout <= 0) { + // Don't risk returning a non-optimal diff if we have unlimited time. + return null; + } + var longtext = text1.length > text2.length ? text1 : text2; + var shorttext = text1.length > text2.length ? text2 : text1; + if (longtext.length < 4 || shorttext.length * 2 < longtext.length) { + return null; // Pointless. + } + var dmp = this; // 'this' becomes 'window' in a closure. + + /** + * Does a substring of shorttext exist within longtext such that the substring + * is at least half the length of longtext? + * Closure, but does not reference any external variables. + * @param {string} longtext Longer string. + * @param {string} shorttext Shorter string. + * @param {number} i Start index of quarter length substring within longtext. 
+ * @return {Array.} Five element Array, containing the prefix of + * longtext, the suffix of longtext, the prefix of shorttext, the suffix + * of shorttext and the common middle. Or null if there was no match. + * @private + */ + function diff_halfMatchI_(longtext, shorttext, i) { + // Start with a 1/4 length substring at position i as a seed. + var seed = longtext.substring(i, i + Math.floor(longtext.length / 4)); + var j = -1; + var best_common = ''; + var best_longtext_a, best_longtext_b, best_shorttext_a, best_shorttext_b; + while ((j = shorttext.indexOf(seed, j + 1)) != -1) { + var prefixLength = dmp.diff_commonPrefix(longtext.substring(i), + shorttext.substring(j)); + var suffixLength = dmp.diff_commonSuffix(longtext.substring(0, i), + shorttext.substring(0, j)); + if (best_common.length < suffixLength + prefixLength) { + best_common = shorttext.substring(j - suffixLength, j) + + shorttext.substring(j, j + prefixLength); + best_longtext_a = longtext.substring(0, i - suffixLength); + best_longtext_b = longtext.substring(i + prefixLength); + best_shorttext_a = shorttext.substring(0, j - suffixLength); + best_shorttext_b = shorttext.substring(j + prefixLength); + } + } + if (best_common.length * 2 >= longtext.length) { + return [best_longtext_a, best_longtext_b, + best_shorttext_a, best_shorttext_b, best_common]; + } else { + return null; + } + } + + // First check if the second quarter is the seed for a half-match. + var hm1 = diff_halfMatchI_(longtext, shorttext, + Math.ceil(longtext.length / 4)); + // Check again based on the third quarter. + var hm2 = diff_halfMatchI_(longtext, shorttext, + Math.ceil(longtext.length / 2)); + var hm; + if (!hm1 && !hm2) { + return null; + } else if (!hm2) { + hm = hm1; + } else if (!hm1) { + hm = hm2; + } else { + // Both matched. Select the longest. + hm = hm1[4].length > hm2[4].length ? hm1 : hm2; + } + + // A half-match was found, sort out the return data. 
+ var text1_a, text1_b, text2_a, text2_b; + if (text1.length > text2.length) { + text1_a = hm[0]; + text1_b = hm[1]; + text2_a = hm[2]; + text2_b = hm[3]; + } else { + text2_a = hm[0]; + text2_b = hm[1]; + text1_a = hm[2]; + text1_b = hm[3]; + } + var mid_common = hm[4]; + return [text1_a, text1_b, text2_a, text2_b, mid_common]; +}; + + +/** + * Reduce the number of edits by eliminating semantically trivial equalities. + * @param {!Array.} diffs Array of diff tuples. + */ +diff_match_patch.prototype.diff_cleanupSemantic = function(diffs) { + var changes = false; + var equalities = []; // Stack of indices where equalities are found. + var equalitiesLength = 0; // Keeping our own length var is faster in JS. + /** @type {?string} */ + var lastequality = null; + // Always equal to diffs[equalities[equalitiesLength - 1]][1] + var pointer = 0; // Index of current position. + // Number of characters that changed prior to the equality. + var length_insertions1 = 0; + var length_deletions1 = 0; + // Number of characters that changed after the equality. + var length_insertions2 = 0; + var length_deletions2 = 0; + while (pointer < diffs.length) { + if (diffs[pointer][0] == DIFF_EQUAL) { // Equality found. + equalities[equalitiesLength++] = pointer; + length_insertions1 = length_insertions2; + length_deletions1 = length_deletions2; + length_insertions2 = 0; + length_deletions2 = 0; + lastequality = diffs[pointer][1]; + } else { // An insertion or deletion. + if (diffs[pointer][0] == DIFF_INSERT) { + length_insertions2 += diffs[pointer][1].length; + } else { + length_deletions2 += diffs[pointer][1].length; + } + // Eliminate an equality that is smaller or equal to the edits on both + // sides of it. + if (lastequality && (lastequality.length <= + Math.max(length_insertions1, length_deletions1)) && + (lastequality.length <= Math.max(length_insertions2, + length_deletions2))) { + // Duplicate record. 
+ diffs.splice(equalities[equalitiesLength - 1], 0, + [DIFF_DELETE, lastequality]); + // Change second copy to insert. + diffs[equalities[equalitiesLength - 1] + 1][0] = DIFF_INSERT; + // Throw away the equality we just deleted. + equalitiesLength--; + // Throw away the previous equality (it needs to be reevaluated). + equalitiesLength--; + pointer = equalitiesLength > 0 ? equalities[equalitiesLength - 1] : -1; + length_insertions1 = 0; // Reset the counters. + length_deletions1 = 0; + length_insertions2 = 0; + length_deletions2 = 0; + lastequality = null; + changes = true; + } + } + pointer++; + } + + // Normalize the diff. + if (changes) { + this.diff_cleanupMerge(diffs); + } + this.diff_cleanupSemanticLossless(diffs); + + // Find any overlaps between deletions and insertions. + // e.g: abcxxxxxxdef + // -> abcxxxdef + // e.g: xxxabcdefxxx + // -> defxxxabc + // Only extract an overlap if it is as big as the edit ahead or behind it. + pointer = 1; + while (pointer < diffs.length) { + if (diffs[pointer - 1][0] == DIFF_DELETE && + diffs[pointer][0] == DIFF_INSERT) { + var deletion = diffs[pointer - 1][1]; + var insertion = diffs[pointer][1]; + var overlap_length1 = this.diff_commonOverlap_(deletion, insertion); + var overlap_length2 = this.diff_commonOverlap_(insertion, deletion); + if (overlap_length1 >= overlap_length2) { + if (overlap_length1 >= deletion.length / 2 || + overlap_length1 >= insertion.length / 2) { + // Overlap found. Insert an equality and trim the surrounding edits. + diffs.splice(pointer, 0, + [DIFF_EQUAL, insertion.substring(0, overlap_length1)]); + diffs[pointer - 1][1] = + deletion.substring(0, deletion.length - overlap_length1); + diffs[pointer + 1][1] = insertion.substring(overlap_length1); + pointer++; + } + } else { + if (overlap_length2 >= deletion.length / 2 || + overlap_length2 >= insertion.length / 2) { + // Reverse overlap found. + // Insert an equality and swap and trim the surrounding edits. 
+ diffs.splice(pointer, 0, + [DIFF_EQUAL, deletion.substring(0, overlap_length2)]); + diffs[pointer - 1][0] = DIFF_INSERT; + diffs[pointer - 1][1] = + insertion.substring(0, insertion.length - overlap_length2); + diffs[pointer + 1][0] = DIFF_DELETE; + diffs[pointer + 1][1] = + deletion.substring(overlap_length2); + pointer++; + } + } + pointer++; + } + pointer++; + } +}; + + +/** + * Look for single edits surrounded on both sides by equalities + * which can be shifted sideways to align the edit to a word boundary. + * e.g: The cat came. -> The cat came. + * @param {!Array.} diffs Array of diff tuples. + */ +diff_match_patch.prototype.diff_cleanupSemanticLossless = function(diffs) { + /** + * Given two strings, compute a score representing whether the internal + * boundary falls on logical boundaries. + * Scores range from 6 (best) to 0 (worst). + * Closure, but does not reference any external variables. + * @param {string} one First string. + * @param {string} two Second string. + * @return {number} The score. + * @private + */ + function diff_cleanupSemanticScore_(one, two) { + if (!one || !two) { + // Edges are the best. + return 6; + } + + // Each port of this function behaves slightly differently due to + // subtle differences in each language's definition of things like + // 'whitespace'. Since this function's purpose is largely cosmetic, + // the choice has been made to use each language's native features + // rather than force total conformity. 
+ var char1 = one.charAt(one.length - 1); + var char2 = two.charAt(0); + var nonAlphaNumeric1 = char1.match(diff_match_patch.nonAlphaNumericRegex_); + var nonAlphaNumeric2 = char2.match(diff_match_patch.nonAlphaNumericRegex_); + var whitespace1 = nonAlphaNumeric1 && + char1.match(diff_match_patch.whitespaceRegex_); + var whitespace2 = nonAlphaNumeric2 && + char2.match(diff_match_patch.whitespaceRegex_); + var lineBreak1 = whitespace1 && + char1.match(diff_match_patch.linebreakRegex_); + var lineBreak2 = whitespace2 && + char2.match(diff_match_patch.linebreakRegex_); + var blankLine1 = lineBreak1 && + one.match(diff_match_patch.blanklineEndRegex_); + var blankLine2 = lineBreak2 && + two.match(diff_match_patch.blanklineStartRegex_); + + if (blankLine1 || blankLine2) { + // Five points for blank lines. + return 5; + } else if (lineBreak1 || lineBreak2) { + // Four points for line breaks. + return 4; + } else if (nonAlphaNumeric1 && !whitespace1 && whitespace2) { + // Three points for end of sentences. + return 3; + } else if (whitespace1 || whitespace2) { + // Two points for whitespace. + return 2; + } else if (nonAlphaNumeric1 || nonAlphaNumeric2) { + // One point for non-alphanumeric. + return 1; + } + return 0; + } + + var pointer = 1; + // Intentionally ignore the first and last element (don't need checking). + while (pointer < diffs.length - 1) { + if (diffs[pointer - 1][0] == DIFF_EQUAL && + diffs[pointer + 1][0] == DIFF_EQUAL) { + // This is a single edit surrounded by equalities. + var equality1 = diffs[pointer - 1][1]; + var edit = diffs[pointer][1]; + var equality2 = diffs[pointer + 1][1]; + + // First, shift the edit as far left as possible. 
+ var commonOffset = this.diff_commonSuffix(equality1, edit); + if (commonOffset) { + var commonString = edit.substring(edit.length - commonOffset); + equality1 = equality1.substring(0, equality1.length - commonOffset); + edit = commonString + edit.substring(0, edit.length - commonOffset); + equality2 = commonString + equality2; + } + + // Second, step character by character right, looking for the best fit. + var bestEquality1 = equality1; + var bestEdit = edit; + var bestEquality2 = equality2; + var bestScore = diff_cleanupSemanticScore_(equality1, edit) + + diff_cleanupSemanticScore_(edit, equality2); + while (edit.charAt(0) === equality2.charAt(0)) { + equality1 += edit.charAt(0); + edit = edit.substring(1) + equality2.charAt(0); + equality2 = equality2.substring(1); + var score = diff_cleanupSemanticScore_(equality1, edit) + + diff_cleanupSemanticScore_(edit, equality2); + // The >= encourages trailing rather than leading whitespace on edits. + if (score >= bestScore) { + bestScore = score; + bestEquality1 = equality1; + bestEdit = edit; + bestEquality2 = equality2; + } + } + + if (diffs[pointer - 1][1] != bestEquality1) { + // We have an improvement, save it back to the diff. + if (bestEquality1) { + diffs[pointer - 1][1] = bestEquality1; + } else { + diffs.splice(pointer - 1, 1); + pointer--; + } + diffs[pointer][1] = bestEdit; + if (bestEquality2) { + diffs[pointer + 1][1] = bestEquality2; + } else { + diffs.splice(pointer + 1, 1); + pointer--; + } + } + } + pointer++; + } +}; + +// Define some regex patterns for matching boundaries. +diff_match_patch.nonAlphaNumericRegex_ = /[^a-zA-Z0-9]/; +diff_match_patch.whitespaceRegex_ = /\s/; +diff_match_patch.linebreakRegex_ = /[\r\n]/; +diff_match_patch.blanklineEndRegex_ = /\n\r?\n$/; +diff_match_patch.blanklineStartRegex_ = /^\r?\n\r?\n/; + +/** + * Reduce the number of edits by eliminating operationally trivial equalities. + * @param {!Array.} diffs Array of diff tuples. 
+ */ +diff_match_patch.prototype.diff_cleanupEfficiency = function(diffs) { + var changes = false; + var equalities = []; // Stack of indices where equalities are found. + var equalitiesLength = 0; // Keeping our own length var is faster in JS. + /** @type {?string} */ + var lastequality = null; + // Always equal to diffs[equalities[equalitiesLength - 1]][1] + var pointer = 0; // Index of current position. + // Is there an insertion operation before the last equality. + var pre_ins = false; + // Is there a deletion operation before the last equality. + var pre_del = false; + // Is there an insertion operation after the last equality. + var post_ins = false; + // Is there a deletion operation after the last equality. + var post_del = false; + while (pointer < diffs.length) { + if (diffs[pointer][0] == DIFF_EQUAL) { // Equality found. + if (diffs[pointer][1].length < this.Diff_EditCost && + (post_ins || post_del)) { + // Candidate found. + equalities[equalitiesLength++] = pointer; + pre_ins = post_ins; + pre_del = post_del; + lastequality = diffs[pointer][1]; + } else { + // Not a candidate, and can never become one. + equalitiesLength = 0; + lastequality = null; + } + post_ins = post_del = false; + } else { // An insertion or deletion. + if (diffs[pointer][0] == DIFF_DELETE) { + post_del = true; + } else { + post_ins = true; + } + /* + * Five types to be split: + * ABXYCD + * AXCD + * ABXC + * AXCD + * ABXC + */ + if (lastequality && ((pre_ins && pre_del && post_ins && post_del) || + ((lastequality.length < this.Diff_EditCost / 2) && + (pre_ins + pre_del + post_ins + post_del) == 3))) { + // Duplicate record. + diffs.splice(equalities[equalitiesLength - 1], 0, + [DIFF_DELETE, lastequality]); + // Change second copy to insert. 
+ diffs[equalities[equalitiesLength - 1] + 1][0] = DIFF_INSERT; + equalitiesLength--; // Throw away the equality we just deleted; + lastequality = null; + if (pre_ins && pre_del) { + // No changes made which could affect previous entry, keep going. + post_ins = post_del = true; + equalitiesLength = 0; + } else { + equalitiesLength--; // Throw away the previous equality. + pointer = equalitiesLength > 0 ? + equalities[equalitiesLength - 1] : -1; + post_ins = post_del = false; + } + changes = true; + } + } + pointer++; + } + + if (changes) { + this.diff_cleanupMerge(diffs); + } +}; + + +/** + * Reorder and merge like edit sections. Merge equalities. + * Any edit section can move as long as it doesn't cross an equality. + * @param {!Array.} diffs Array of diff tuples. + */ +diff_match_patch.prototype.diff_cleanupMerge = function(diffs) { + diffs.push([DIFF_EQUAL, '']); // Add a dummy entry at the end. + var pointer = 0; + var count_delete = 0; + var count_insert = 0; + var text_delete = ''; + var text_insert = ''; + var commonlength; + while (pointer < diffs.length) { + switch (diffs[pointer][0]) { + case DIFF_INSERT: + count_insert++; + text_insert += diffs[pointer][1]; + pointer++; + break; + case DIFF_DELETE: + count_delete++; + text_delete += diffs[pointer][1]; + pointer++; + break; + case DIFF_EQUAL: + // Upon reaching an equality, check for prior redundancies. + if (count_delete + count_insert > 1) { + if (count_delete !== 0 && count_insert !== 0) { + // Factor out any common prefixies. 
+ commonlength = this.diff_commonPrefix(text_insert, text_delete); + if (commonlength !== 0) { + if ((pointer - count_delete - count_insert) > 0 && + diffs[pointer - count_delete - count_insert - 1][0] == + DIFF_EQUAL) { + diffs[pointer - count_delete - count_insert - 1][1] += + text_insert.substring(0, commonlength); + } else { + diffs.splice(0, 0, [DIFF_EQUAL, + text_insert.substring(0, commonlength)]); + pointer++; + } + text_insert = text_insert.substring(commonlength); + text_delete = text_delete.substring(commonlength); + } + // Factor out any common suffixies. + commonlength = this.diff_commonSuffix(text_insert, text_delete); + if (commonlength !== 0) { + diffs[pointer][1] = text_insert.substring(text_insert.length - + commonlength) + diffs[pointer][1]; + text_insert = text_insert.substring(0, text_insert.length - + commonlength); + text_delete = text_delete.substring(0, text_delete.length - + commonlength); + } + } + // Delete the offending records and add the merged ones. + if (count_delete === 0) { + diffs.splice(pointer - count_insert, + count_delete + count_insert, [DIFF_INSERT, text_insert]); + } else if (count_insert === 0) { + diffs.splice(pointer - count_delete, + count_delete + count_insert, [DIFF_DELETE, text_delete]); + } else { + diffs.splice(pointer - count_delete - count_insert, + count_delete + count_insert, [DIFF_DELETE, text_delete], + [DIFF_INSERT, text_insert]); + } + pointer = pointer - count_delete - count_insert + + (count_delete ? 1 : 0) + (count_insert ? 1 : 0) + 1; + } else if (pointer !== 0 && diffs[pointer - 1][0] == DIFF_EQUAL) { + // Merge this equality with the previous one. + diffs[pointer - 1][1] += diffs[pointer][1]; + diffs.splice(pointer, 1); + } else { + pointer++; + } + count_insert = 0; + count_delete = 0; + text_delete = ''; + text_insert = ''; + break; + } + } + if (diffs[diffs.length - 1][1] === '') { + diffs.pop(); // Remove the dummy entry at the end. 
+ } + + // Second pass: look for single edits surrounded on both sides by equalities + // which can be shifted sideways to eliminate an equality. + // e.g: ABAC -> ABAC + var changes = false; + pointer = 1; + // Intentionally ignore the first and last element (don't need checking). + while (pointer < diffs.length - 1) { + if (diffs[pointer - 1][0] == DIFF_EQUAL && + diffs[pointer + 1][0] == DIFF_EQUAL) { + // This is a single edit surrounded by equalities. + if (diffs[pointer][1].substring(diffs[pointer][1].length - + diffs[pointer - 1][1].length) == diffs[pointer - 1][1]) { + // Shift the edit over the previous equality. + diffs[pointer][1] = diffs[pointer - 1][1] + + diffs[pointer][1].substring(0, diffs[pointer][1].length - + diffs[pointer - 1][1].length); + diffs[pointer + 1][1] = diffs[pointer - 1][1] + diffs[pointer + 1][1]; + diffs.splice(pointer - 1, 1); + changes = true; + } else if (diffs[pointer][1].substring(0, diffs[pointer + 1][1].length) == + diffs[pointer + 1][1]) { + // Shift the edit over the next equality. + diffs[pointer - 1][1] += diffs[pointer + 1][1]; + diffs[pointer][1] = + diffs[pointer][1].substring(diffs[pointer + 1][1].length) + + diffs[pointer + 1][1]; + diffs.splice(pointer + 1, 1); + changes = true; + } + } + pointer++; + } + // If shifts were made, the diff needs reordering and another shift sweep. + if (changes) { + this.diff_cleanupMerge(diffs); + } +}; + + +/** + * loc is a location in text1, compute and return the equivalent location in + * text2. + * e.g. 'The cat' vs 'The big cat', 1->1, 5->8 + * @param {!Array.} diffs Array of diff tuples. + * @param {number} loc Location within text1. + * @return {number} Location within text2. + */ +diff_match_patch.prototype.diff_xIndex = function(diffs, loc) { + var chars1 = 0; + var chars2 = 0; + var last_chars1 = 0; + var last_chars2 = 0; + var x; + for (x = 0; x < diffs.length; x++) { + if (diffs[x][0] !== DIFF_INSERT) { // Equality or deletion. 
+      chars1 += diffs[x][1].length;
+    }
+    if (diffs[x][0] !== DIFF_DELETE) {  // Equality or insertion.
+      chars2 += diffs[x][1].length;
+    }
+    if (chars1 > loc) {  // Overshot the location.
+      break;
+    }
+    last_chars1 = chars1;
+    last_chars2 = chars2;
+  }
+  // Was the location was deleted?
+  if (diffs.length != x && diffs[x][0] === DIFF_DELETE) {
+    return last_chars2;
+  }
+  // Add the remaining character length.
+  return last_chars2 + (loc - last_chars1);
+};
+
+
+/**
+ * Convert a diff array into a pretty HTML report.
+ * @param {!Array.<!diff_match_patch.Diff>} diffs Array of diff tuples.
+ * @return {string} HTML representation.
+ */
+diff_match_patch.prototype.diff_prettyHtml = function(diffs) {
+  var html = [];
+  var pattern_amp = /&/g;
+  var pattern_lt = /</g;
+  var pattern_gt = />/g;
+  var pattern_para = /\n/g;
+  for (var x = 0; x < diffs.length; x++) {
+    var op = diffs[x][0];    // Operation (insert, delete, equal)
+    var data = diffs[x][1];  // Text of change.
+    var text = data.replace(pattern_amp, '&amp;').replace(pattern_lt, '&lt;')
+        .replace(pattern_gt, '&gt;').replace(pattern_para, '&para;<br>');
+    switch (op) {
+      case DIFF_INSERT:
+        html[x] = '<ins style="background:#e6ffe6;">' + text + '</ins>';
+        break;
+      case DIFF_DELETE:
+        html[x] = '<del style="background:#ffe6e6;">' + text + '</del>';
+        break;
+      case DIFF_EQUAL:
+        html[x] = '<span>' + text + '</span>';
+        break;
+    }
+  }
+  return html.join('');
+};
+
+
+/**
+ * Compute and return the source text (all equalities and deletions).
+ * @param {!Array.<!diff_match_patch.Diff>} diffs Array of diff tuples.
+ * @return {string} Source text.
+ */
+diff_match_patch.prototype.diff_text1 = function(diffs) {
+  var text = [];
+  for (var x = 0; x < diffs.length; x++) {
+    if (diffs[x][0] !== DIFF_INSERT) {
+      text[x] = diffs[x][1];
+    }
+  }
+  return text.join('');
+};
+
+
+/**
+ * Compute and return the destination text (all equalities and insertions).
+ * @param {!Array.<!diff_match_patch.Diff>} diffs Array of diff tuples.
+ * @return {string} Destination text.
+ */
+diff_match_patch.prototype.diff_text2 = function(diffs) {
+  var text = [];
+  for (var x = 0; x < diffs.length; x++) {
+    if (diffs[x][0] !== DIFF_DELETE) {
+      text[x] = diffs[x][1];
+    }
+  }
+  return text.join('');
+};
+
+
+/**
+ * Compute the Levenshtein distance; the number of inserted, deleted or
+ * substituted characters.
+ * @param {!Array.<!diff_match_patch.Diff>} diffs Array of diff tuples.
+ * @return {number} Number of changes.
+ */
+diff_match_patch.prototype.diff_levenshtein = function(diffs) {
+  var levenshtein = 0;
+  var insertions = 0;
+  var deletions = 0;
+  for (var x = 0; x < diffs.length; x++) {
+    var op = diffs[x][0];
+    var data = diffs[x][1];
+    switch (op) {
+      case DIFF_INSERT:
+        insertions += data.length;
+        break;
+      case DIFF_DELETE:
+        deletions += data.length;
+        break;
+      case DIFF_EQUAL:
+        // A deletion and an insertion is one substitution.
+        levenshtein += Math.max(insertions, deletions);
+        insertions = 0;
+        deletions = 0;
+        break;
+    }
+  }
+  levenshtein += Math.max(insertions, deletions);
+  return levenshtein;
+};
+
+
+/**
+ * Crush the diff into an encoded string which describes the operations
+ * required to transform text1 into text2.
+ * E.g.
=3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. + * Operations are tab-separated. Inserted text is escaped using %xx notation. + * @param {!Array.} diffs Array of diff tuples. + * @return {string} Delta text. + */ +diff_match_patch.prototype.diff_toDelta = function(diffs) { + var text = []; + for (var x = 0; x < diffs.length; x++) { + switch (diffs[x][0]) { + case DIFF_INSERT: + text[x] = '+' + encodeURI(diffs[x][1]); + break; + case DIFF_DELETE: + text[x] = '-' + diffs[x][1].length; + break; + case DIFF_EQUAL: + text[x] = '=' + diffs[x][1].length; + break; + } + } + return text.join('\t').replace(/%20/g, ' '); +}; + + +/** + * Given the original text1, and an encoded string which describes the + * operations required to transform text1 into text2, compute the full diff. + * @param {string} text1 Source string for the diff. + * @param {string} delta Delta text. + * @return {!Array.} Array of diff tuples. + * @throws {!Error} If invalid input. + */ +diff_match_patch.prototype.diff_fromDelta = function(text1, delta) { + var diffs = []; + var diffsLength = 0; // Keeping our own length var is faster in JS. + var pointer = 0; // Cursor in text1 + var tokens = delta.split(/\t/g); + for (var x = 0; x < tokens.length; x++) { + // Each token begins with a one character parameter which specifies the + // operation of this token (delete, insert, equality). + var param = tokens[x].substring(1); + switch (tokens[x].charAt(0)) { + case '+': + try { + diffs[diffsLength++] = [DIFF_INSERT, decodeURI(param)]; + } catch (ex) { + // Malformed URI sequence. + throw new Error('Illegal escape in diff_fromDelta: ' + param); + } + break; + case '-': + // Fall through. 
+ case '=': + var n = parseInt(param, 10); + if (isNaN(n) || n < 0) { + throw new Error('Invalid number in diff_fromDelta: ' + param); + } + var text = text1.substring(pointer, pointer += n); + if (tokens[x].charAt(0) == '=') { + diffs[diffsLength++] = [DIFF_EQUAL, text]; + } else { + diffs[diffsLength++] = [DIFF_DELETE, text]; + } + break; + default: + // Blank tokens are ok (from a trailing \t). + // Anything else is an error. + if (tokens[x]) { + throw new Error('Invalid diff operation in diff_fromDelta: ' + + tokens[x]); + } + } + } + if (pointer != text1.length) { + throw new Error('Delta length (' + pointer + + ') does not equal source text length (' + text1.length + ').'); + } + return diffs; +}; + + +// MATCH FUNCTIONS + + +/** + * Locate the best instance of 'pattern' in 'text' near 'loc'. + * @param {string} text The text to search. + * @param {string} pattern The pattern to search for. + * @param {number} loc The location to search around. + * @return {number} Best match index or -1. + */ +diff_match_patch.prototype.match_main = function(text, pattern, loc) { + // Check for null inputs. + if (text == null || pattern == null || loc == null) { + throw new Error('Null input. (match_main)'); + } + + loc = Math.max(0, Math.min(loc, text.length)); + if (text == pattern) { + // Shortcut (potentially not guaranteed by the algorithm) + return 0; + } else if (!text.length) { + // Nothing to match. + return -1; + } else if (text.substring(loc, loc + pattern.length) == pattern) { + // Perfect match at the perfect spot! (Includes case of null pattern) + return loc; + } else { + // Do a fuzzy compare. + return this.match_bitap_(text, pattern, loc); + } +}; + + +/** + * Locate the best instance of 'pattern' in 'text' near 'loc' using the + * Bitap algorithm. + * @param {string} text The text to search. + * @param {string} pattern The pattern to search for. + * @param {number} loc The location to search around. + * @return {number} Best match index or -1. 
+ * @private + */ +diff_match_patch.prototype.match_bitap_ = function(text, pattern, loc) { + if (pattern.length > this.Match_MaxBits) { + throw new Error('Pattern too long for this browser.'); + } + + // Initialise the alphabet. + var s = this.match_alphabet_(pattern); + + var dmp = this; // 'this' becomes 'window' in a closure. + + /** + * Compute and return the score for a match with e errors and x location. + * Accesses loc and pattern through being a closure. + * @param {number} e Number of errors in match. + * @param {number} x Location of match. + * @return {number} Overall score for match (0.0 = good, 1.0 = bad). + * @private + */ + function match_bitapScore_(e, x) { + var accuracy = e / pattern.length; + var proximity = Math.abs(loc - x); + if (!dmp.Match_Distance) { + // Dodge divide by zero error. + return proximity ? 1.0 : accuracy; + } + return accuracy + (proximity / dmp.Match_Distance); + } + + // Highest score beyond which we give up. + var score_threshold = this.Match_Threshold; + // Is there a nearby exact match? (speedup) + var best_loc = text.indexOf(pattern, loc); + if (best_loc != -1) { + score_threshold = Math.min(match_bitapScore_(0, best_loc), score_threshold); + // What about in the other direction? (speedup) + best_loc = text.lastIndexOf(pattern, loc + pattern.length); + if (best_loc != -1) { + score_threshold = + Math.min(match_bitapScore_(0, best_loc), score_threshold); + } + } + + // Initialise the bit arrays. + var matchmask = 1 << (pattern.length - 1); + best_loc = -1; + + var bin_min, bin_mid; + var bin_max = pattern.length + text.length; + var last_rd; + for (var d = 0; d < pattern.length; d++) { + // Scan for the best match; each iteration allows for one more error. + // Run a binary search to determine how far from 'loc' we can stray at this + // error level. 
+ bin_min = 0; + bin_mid = bin_max; + while (bin_min < bin_mid) { + if (match_bitapScore_(d, loc + bin_mid) <= score_threshold) { + bin_min = bin_mid; + } else { + bin_max = bin_mid; + } + bin_mid = Math.floor((bin_max - bin_min) / 2 + bin_min); + } + // Use the result from this iteration as the maximum for the next. + bin_max = bin_mid; + var start = Math.max(1, loc - bin_mid + 1); + var finish = Math.min(loc + bin_mid, text.length) + pattern.length; + + var rd = Array(finish + 2); + rd[finish + 1] = (1 << d) - 1; + for (var j = finish; j >= start; j--) { + // The alphabet (s) is a sparse hash, so the following line generates + // warnings. + var charMatch = s[text.charAt(j - 1)]; + if (d === 0) { // First pass: exact match. + rd[j] = ((rd[j + 1] << 1) | 1) & charMatch; + } else { // Subsequent passes: fuzzy match. + rd[j] = (((rd[j + 1] << 1) | 1) & charMatch) | + (((last_rd[j + 1] | last_rd[j]) << 1) | 1) | + last_rd[j + 1]; + } + if (rd[j] & matchmask) { + var score = match_bitapScore_(d, j - 1); + // This match will almost certainly be better than any existing match. + // But check anyway. + if (score <= score_threshold) { + // Told you so. + score_threshold = score; + best_loc = j - 1; + if (best_loc > loc) { + // When passing loc, don't exceed our current distance from loc. + start = Math.max(1, 2 * loc - best_loc); + } else { + // Already passed loc, downhill from here on in. + break; + } + } + } + } + // No hope for a (better) match at greater error levels. + if (match_bitapScore_(d + 1, loc) > score_threshold) { + break; + } + last_rd = rd; + } + return best_loc; +}; + + +/** + * Initialise the alphabet for the Bitap algorithm. + * @param {string} pattern The text to encode. + * @return {!Object} Hash of character locations. 
+ * @private + */ +diff_match_patch.prototype.match_alphabet_ = function(pattern) { + var s = {}; + for (var i = 0; i < pattern.length; i++) { + s[pattern.charAt(i)] = 0; + } + for (var i = 0; i < pattern.length; i++) { + s[pattern.charAt(i)] |= 1 << (pattern.length - i - 1); + } + return s; +}; + + +// PATCH FUNCTIONS + + +/** + * Increase the context until it is unique, + * but don't let the pattern expand beyond Match_MaxBits. + * @param {!diff_match_patch.patch_obj} patch The patch to grow. + * @param {string} text Source text. + * @private + */ +diff_match_patch.prototype.patch_addContext_ = function(patch, text) { + if (text.length == 0) { + return; + } + var pattern = text.substring(patch.start2, patch.start2 + patch.length1); + var padding = 0; + + // Look for the first and last matches of pattern in text. If two different + // matches are found, increase the pattern length. + while (text.indexOf(pattern) != text.lastIndexOf(pattern) && + pattern.length < this.Match_MaxBits - this.Patch_Margin - + this.Patch_Margin) { + padding += this.Patch_Margin; + pattern = text.substring(patch.start2 - padding, + patch.start2 + patch.length1 + padding); + } + // Add one chunk for good luck. + padding += this.Patch_Margin; + + // Add the prefix. + var prefix = text.substring(patch.start2 - padding, patch.start2); + if (prefix) { + patch.diffs.unshift([DIFF_EQUAL, prefix]); + } + // Add the suffix. + var suffix = text.substring(patch.start2 + patch.length1, + patch.start2 + patch.length1 + padding); + if (suffix) { + patch.diffs.push([DIFF_EQUAL, suffix]); + } + + // Roll back the start points. + patch.start1 -= prefix.length; + patch.start2 -= prefix.length; + // Extend the lengths. + patch.length1 += prefix.length + suffix.length; + patch.length2 += prefix.length + suffix.length; +}; + + +/** + * Compute a list of patches to turn text1 into text2. + * Use diffs if provided, otherwise compute it ourselves. 
+ * There are four ways to call this function, depending on what data is + * available to the caller: + * Method 1: + * a = text1, b = text2 + * Method 2: + * a = diffs + * Method 3 (optimal): + * a = text1, b = diffs + * Method 4 (deprecated, use method 3): + * a = text1, b = text2, c = diffs + * + * @param {string|!Array.} a text1 (methods 1,3,4) or + * Array of diff tuples for text1 to text2 (method 2). + * @param {string|!Array.} opt_b text2 (methods 1,4) or + * Array of diff tuples for text1 to text2 (method 3) or undefined (method 2). + * @param {string|!Array.} opt_c Array of diff tuples + * for text1 to text2 (method 4) or undefined (methods 1,2,3). + * @return {!Array.} Array of Patch objects. + */ +diff_match_patch.prototype.patch_make = function(a, opt_b, opt_c) { + var text1, diffs; + if (typeof a == 'string' && typeof opt_b == 'string' && + typeof opt_c == 'undefined') { + // Method 1: text1, text2 + // Compute diffs from text1 and text2. + text1 = /** @type {string} */(a); + diffs = this.diff_main(text1, /** @type {string} */(opt_b), true); + if (diffs.length > 2) { + this.diff_cleanupSemantic(diffs); + this.diff_cleanupEfficiency(diffs); + } + } else if (a && typeof a == 'object' && typeof opt_b == 'undefined' && + typeof opt_c == 'undefined') { + // Method 2: diffs + // Compute text1 from diffs. + diffs = /** @type {!Array.} */(a); + text1 = this.diff_text1(diffs); + } else if (typeof a == 'string' && opt_b && typeof opt_b == 'object' && + typeof opt_c == 'undefined') { + // Method 3: text1, diffs + text1 = /** @type {string} */(a); + diffs = /** @type {!Array.} */(opt_b); + } else if (typeof a == 'string' && typeof opt_b == 'string' && + opt_c && typeof opt_c == 'object') { + // Method 4: text1, text2, diffs + // text2 is not used. + text1 = /** @type {string} */(a); + diffs = /** @type {!Array.} */(opt_c); + } else { + throw new Error('Unknown call format to patch_make.'); + } + + if (diffs.length === 0) { + return []; // Get rid of the null case. 
+ } + var patches = []; + var patch = new diff_match_patch.patch_obj(); + var patchDiffLength = 0; // Keeping our own length var is faster in JS. + var char_count1 = 0; // Number of characters into the text1 string. + var char_count2 = 0; // Number of characters into the text2 string. + // Start with text1 (prepatch_text) and apply the diffs until we arrive at + // text2 (postpatch_text). We recreate the patches one by one to determine + // context info. + var prepatch_text = text1; + var postpatch_text = text1; + for (var x = 0; x < diffs.length; x++) { + var diff_type = diffs[x][0]; + var diff_text = diffs[x][1]; + + if (!patchDiffLength && diff_type !== DIFF_EQUAL) { + // A new patch starts here. + patch.start1 = char_count1; + patch.start2 = char_count2; + } + + switch (diff_type) { + case DIFF_INSERT: + patch.diffs[patchDiffLength++] = diffs[x]; + patch.length2 += diff_text.length; + postpatch_text = postpatch_text.substring(0, char_count2) + diff_text + + postpatch_text.substring(char_count2); + break; + case DIFF_DELETE: + patch.length1 += diff_text.length; + patch.diffs[patchDiffLength++] = diffs[x]; + postpatch_text = postpatch_text.substring(0, char_count2) + + postpatch_text.substring(char_count2 + + diff_text.length); + break; + case DIFF_EQUAL: + if (diff_text.length <= 2 * this.Patch_Margin && + patchDiffLength && diffs.length != x + 1) { + // Small equality inside a patch. + patch.diffs[patchDiffLength++] = diffs[x]; + patch.length1 += diff_text.length; + patch.length2 += diff_text.length; + } else if (diff_text.length >= 2 * this.Patch_Margin) { + // Time for a new patch. + if (patchDiffLength) { + this.patch_addContext_(patch, prepatch_text); + patches.push(patch); + patch = new diff_match_patch.patch_obj(); + patchDiffLength = 0; + // Unlike Unidiff, our patch lists have a rolling context. + // http://code.google.com/p/google-diff-match-patch/wiki/Unidiff + // Update prepatch text & pos to reflect the application of the + // just completed patch. 
+ prepatch_text = postpatch_text; + char_count1 = char_count2; + } + } + break; + } + + // Update the current character count. + if (diff_type !== DIFF_INSERT) { + char_count1 += diff_text.length; + } + if (diff_type !== DIFF_DELETE) { + char_count2 += diff_text.length; + } + } + // Pick up the leftover patch if not empty. + if (patchDiffLength) { + this.patch_addContext_(patch, prepatch_text); + patches.push(patch); + } + + return patches; +}; + + +/** + * Given an array of patches, return another array that is identical. + * @param {!Array.} patches Array of Patch objects. + * @return {!Array.} Array of Patch objects. + */ +diff_match_patch.prototype.patch_deepCopy = function(patches) { + // Making deep copies is hard in JavaScript. + var patchesCopy = []; + for (var x = 0; x < patches.length; x++) { + var patch = patches[x]; + var patchCopy = new diff_match_patch.patch_obj(); + patchCopy.diffs = []; + for (var y = 0; y < patch.diffs.length; y++) { + patchCopy.diffs[y] = patch.diffs[y].slice(); + } + patchCopy.start1 = patch.start1; + patchCopy.start2 = patch.start2; + patchCopy.length1 = patch.length1; + patchCopy.length2 = patch.length2; + patchesCopy[x] = patchCopy; + } + return patchesCopy; +}; + + +/** + * Merge a set of patches onto the text. Return a patched text, as well + * as a list of true/false values indicating which patches were applied. + * @param {!Array.} patches Array of Patch objects. + * @param {string} text Old text. + * @return {!Array.>} Two element Array, containing the + * new text and an array of boolean values. + */ +diff_match_patch.prototype.patch_apply = function(patches, text) { + if (patches.length == 0) { + return [text, []]; + } + + // Deep copy the patches so that no changes are made to originals. 
+ patches = this.patch_deepCopy(patches); + + var nullPadding = this.patch_addPadding(patches); + text = nullPadding + text + nullPadding; + + this.patch_splitMax(patches); + // delta keeps track of the offset between the expected and actual location + // of the previous patch. If there are patches expected at positions 10 and + // 20, but the first patch was found at 12, delta is 2 and the second patch + // has an effective expected position of 22. + var delta = 0; + var results = []; + for (var x = 0; x < patches.length; x++) { + var expected_loc = patches[x].start2 + delta; + var text1 = this.diff_text1(patches[x].diffs); + var start_loc; + var end_loc = -1; + if (text1.length > this.Match_MaxBits) { + // patch_splitMax will only provide an oversized pattern in the case of + // a monster delete. + start_loc = this.match_main(text, text1.substring(0, this.Match_MaxBits), + expected_loc); + if (start_loc != -1) { + end_loc = this.match_main(text, + text1.substring(text1.length - this.Match_MaxBits), + expected_loc + text1.length - this.Match_MaxBits); + if (end_loc == -1 || start_loc >= end_loc) { + // Can't find valid trailing context. Drop this patch. + start_loc = -1; + } + } + } else { + start_loc = this.match_main(text, text1, expected_loc); + } + if (start_loc == -1) { + // No match found. :( + results[x] = false; + // Subtract the delta for this failed patch from subsequent patches. + delta -= patches[x].length2 - patches[x].length1; + } else { + // Found a match. :) + results[x] = true; + delta = start_loc - expected_loc; + var text2; + if (end_loc == -1) { + text2 = text.substring(start_loc, start_loc + text1.length); + } else { + text2 = text.substring(start_loc, end_loc + this.Match_MaxBits); + } + if (text1 == text2) { + // Perfect match, just shove the replacement text in. + text = text.substring(0, start_loc) + + this.diff_text2(patches[x].diffs) + + text.substring(start_loc + text1.length); + } else { + // Imperfect match. 
Run a diff to get a framework of equivalent + // indices. + var diffs = this.diff_main(text1, text2, false); + if (text1.length > this.Match_MaxBits && + this.diff_levenshtein(diffs) / text1.length > + this.Patch_DeleteThreshold) { + // The end points match, but the content is unacceptably bad. + results[x] = false; + } else { + this.diff_cleanupSemanticLossless(diffs); + var index1 = 0; + var index2; + for (var y = 0; y < patches[x].diffs.length; y++) { + var mod = patches[x].diffs[y]; + if (mod[0] !== DIFF_EQUAL) { + index2 = this.diff_xIndex(diffs, index1); + } + if (mod[0] === DIFF_INSERT) { // Insertion + text = text.substring(0, start_loc + index2) + mod[1] + + text.substring(start_loc + index2); + } else if (mod[0] === DIFF_DELETE) { // Deletion + text = text.substring(0, start_loc + index2) + + text.substring(start_loc + this.diff_xIndex(diffs, + index1 + mod[1].length)); + } + if (mod[0] !== DIFF_DELETE) { + index1 += mod[1].length; + } + } + } + } + } + } + // Strip the padding off. + text = text.substring(nullPadding.length, text.length - nullPadding.length); + return [text, results]; +}; + + +/** + * Add some padding on text start and end so that edges can match something. + * Intended to be called only from within patch_apply. + * @param {!Array.} patches Array of Patch objects. + * @return {string} The padding string added to each side. + */ +diff_match_patch.prototype.patch_addPadding = function(patches) { + var paddingLength = this.Patch_Margin; + var nullPadding = ''; + for (var x = 1; x <= paddingLength; x++) { + nullPadding += String.fromCharCode(x); + } + + // Bump all the patches forward. + for (var x = 0; x < patches.length; x++) { + patches[x].start1 += paddingLength; + patches[x].start2 += paddingLength; + } + + // Add some padding on start of first diff. + var patch = patches[0]; + var diffs = patch.diffs; + if (diffs.length == 0 || diffs[0][0] != DIFF_EQUAL) { + // Add nullPadding equality. 
+ diffs.unshift([DIFF_EQUAL, nullPadding]); + patch.start1 -= paddingLength; // Should be 0. + patch.start2 -= paddingLength; // Should be 0. + patch.length1 += paddingLength; + patch.length2 += paddingLength; + } else if (paddingLength > diffs[0][1].length) { + // Grow first equality. + var extraLength = paddingLength - diffs[0][1].length; + diffs[0][1] = nullPadding.substring(diffs[0][1].length) + diffs[0][1]; + patch.start1 -= extraLength; + patch.start2 -= extraLength; + patch.length1 += extraLength; + patch.length2 += extraLength; + } + + // Add some padding on end of last diff. + patch = patches[patches.length - 1]; + diffs = patch.diffs; + if (diffs.length == 0 || diffs[diffs.length - 1][0] != DIFF_EQUAL) { + // Add nullPadding equality. + diffs.push([DIFF_EQUAL, nullPadding]); + patch.length1 += paddingLength; + patch.length2 += paddingLength; + } else if (paddingLength > diffs[diffs.length - 1][1].length) { + // Grow last equality. + var extraLength = paddingLength - diffs[diffs.length - 1][1].length; + diffs[diffs.length - 1][1] += nullPadding.substring(0, extraLength); + patch.length1 += extraLength; + patch.length2 += extraLength; + } + + return nullPadding; +}; + + +/** + * Look through the patches and break up any which are longer than the maximum + * limit of the match algorithm. + * Intended to be called only from within patch_apply. + * @param {!Array.} patches Array of Patch objects. + */ +diff_match_patch.prototype.patch_splitMax = function(patches) { + var patch_size = this.Match_MaxBits; + for (var x = 0; x < patches.length; x++) { + if (patches[x].length1 <= patch_size) { + continue; + } + var bigpatch = patches[x]; + // Remove the big old patch. + patches.splice(x--, 1); + var start1 = bigpatch.start1; + var start2 = bigpatch.start2; + var precontext = ''; + while (bigpatch.diffs.length !== 0) { + // Create one of several smaller patches. 
+ var patch = new diff_match_patch.patch_obj(); + var empty = true; + patch.start1 = start1 - precontext.length; + patch.start2 = start2 - precontext.length; + if (precontext !== '') { + patch.length1 = patch.length2 = precontext.length; + patch.diffs.push([DIFF_EQUAL, precontext]); + } + while (bigpatch.diffs.length !== 0 && + patch.length1 < patch_size - this.Patch_Margin) { + var diff_type = bigpatch.diffs[0][0]; + var diff_text = bigpatch.diffs[0][1]; + if (diff_type === DIFF_INSERT) { + // Insertions are harmless. + patch.length2 += diff_text.length; + start2 += diff_text.length; + patch.diffs.push(bigpatch.diffs.shift()); + empty = false; + } else if (diff_type === DIFF_DELETE && patch.diffs.length == 1 && + patch.diffs[0][0] == DIFF_EQUAL && + diff_text.length > 2 * patch_size) { + // This is a large deletion. Let it pass in one chunk. + patch.length1 += diff_text.length; + start1 += diff_text.length; + empty = false; + patch.diffs.push([diff_type, diff_text]); + bigpatch.diffs.shift(); + } else { + // Deletion or equality. Only take as much as we can stomach. + diff_text = diff_text.substring(0, + patch_size - patch.length1 - this.Patch_Margin); + patch.length1 += diff_text.length; + start1 += diff_text.length; + if (diff_type === DIFF_EQUAL) { + patch.length2 += diff_text.length; + start2 += diff_text.length; + } else { + empty = false; + } + patch.diffs.push([diff_type, diff_text]); + if (diff_text == bigpatch.diffs[0][1]) { + bigpatch.diffs.shift(); + } else { + bigpatch.diffs[0][1] = + bigpatch.diffs[0][1].substring(diff_text.length); + } + } + } + // Compute the head context for the next patch. + precontext = this.diff_text2(patch.diffs); + precontext = + precontext.substring(precontext.length - this.Patch_Margin); + // Append the end context for this patch. 
+ var postcontext = this.diff_text1(bigpatch.diffs) + .substring(0, this.Patch_Margin); + if (postcontext !== '') { + patch.length1 += postcontext.length; + patch.length2 += postcontext.length; + if (patch.diffs.length !== 0 && + patch.diffs[patch.diffs.length - 1][0] === DIFF_EQUAL) { + patch.diffs[patch.diffs.length - 1][1] += postcontext; + } else { + patch.diffs.push([DIFF_EQUAL, postcontext]); + } + } + if (!empty) { + patches.splice(++x, 0, patch); + } + } + } +}; + + +/** + * Take a list of patches and return a textual representation. + * @param {!Array.} patches Array of Patch objects. + * @return {string} Text representation of patches. + */ +diff_match_patch.prototype.patch_toText = function(patches) { + var text = []; + for (var x = 0; x < patches.length; x++) { + text[x] = patches[x]; + } + return text.join(''); +}; + + +/** + * Parse a textual representation of patches and return a list of Patch objects. + * @param {string} textline Text representation of patches. + * @return {!Array.} Array of Patch objects. + * @throws {!Error} If invalid input. 
+ */ +diff_match_patch.prototype.patch_fromText = function(textline) { + var patches = []; + if (!textline) { + return patches; + } + var text = textline.split('\n'); + var textPointer = 0; + var patchHeader = /^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@$/; + while (textPointer < text.length) { + var m = text[textPointer].match(patchHeader); + if (!m) { + throw new Error('Invalid patch string: ' + text[textPointer]); + } + var patch = new diff_match_patch.patch_obj(); + patches.push(patch); + patch.start1 = parseInt(m[1], 10); + if (m[2] === '') { + patch.start1--; + patch.length1 = 1; + } else if (m[2] == '0') { + patch.length1 = 0; + } else { + patch.start1--; + patch.length1 = parseInt(m[2], 10); + } + + patch.start2 = parseInt(m[3], 10); + if (m[4] === '') { + patch.start2--; + patch.length2 = 1; + } else if (m[4] == '0') { + patch.length2 = 0; + } else { + patch.start2--; + patch.length2 = parseInt(m[4], 10); + } + textPointer++; + + while (textPointer < text.length) { + var sign = text[textPointer].charAt(0); + try { + var line = decodeURI(text[textPointer].substring(1)); + } catch (ex) { + // Malformed URI sequence. + throw new Error('Illegal escape in patch_fromText: ' + line); + } + if (sign == '-') { + // Deletion. + patch.diffs.push([DIFF_DELETE, line]); + } else if (sign == '+') { + // Insertion. + patch.diffs.push([DIFF_INSERT, line]); + } else if (sign == ' ') { + // Minor equality. + patch.diffs.push([DIFF_EQUAL, line]); + } else if (sign == '@') { + // Start of next patch. + break; + } else if (sign === '') { + // Blank line? Whatever. + } else { + // WTF? + throw new Error('Invalid patch mode "' + sign + '" in: ' + line); + } + textPointer++; + } + } + return patches; +}; + + +/** + * Class representing one patch operation. 
+ * @constructor + */ +diff_match_patch.patch_obj = function() { + /** @type {!Array.} */ + this.diffs = []; + /** @type {?number} */ + this.start1 = null; + /** @type {?number} */ + this.start2 = null; + /** @type {number} */ + this.length1 = 0; + /** @type {number} */ + this.length2 = 0; +}; + + +/** + * Emmulate GNU diff's format. + * Header: @@ -382,8 +481,9 @@ + * Indicies are printed as 1-based, not 0-based. + * @return {string} The GNU diff string. + */ +diff_match_patch.patch_obj.prototype.toString = function() { + var coords1, coords2; + if (this.length1 === 0) { + coords1 = this.start1 + ',0'; + } else if (this.length1 == 1) { + coords1 = this.start1 + 1; + } else { + coords1 = (this.start1 + 1) + ',' + this.length1; + } + if (this.length2 === 0) { + coords2 = this.start2 + ',0'; + } else if (this.length2 == 1) { + coords2 = this.start2 + 1; + } else { + coords2 = (this.start2 + 1) + ',' + this.length2; + } + var text = ['@@ -' + coords1 + ' +' + coords2 + ' @@\n']; + var op; + // Escape the body of the patch with %xx notation. + for (var x = 0; x < this.diffs.length; x++) { + switch (this.diffs[x][0]) { + case DIFF_INSERT: + op = '+'; + break; + case DIFF_DELETE: + op = '-'; + break; + case DIFF_EQUAL: + op = ' '; + break; + } + text[x + 1] = op + encodeURI(this.diffs[x][1]) + '\n'; + } + return text.join('').replace(/%20/g, ' '); +}; + + +// Export these global variables so that they survive Google's JS compiler. +// In a browser, 'this' will be 'window'. +// Users of node.js should 'require' the uncompressed version since Google's +// JS compiler may break the following exports for non-browser environments. 
+this['diff_match_patch'] = diff_match_patch; +this['DIFF_DELETE'] = DIFF_DELETE; +this['DIFF_INSERT'] = DIFF_INSERT; +this['DIFF_EQUAL'] = DIFF_EQUAL; diff --git a/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/jquery-ui.js b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/jquery-ui.js new file mode 100644 index 0000000000..eb4ec72365 --- /dev/null +++ b/vendor/github.com/polydawn/refmt/.gopath/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/jquery-ui.js @@ -0,0 +1,15008 @@ +/*! jQuery UI - v1.10.4 - 2014-01-17 +* http://jqueryui.com +* Includes: jquery.ui.core.js, jquery.ui.widget.js, jquery.ui.mouse.js, jquery.ui.position.js, jquery.ui.accordion.js, jquery.ui.autocomplete.js, jquery.ui.button.js, jquery.ui.datepicker.js, jquery.ui.dialog.js, jquery.ui.draggable.js, jquery.ui.droppable.js, jquery.ui.effect.js, jquery.ui.effect-blind.js, jquery.ui.effect-bounce.js, jquery.ui.effect-clip.js, jquery.ui.effect-drop.js, jquery.ui.effect-explode.js, jquery.ui.effect-fade.js, jquery.ui.effect-fold.js, jquery.ui.effect-highlight.js, jquery.ui.effect-pulsate.js, jquery.ui.effect-scale.js, jquery.ui.effect-shake.js, jquery.ui.effect-slide.js, jquery.ui.effect-transfer.js, jquery.ui.menu.js, jquery.ui.progressbar.js, jquery.ui.resizable.js, jquery.ui.selectable.js, jquery.ui.slider.js, jquery.ui.sortable.js, jquery.ui.spinner.js, jquery.ui.tabs.js, jquery.ui.tooltip.js +* Copyright 2014 jQuery Foundation and other contributors; Licensed MIT */ + +(function( $, undefined ) { + +var uuid = 0, + runiqueId = /^ui-id-\d+$/; + +// $.ui might exist from components with no dependencies, e.g., $.ui.position +$.ui = $.ui || {}; + +$.extend( $.ui, { + version: "1.10.4", + + keyCode: { + BACKSPACE: 8, + COMMA: 188, + DELETE: 46, + DOWN: 40, + END: 35, + ENTER: 13, + ESCAPE: 27, + HOME: 36, + LEFT: 37, + NUMPAD_ADD: 107, + NUMPAD_DECIMAL: 110, + 
NUMPAD_DIVIDE: 111, + NUMPAD_ENTER: 108, + NUMPAD_MULTIPLY: 106, + NUMPAD_SUBTRACT: 109, + PAGE_DOWN: 34, + PAGE_UP: 33, + PERIOD: 190, + RIGHT: 39, + SPACE: 32, + TAB: 9, + UP: 38 + } +}); + +// plugins +$.fn.extend({ + focus: (function( orig ) { + return function( delay, fn ) { + return typeof delay === "number" ? + this.each(function() { + var elem = this; + setTimeout(function() { + $( elem ).focus(); + if ( fn ) { + fn.call( elem ); + } + }, delay ); + }) : + orig.apply( this, arguments ); + }; + })( $.fn.focus ), + + scrollParent: function() { + var scrollParent; + if (($.ui.ie && (/(static|relative)/).test(this.css("position"))) || (/absolute/).test(this.css("position"))) { + scrollParent = this.parents().filter(function() { + return (/(relative|absolute|fixed)/).test($.css(this,"position")) && (/(auto|scroll)/).test($.css(this,"overflow")+$.css(this,"overflow-y")+$.css(this,"overflow-x")); + }).eq(0); + } else { + scrollParent = this.parents().filter(function() { + return (/(auto|scroll)/).test($.css(this,"overflow")+$.css(this,"overflow-y")+$.css(this,"overflow-x")); + }).eq(0); + } + + return (/fixed/).test(this.css("position")) || !scrollParent.length ? $(document) : scrollParent; + }, + + zIndex: function( zIndex ) { + if ( zIndex !== undefined ) { + return this.css( "zIndex", zIndex ); + } + + if ( this.length ) { + var elem = $( this[ 0 ] ), position, value; + while ( elem.length && elem[ 0 ] !== document ) { + // Ignore z-index if position is set to a value where z-index is ignored by the browser + // This makes behavior of this function consistent across browsers + // WebKit always returns auto if the element is positioned + position = elem.css( "position" ); + if ( position === "absolute" || position === "relative" || position === "fixed" ) { + // IE returns 0 when zIndex is not specified + // other browsers return a string + // we ignore the case of nested elements with an explicit value of 0 + //
+ value = parseInt( elem.css( "zIndex" ), 10 ); + if ( !isNaN( value ) && value !== 0 ) { + return value; + } + } + elem = elem.parent(); + } + } + + return 0; + }, + + uniqueId: function() { + return this.each(function() { + if ( !this.id ) { + this.id = "ui-id-" + (++uuid); + } + }); + }, + + removeUniqueId: function() { + return this.each(function() { + if ( runiqueId.test( this.id ) ) { + $( this ).removeAttr( "id" ); + } + }); + } +}); + +// selectors +function focusable( element, isTabIndexNotNaN ) { + var map, mapName, img, + nodeName = element.nodeName.toLowerCase(); + if ( "area" === nodeName ) { + map = element.parentNode; + mapName = map.name; + if ( !element.href || !mapName || map.nodeName.toLowerCase() !== "map" ) { + return false; + } + img = $( "img[usemap=#" + mapName + "]" )[0]; + return !!img && visible( img ); + } + return ( /input|select|textarea|button|object/.test( nodeName ) ? + !element.disabled : + "a" === nodeName ? + element.href || isTabIndexNotNaN : + isTabIndexNotNaN) && + // the element and all of its ancestors must be visible + visible( element ); +} + +function visible( element ) { + return $.expr.filters.visible( element ) && + !$( element ).parents().addBack().filter(function() { + return $.css( this, "visibility" ) === "hidden"; + }).length; +} + +$.extend( $.expr[ ":" ], { + data: $.expr.createPseudo ? 
+ $.expr.createPseudo(function( dataName ) { + return function( elem ) { + return !!$.data( elem, dataName ); + }; + }) : + // support: jQuery <1.8 + function( elem, i, match ) { + return !!$.data( elem, match[ 3 ] ); + }, + + focusable: function( element ) { + return focusable( element, !isNaN( $.attr( element, "tabindex" ) ) ); + }, + + tabbable: function( element ) { + var tabIndex = $.attr( element, "tabindex" ), + isTabIndexNaN = isNaN( tabIndex ); + return ( isTabIndexNaN || tabIndex >= 0 ) && focusable( element, !isTabIndexNaN ); + } +}); + +// support: jQuery <1.8 +if ( !$( "" ).outerWidth( 1 ).jquery ) { + $.each( [ "Width", "Height" ], function( i, name ) { + var side = name === "Width" ? [ "Left", "Right" ] : [ "Top", "Bottom" ], + type = name.toLowerCase(), + orig = { + innerWidth: $.fn.innerWidth, + innerHeight: $.fn.innerHeight, + outerWidth: $.fn.outerWidth, + outerHeight: $.fn.outerHeight + }; + + function reduce( elem, size, border, margin ) { + $.each( side, function() { + size -= parseFloat( $.css( elem, "padding" + this ) ) || 0; + if ( border ) { + size -= parseFloat( $.css( elem, "border" + this + "Width" ) ) || 0; + } + if ( margin ) { + size -= parseFloat( $.css( elem, "margin" + this ) ) || 0; + } + }); + return size; + } + + $.fn[ "inner" + name ] = function( size ) { + if ( size === undefined ) { + return orig[ "inner" + name ].call( this ); + } + + return this.each(function() { + $( this ).css( type, reduce( this, size ) + "px" ); + }); + }; + + $.fn[ "outer" + name] = function( size, margin ) { + if ( typeof size !== "number" ) { + return orig[ "outer" + name ].call( this, size ); + } + + return this.each(function() { + $( this).css( type, reduce( this, size, true, margin ) + "px" ); + }); + }; + }); +} + +// support: jQuery <1.8 +if ( !$.fn.addBack ) { + $.fn.addBack = function( selector ) { + return this.add( selector == null ? 
+ this.prevObject : this.prevObject.filter( selector ) + ); + }; +} + +// support: jQuery 1.6.1, 1.6.2 (http://bugs.jquery.com/ticket/9413) +if ( $( "" ).data( "a-b", "a" ).removeData( "a-b" ).data( "a-b" ) ) { + $.fn.removeData = (function( removeData ) { + return function( key ) { + if ( arguments.length ) { + return removeData.call( this, $.camelCase( key ) ); + } else { + return removeData.call( this ); + } + }; + })( $.fn.removeData ); +} + + + + + +// deprecated +$.ui.ie = !!/msie [\w.]+/.exec( navigator.userAgent.toLowerCase() ); + +$.support.selectstart = "onselectstart" in document.createElement( "div" ); +$.fn.extend({ + disableSelection: function() { + return this.bind( ( $.support.selectstart ? "selectstart" : "mousedown" ) + + ".ui-disableSelection", function( event ) { + event.preventDefault(); + }); + }, + + enableSelection: function() { + return this.unbind( ".ui-disableSelection" ); + } +}); + +$.extend( $.ui, { + // $.ui.plugin is deprecated. Use $.widget() extensions instead. + plugin: { + add: function( module, option, set ) { + var i, + proto = $.ui[ module ].prototype; + for ( i in set ) { + proto.plugins[ i ] = proto.plugins[ i ] || []; + proto.plugins[ i ].push( [ option, set[ i ] ] ); + } + }, + call: function( instance, name, args ) { + var i, + set = instance.plugins[ name ]; + if ( !set || !instance.element[ 0 ].parentNode || instance.element[ 0 ].parentNode.nodeType === 11 ) { + return; + } + + for ( i = 0; i < set.length; i++ ) { + if ( instance.options[ set[ i ][ 0 ] ] ) { + set[ i ][ 1 ].apply( instance.element, args ); + } + } + } + }, + + // only used by resizable + hasScroll: function( el, a ) { + + //If overflow is hidden, the element might have extra content, but the user wants to hide it + if ( $( el ).css( "overflow" ) === "hidden") { + return false; + } + + var scroll = ( a && a === "left" ) ? 
"scrollLeft" : "scrollTop", + has = false; + + if ( el[ scroll ] > 0 ) { + return true; + } + + // TODO: determine which cases actually cause this to happen + // if the element doesn't have the scroll set, see if it's possible to + // set the scroll + el[ scroll ] = 1; + has = ( el[ scroll ] > 0 ); + el[ scroll ] = 0; + return has; + } +}); + +})( jQuery ); +(function( $, undefined ) { + +var uuid = 0, + slice = Array.prototype.slice, + _cleanData = $.cleanData; +$.cleanData = function( elems ) { + for ( var i = 0, elem; (elem = elems[i]) != null; i++ ) { + try { + $( elem ).triggerHandler( "remove" ); + // http://bugs.jquery.com/ticket/8235 + } catch( e ) {} + } + _cleanData( elems ); +}; + +$.widget = function( name, base, prototype ) { + var fullName, existingConstructor, constructor, basePrototype, + // proxiedPrototype allows the provided prototype to remain unmodified + // so that it can be used as a mixin for multiple widgets (#8876) + proxiedPrototype = {}, + namespace = name.split( "." )[ 0 ]; + + name = name.split( "." 
)[ 1 ]; + fullName = namespace + "-" + name; + + if ( !prototype ) { + prototype = base; + base = $.Widget; + } + + // create selector for plugin + $.expr[ ":" ][ fullName.toLowerCase() ] = function( elem ) { + return !!$.data( elem, fullName ); + }; + + $[ namespace ] = $[ namespace ] || {}; + existingConstructor = $[ namespace ][ name ]; + constructor = $[ namespace ][ name ] = function( options, element ) { + // allow instantiation without "new" keyword + if ( !this._createWidget ) { + return new constructor( options, element ); + } + + // allow instantiation without initializing for simple inheritance + // must use "new" keyword (the code above always passes args) + if ( arguments.length ) { + this._createWidget( options, element ); + } + }; + // extend with the existing constructor to carry over any static properties + $.extend( constructor, existingConstructor, { + version: prototype.version, + // copy the object used to create the prototype in case we need to + // redefine the widget later + _proto: $.extend( {}, prototype ), + // track widgets that inherit from this widget in case this widget is + // redefined after a widget inherits from it + _childConstructors: [] + }); + + basePrototype = new base(); + // we need to make the options hash a property directly on the new instance + // otherwise we'll modify the options hash on the prototype that we're + // inheriting from + basePrototype.options = $.widget.extend( {}, basePrototype.options ); + $.each( prototype, function( prop, value ) { + if ( !$.isFunction( value ) ) { + proxiedPrototype[ prop ] = value; + return; + } + proxiedPrototype[ prop ] = (function() { + var _super = function() { + return base.prototype[ prop ].apply( this, arguments ); + }, + _superApply = function( args ) { + return base.prototype[ prop ].apply( this, args ); + }; + return function() { + var __super = this._super, + __superApply = this._superApply, + returnValue; + + this._super = _super; + this._superApply = _superApply; + + 
returnValue = value.apply( this, arguments ); + + this._super = __super; + this._superApply = __superApply; + + return returnValue; + }; + })(); + }); + constructor.prototype = $.widget.extend( basePrototype, { + // TODO: remove support for widgetEventPrefix + // always use the name + a colon as the prefix, e.g., draggable:start + // don't prefix for widgets that aren't DOM-based + widgetEventPrefix: existingConstructor ? (basePrototype.widgetEventPrefix || name) : name + }, proxiedPrototype, { + constructor: constructor, + namespace: namespace, + widgetName: name, + widgetFullName: fullName + }); + + // If this widget is being redefined then we need to find all widgets that + // are inheriting from it and redefine all of them so that they inherit from + // the new version of this widget. We're essentially trying to replace one + // level in the prototype chain. + if ( existingConstructor ) { + $.each( existingConstructor._childConstructors, function( i, child ) { + var childPrototype = child.prototype; + + // redefine the child widget using the same prototype that was + // originally used, but inherit from the new version of the base + $.widget( childPrototype.namespace + "." 
+ childPrototype.widgetName, constructor, child._proto ); + }); + // remove the list of existing child constructors from the old constructor + // so the old child constructors can be garbage collected + delete existingConstructor._childConstructors; + } else { + base._childConstructors.push( constructor ); + } + + $.widget.bridge( name, constructor ); +}; + +$.widget.extend = function( target ) { + var input = slice.call( arguments, 1 ), + inputIndex = 0, + inputLength = input.length, + key, + value; + for ( ; inputIndex < inputLength; inputIndex++ ) { + for ( key in input[ inputIndex ] ) { + value = input[ inputIndex ][ key ]; + if ( input[ inputIndex ].hasOwnProperty( key ) && value !== undefined ) { + // Clone objects + if ( $.isPlainObject( value ) ) { + target[ key ] = $.isPlainObject( target[ key ] ) ? + $.widget.extend( {}, target[ key ], value ) : + // Don't extend strings, arrays, etc. with objects + $.widget.extend( {}, value ); + // Copy everything else by reference + } else { + target[ key ] = value; + } + } + } + } + return target; +}; + +$.widget.bridge = function( name, object ) { + var fullName = object.prototype.widgetFullName || name; + $.fn[ name ] = function( options ) { + var isMethodCall = typeof options === "string", + args = slice.call( arguments, 1 ), + returnValue = this; + + // allow multiple hashes to be passed on init + options = !isMethodCall && args.length ? 
+ $.widget.extend.apply( null, [ options ].concat(args) ) : + options; + + if ( isMethodCall ) { + this.each(function() { + var methodValue, + instance = $.data( this, fullName ); + if ( !instance ) { + return $.error( "cannot call methods on " + name + " prior to initialization; " + + "attempted to call method '" + options + "'" ); + } + if ( !$.isFunction( instance[options] ) || options.charAt( 0 ) === "_" ) { + return $.error( "no such method '" + options + "' for " + name + " widget instance" ); + } + methodValue = instance[ options ].apply( instance, args ); + if ( methodValue !== instance && methodValue !== undefined ) { + returnValue = methodValue && methodValue.jquery ? + returnValue.pushStack( methodValue.get() ) : + methodValue; + return false; + } + }); + } else { + this.each(function() { + var instance = $.data( this, fullName ); + if ( instance ) { + instance.option( options || {} )._init(); + } else { + $.data( this, fullName, new object( options, this ) ); + } + }); + } + + return returnValue; + }; +}; + +$.Widget = function( /* options, element */ ) {}; +$.Widget._childConstructors = []; + +$.Widget.prototype = { + widgetName: "widget", + widgetEventPrefix: "", + defaultElement: "
", + options: { + disabled: false, + + // callbacks + create: null + }, + _createWidget: function( options, element ) { + element = $( element || this.defaultElement || this )[ 0 ]; + this.element = $( element ); + this.uuid = uuid++; + this.eventNamespace = "." + this.widgetName + this.uuid; + this.options = $.widget.extend( {}, + this.options, + this._getCreateOptions(), + options ); + + this.bindings = $(); + this.hoverable = $(); + this.focusable = $(); + + if ( element !== this ) { + $.data( element, this.widgetFullName, this ); + this._on( true, this.element, { + remove: function( event ) { + if ( event.target === element ) { + this.destroy(); + } + } + }); + this.document = $( element.style ? + // element within the document + element.ownerDocument : + // element is window or document + element.document || element ); + this.window = $( this.document[0].defaultView || this.document[0].parentWindow ); + } + + this._create(); + this._trigger( "create", null, this._getCreateEventData() ); + this._init(); + }, + _getCreateOptions: $.noop, + _getCreateEventData: $.noop, + _create: $.noop, + _init: $.noop, + + destroy: function() { + this._destroy(); + // we can probably remove the unbind calls in 2.0 + // all event bindings should go through this._on() + this.element + .unbind( this.eventNamespace ) + // 1.9 BC for #7810 + // TODO remove dual storage + .removeData( this.widgetName ) + .removeData( this.widgetFullName ) + // support: jquery <1.6.3 + // http://bugs.jquery.com/ticket/9413 + .removeData( $.camelCase( this.widgetFullName ) ); + this.widget() + .unbind( this.eventNamespace ) + .removeAttr( "aria-disabled" ) + .removeClass( + this.widgetFullName + "-disabled " + + "ui-state-disabled" ); + + // clean up events and states + this.bindings.unbind( this.eventNamespace ); + this.hoverable.removeClass( "ui-state-hover" ); + this.focusable.removeClass( "ui-state-focus" ); + }, + _destroy: $.noop, + + widget: function() { + return this.element; + }, + + option: 
function( key, value ) { + var options = key, + parts, + curOption, + i; + + if ( arguments.length === 0 ) { + // don't return a reference to the internal hash + return $.widget.extend( {}, this.options ); + } + + if ( typeof key === "string" ) { + // handle nested keys, e.g., "foo.bar" => { foo: { bar: ___ } } + options = {}; + parts = key.split( "." ); + key = parts.shift(); + if ( parts.length ) { + curOption = options[ key ] = $.widget.extend( {}, this.options[ key ] ); + for ( i = 0; i < parts.length - 1; i++ ) { + curOption[ parts[ i ] ] = curOption[ parts[ i ] ] || {}; + curOption = curOption[ parts[ i ] ]; + } + key = parts.pop(); + if ( arguments.length === 1 ) { + return curOption[ key ] === undefined ? null : curOption[ key ]; + } + curOption[ key ] = value; + } else { + if ( arguments.length === 1 ) { + return this.options[ key ] === undefined ? null : this.options[ key ]; + } + options[ key ] = value; + } + } + + this._setOptions( options ); + + return this; + }, + _setOptions: function( options ) { + var key; + + for ( key in options ) { + this._setOption( key, options[ key ] ); + } + + return this; + }, + _setOption: function( key, value ) { + this.options[ key ] = value; + + if ( key === "disabled" ) { + this.widget() + .toggleClass( this.widgetFullName + "-disabled ui-state-disabled", !!value ) + .attr( "aria-disabled", value ); + this.hoverable.removeClass( "ui-state-hover" ); + this.focusable.removeClass( "ui-state-focus" ); + } + + return this; + }, + + enable: function() { + return this._setOption( "disabled", false ); + }, + disable: function() { + return this._setOption( "disabled", true ); + }, + + _on: function( suppressDisabledCheck, element, handlers ) { + var delegateElement, + instance = this; + + // no suppressDisabledCheck flag, shuffle arguments + if ( typeof suppressDisabledCheck !== "boolean" ) { + handlers = element; + element = suppressDisabledCheck; + suppressDisabledCheck = false; + } + + // no element argument, shuffle and use 
this.element + if ( !handlers ) { + handlers = element; + element = this.element; + delegateElement = this.widget(); + } else { + // accept selectors, DOM elements + element = delegateElement = $( element ); + this.bindings = this.bindings.add( element ); + } + + $.each( handlers, function( event, handler ) { + function handlerProxy() { + // allow widgets to customize the disabled handling + // - disabled as an array instead of boolean + // - disabled class as method for disabling individual parts + if ( !suppressDisabledCheck && + ( instance.options.disabled === true || + $( this ).hasClass( "ui-state-disabled" ) ) ) { + return; + } + return ( typeof handler === "string" ? instance[ handler ] : handler ) + .apply( instance, arguments ); + } + + // copy the guid so direct unbinding works + if ( typeof handler !== "string" ) { + handlerProxy.guid = handler.guid = + handler.guid || handlerProxy.guid || $.guid++; + } + + var match = event.match( /^(\w+)\s*(.*)$/ ), + eventName = match[1] + instance.eventNamespace, + selector = match[2]; + if ( selector ) { + delegateElement.delegate( selector, eventName, handlerProxy ); + } else { + element.bind( eventName, handlerProxy ); + } + }); + }, + + _off: function( element, eventName ) { + eventName = (eventName || "").split( " " ).join( this.eventNamespace + " " ) + this.eventNamespace; + element.unbind( eventName ).undelegate( eventName ); + }, + + _delay: function( handler, delay ) { + function handlerProxy() { + return ( typeof handler === "string" ? 
instance[ handler ] : handler ) + .apply( instance, arguments ); + } + var instance = this; + return setTimeout( handlerProxy, delay || 0 ); + }, + + _hoverable: function( element ) { + this.hoverable = this.hoverable.add( element ); + this._on( element, { + mouseenter: function( event ) { + $( event.currentTarget ).addClass( "ui-state-hover" ); + }, + mouseleave: function( event ) { + $( event.currentTarget ).removeClass( "ui-state-hover" ); + } + }); + }, + + _focusable: function( element ) { + this.focusable = this.focusable.add( element ); + this._on( element, { + focusin: function( event ) { + $( event.currentTarget ).addClass( "ui-state-focus" ); + }, + focusout: function( event ) { + $( event.currentTarget ).removeClass( "ui-state-focus" ); + } + }); + }, + + _trigger: function( type, event, data ) { + var prop, orig, + callback = this.options[ type ]; + + data = data || {}; + event = $.Event( event ); + event.type = ( type === this.widgetEventPrefix ? + type : + this.widgetEventPrefix + type ).toLowerCase(); + // the original event may come from any element + // so we need to reset the target on the new event + event.target = this.element[ 0 ]; + + // copy original event properties over to the new event + orig = event.originalEvent; + if ( orig ) { + for ( prop in orig ) { + if ( !( prop in event ) ) { + event[ prop ] = orig[ prop ]; + } + } + } + + this.element.trigger( event, data ); + return !( $.isFunction( callback ) && + callback.apply( this.element[0], [ event ].concat( data ) ) === false || + event.isDefaultPrevented() ); + } +}; + +$.each( { show: "fadeIn", hide: "fadeOut" }, function( method, defaultEffect ) { + $.Widget.prototype[ "_" + method ] = function( element, options, callback ) { + if ( typeof options === "string" ) { + options = { effect: options }; + } + var hasOptions, + effectName = !options ? + method : + options === true || typeof options === "number" ? 
+ defaultEffect : + options.effect || defaultEffect; + options = options || {}; + if ( typeof options === "number" ) { + options = { duration: options }; + } + hasOptions = !$.isEmptyObject( options ); + options.complete = callback; + if ( options.delay ) { + element.delay( options.delay ); + } + if ( hasOptions && $.effects && $.effects.effect[ effectName ] ) { + element[ method ]( options ); + } else if ( effectName !== method && element[ effectName ] ) { + element[ effectName ]( options.duration, options.easing, callback ); + } else { + element.queue(function( next ) { + $( this )[ method ](); + if ( callback ) { + callback.call( element[ 0 ] ); + } + next(); + }); + } + }; +}); + +})( jQuery ); +(function( $, undefined ) { + +var mouseHandled = false; +$( document ).mouseup( function() { + mouseHandled = false; +}); + +$.widget("ui.mouse", { + version: "1.10.4", + options: { + cancel: "input,textarea,button,select,option", + distance: 1, + delay: 0 + }, + _mouseInit: function() { + var that = this; + + this.element + .bind("mousedown."+this.widgetName, function(event) { + return that._mouseDown(event); + }) + .bind("click."+this.widgetName, function(event) { + if (true === $.data(event.target, that.widgetName + ".preventClickEvent")) { + $.removeData(event.target, that.widgetName + ".preventClickEvent"); + event.stopImmediatePropagation(); + return false; + } + }); + + this.started = false; + }, + + // TODO: make sure destroying one instance of mouse doesn't mess with + // other instances of mouse + _mouseDestroy: function() { + this.element.unbind("."+this.widgetName); + if ( this._mouseMoveDelegate ) { + $(document) + .unbind("mousemove."+this.widgetName, this._mouseMoveDelegate) + .unbind("mouseup."+this.widgetName, this._mouseUpDelegate); + } + }, + + _mouseDown: function(event) { + // don't let more than one widget handle mouseStart + if( mouseHandled ) { return; } + + // we may have missed mouseup (out of window) + (this._mouseStarted && 
this._mouseUp(event)); + + this._mouseDownEvent = event; + + var that = this, + btnIsLeft = (event.which === 1), + // event.target.nodeName works around a bug in IE 8 with + // disabled inputs (#7620) + elIsCancel = (typeof this.options.cancel === "string" && event.target.nodeName ? $(event.target).closest(this.options.cancel).length : false); + if (!btnIsLeft || elIsCancel || !this._mouseCapture(event)) { + return true; + } + + this.mouseDelayMet = !this.options.delay; + if (!this.mouseDelayMet) { + this._mouseDelayTimer = setTimeout(function() { + that.mouseDelayMet = true; + }, this.options.delay); + } + + if (this._mouseDistanceMet(event) && this._mouseDelayMet(event)) { + this._mouseStarted = (this._mouseStart(event) !== false); + if (!this._mouseStarted) { + event.preventDefault(); + return true; + } + } + + // Click event may never have fired (Gecko & Opera) + if (true === $.data(event.target, this.widgetName + ".preventClickEvent")) { + $.removeData(event.target, this.widgetName + ".preventClickEvent"); + } + + // these delegates are required to keep context + this._mouseMoveDelegate = function(event) { + return that._mouseMove(event); + }; + this._mouseUpDelegate = function(event) { + return that._mouseUp(event); + }; + $(document) + .bind("mousemove."+this.widgetName, this._mouseMoveDelegate) + .bind("mouseup."+this.widgetName, this._mouseUpDelegate); + + event.preventDefault(); + + mouseHandled = true; + return true; + }, + + _mouseMove: function(event) { + // IE mouseup check - mouseup happened when mouse was out of window + if ($.ui.ie && ( !document.documentMode || document.documentMode < 9 ) && !event.button) { + return this._mouseUp(event); + } + + if (this._mouseStarted) { + this._mouseDrag(event); + return event.preventDefault(); + } + + if (this._mouseDistanceMet(event) && this._mouseDelayMet(event)) { + this._mouseStarted = + (this._mouseStart(this._mouseDownEvent, event) !== false); + (this._mouseStarted ? 
this._mouseDrag(event) : this._mouseUp(event)); + } + + return !this._mouseStarted; + }, + + _mouseUp: function(event) { + $(document) + .unbind("mousemove."+this.widgetName, this._mouseMoveDelegate) + .unbind("mouseup."+this.widgetName, this._mouseUpDelegate); + + if (this._mouseStarted) { + this._mouseStarted = false; + + if (event.target === this._mouseDownEvent.target) { + $.data(event.target, this.widgetName + ".preventClickEvent", true); + } + + this._mouseStop(event); + } + + return false; + }, + + _mouseDistanceMet: function(event) { + return (Math.max( + Math.abs(this._mouseDownEvent.pageX - event.pageX), + Math.abs(this._mouseDownEvent.pageY - event.pageY) + ) >= this.options.distance + ); + }, + + _mouseDelayMet: function(/* event */) { + return this.mouseDelayMet; + }, + + // These are placeholder methods, to be overriden by extending plugin + _mouseStart: function(/* event */) {}, + _mouseDrag: function(/* event */) {}, + _mouseStop: function(/* event */) {}, + _mouseCapture: function(/* event */) { return true; } +}); + +})(jQuery); +(function( $, undefined ) { + +$.ui = $.ui || {}; + +var cachedScrollbarWidth, + max = Math.max, + abs = Math.abs, + round = Math.round, + rhorizontal = /left|center|right/, + rvertical = /top|center|bottom/, + roffset = /[\+\-]\d+(\.[\d]+)?%?/, + rposition = /^\w+/, + rpercent = /%$/, + _position = $.fn.position; + +function getOffsets( offsets, width, height ) { + return [ + parseFloat( offsets[ 0 ] ) * ( rpercent.test( offsets[ 0 ] ) ? width / 100 : 1 ), + parseFloat( offsets[ 1 ] ) * ( rpercent.test( offsets[ 1 ] ) ? 
height / 100 : 1 ) + ]; +} + +function parseCss( element, property ) { + return parseInt( $.css( element, property ), 10 ) || 0; +} + +function getDimensions( elem ) { + var raw = elem[0]; + if ( raw.nodeType === 9 ) { + return { + width: elem.width(), + height: elem.height(), + offset: { top: 0, left: 0 } + }; + } + if ( $.isWindow( raw ) ) { + return { + width: elem.width(), + height: elem.height(), + offset: { top: elem.scrollTop(), left: elem.scrollLeft() } + }; + } + if ( raw.preventDefault ) { + return { + width: 0, + height: 0, + offset: { top: raw.pageY, left: raw.pageX } + }; + } + return { + width: elem.outerWidth(), + height: elem.outerHeight(), + offset: elem.offset() + }; +} + +$.position = { + scrollbarWidth: function() { + if ( cachedScrollbarWidth !== undefined ) { + return cachedScrollbarWidth; + } + var w1, w2, + div = $( "
" ), + innerDiv = div.children()[0]; + + $( "body" ).append( div ); + w1 = innerDiv.offsetWidth; + div.css( "overflow", "scroll" ); + + w2 = innerDiv.offsetWidth; + + if ( w1 === w2 ) { + w2 = div[0].clientWidth; + } + + div.remove(); + + return (cachedScrollbarWidth = w1 - w2); + }, + getScrollInfo: function( within ) { + var overflowX = within.isWindow || within.isDocument ? "" : + within.element.css( "overflow-x" ), + overflowY = within.isWindow || within.isDocument ? "" : + within.element.css( "overflow-y" ), + hasOverflowX = overflowX === "scroll" || + ( overflowX === "auto" && within.width < within.element[0].scrollWidth ), + hasOverflowY = overflowY === "scroll" || + ( overflowY === "auto" && within.height < within.element[0].scrollHeight ); + return { + width: hasOverflowY ? $.position.scrollbarWidth() : 0, + height: hasOverflowX ? $.position.scrollbarWidth() : 0 + }; + }, + getWithinInfo: function( element ) { + var withinElement = $( element || window ), + isWindow = $.isWindow( withinElement[0] ), + isDocument = !!withinElement[ 0 ] && withinElement[ 0 ].nodeType === 9; + return { + element: withinElement, + isWindow: isWindow, + isDocument: isDocument, + offset: withinElement.offset() || { left: 0, top: 0 }, + scrollLeft: withinElement.scrollLeft(), + scrollTop: withinElement.scrollTop(), + width: isWindow ? withinElement.width() : withinElement.outerWidth(), + height: isWindow ? 
withinElement.height() : withinElement.outerHeight() + }; + } +}; + +$.fn.position = function( options ) { + if ( !options || !options.of ) { + return _position.apply( this, arguments ); + } + + // make a copy, we don't want to modify arguments + options = $.extend( {}, options ); + + var atOffset, targetWidth, targetHeight, targetOffset, basePosition, dimensions, + target = $( options.of ), + within = $.position.getWithinInfo( options.within ), + scrollInfo = $.position.getScrollInfo( within ), + collision = ( options.collision || "flip" ).split( " " ), + offsets = {}; + + dimensions = getDimensions( target ); + if ( target[0].preventDefault ) { + // force left top to allow flipping + options.at = "left top"; + } + targetWidth = dimensions.width; + targetHeight = dimensions.height; + targetOffset = dimensions.offset; + // clone to reuse original targetOffset later + basePosition = $.extend( {}, targetOffset ); + + // force my and at to have valid horizontal and vertical positions + // if a value is missing or invalid, it will be converted to center + $.each( [ "my", "at" ], function() { + var pos = ( options[ this ] || "" ).split( " " ), + horizontalOffset, + verticalOffset; + + if ( pos.length === 1) { + pos = rhorizontal.test( pos[ 0 ] ) ? + pos.concat( [ "center" ] ) : + rvertical.test( pos[ 0 ] ) ? + [ "center" ].concat( pos ) : + [ "center", "center" ]; + } + pos[ 0 ] = rhorizontal.test( pos[ 0 ] ) ? pos[ 0 ] : "center"; + pos[ 1 ] = rvertical.test( pos[ 1 ] ) ? pos[ 1 ] : "center"; + + // calculate offsets + horizontalOffset = roffset.exec( pos[ 0 ] ); + verticalOffset = roffset.exec( pos[ 1 ] ); + offsets[ this ] = [ + horizontalOffset ? horizontalOffset[ 0 ] : 0, + verticalOffset ? 
verticalOffset[ 0 ] : 0 + ]; + + // reduce to just the positions without the offsets + options[ this ] = [ + rposition.exec( pos[ 0 ] )[ 0 ], + rposition.exec( pos[ 1 ] )[ 0 ] + ]; + }); + + // normalize collision option + if ( collision.length === 1 ) { + collision[ 1 ] = collision[ 0 ]; + } + + if ( options.at[ 0 ] === "right" ) { + basePosition.left += targetWidth; + } else if ( options.at[ 0 ] === "center" ) { + basePosition.left += targetWidth / 2; + } + + if ( options.at[ 1 ] === "bottom" ) { + basePosition.top += targetHeight; + } else if ( options.at[ 1 ] === "center" ) { + basePosition.top += targetHeight / 2; + } + + atOffset = getOffsets( offsets.at, targetWidth, targetHeight ); + basePosition.left += atOffset[ 0 ]; + basePosition.top += atOffset[ 1 ]; + + return this.each(function() { + var collisionPosition, using, + elem = $( this ), + elemWidth = elem.outerWidth(), + elemHeight = elem.outerHeight(), + marginLeft = parseCss( this, "marginLeft" ), + marginTop = parseCss( this, "marginTop" ), + collisionWidth = elemWidth + marginLeft + parseCss( this, "marginRight" ) + scrollInfo.width, + collisionHeight = elemHeight + marginTop + parseCss( this, "marginBottom" ) + scrollInfo.height, + position = $.extend( {}, basePosition ), + myOffset = getOffsets( offsets.my, elem.outerWidth(), elem.outerHeight() ); + + if ( options.my[ 0 ] === "right" ) { + position.left -= elemWidth; + } else if ( options.my[ 0 ] === "center" ) { + position.left -= elemWidth / 2; + } + + if ( options.my[ 1 ] === "bottom" ) { + position.top -= elemHeight; + } else if ( options.my[ 1 ] === "center" ) { + position.top -= elemHeight / 2; + } + + position.left += myOffset[ 0 ]; + position.top += myOffset[ 1 ]; + + // if the browser doesn't support fractions, then round for consistent results + if ( !$.support.offsetFractions ) { + position.left = round( position.left ); + position.top = round( position.top ); + } + + collisionPosition = { + marginLeft: marginLeft, + marginTop: marginTop 
+ }; + + $.each( [ "left", "top" ], function( i, dir ) { + if ( $.ui.position[ collision[ i ] ] ) { + $.ui.position[ collision[ i ] ][ dir ]( position, { + targetWidth: targetWidth, + targetHeight: targetHeight, + elemWidth: elemWidth, + elemHeight: elemHeight, + collisionPosition: collisionPosition, + collisionWidth: collisionWidth, + collisionHeight: collisionHeight, + offset: [ atOffset[ 0 ] + myOffset[ 0 ], atOffset [ 1 ] + myOffset[ 1 ] ], + my: options.my, + at: options.at, + within: within, + elem : elem + }); + } + }); + + if ( options.using ) { + // adds feedback as second argument to using callback, if present + using = function( props ) { + var left = targetOffset.left - position.left, + right = left + targetWidth - elemWidth, + top = targetOffset.top - position.top, + bottom = top + targetHeight - elemHeight, + feedback = { + target: { + element: target, + left: targetOffset.left, + top: targetOffset.top, + width: targetWidth, + height: targetHeight + }, + element: { + element: elem, + left: position.left, + top: position.top, + width: elemWidth, + height: elemHeight + }, + horizontal: right < 0 ? "left" : left > 0 ? "right" : "center", + vertical: bottom < 0 ? "top" : top > 0 ? "bottom" : "middle" + }; + if ( targetWidth < elemWidth && abs( left + right ) < targetWidth ) { + feedback.horizontal = "center"; + } + if ( targetHeight < elemHeight && abs( top + bottom ) < targetHeight ) { + feedback.vertical = "middle"; + } + if ( max( abs( left ), abs( right ) ) > max( abs( top ), abs( bottom ) ) ) { + feedback.important = "horizontal"; + } else { + feedback.important = "vertical"; + } + options.using.call( this, props, feedback ); + }; + } + + elem.offset( $.extend( position, { using: using } ) ); + }); +}; + +$.ui.position = { + fit: { + left: function( position, data ) { + var within = data.within, + withinOffset = within.isWindow ? 
within.scrollLeft : within.offset.left, + outerWidth = within.width, + collisionPosLeft = position.left - data.collisionPosition.marginLeft, + overLeft = withinOffset - collisionPosLeft, + overRight = collisionPosLeft + data.collisionWidth - outerWidth - withinOffset, + newOverRight; + + // element is wider than within + if ( data.collisionWidth > outerWidth ) { + // element is initially over the left side of within + if ( overLeft > 0 && overRight <= 0 ) { + newOverRight = position.left + overLeft + data.collisionWidth - outerWidth - withinOffset; + position.left += overLeft - newOverRight; + // element is initially over right side of within + } else if ( overRight > 0 && overLeft <= 0 ) { + position.left = withinOffset; + // element is initially over both left and right sides of within + } else { + if ( overLeft > overRight ) { + position.left = withinOffset + outerWidth - data.collisionWidth; + } else { + position.left = withinOffset; + } + } + // too far left -> align with left edge + } else if ( overLeft > 0 ) { + position.left += overLeft; + // too far right -> align with right edge + } else if ( overRight > 0 ) { + position.left -= overRight; + // adjust based on position and margin + } else { + position.left = max( position.left - collisionPosLeft, position.left ); + } + }, + top: function( position, data ) { + var within = data.within, + withinOffset = within.isWindow ? 
within.scrollTop : within.offset.top, + outerHeight = data.within.height, + collisionPosTop = position.top - data.collisionPosition.marginTop, + overTop = withinOffset - collisionPosTop, + overBottom = collisionPosTop + data.collisionHeight - outerHeight - withinOffset, + newOverBottom; + + // element is taller than within + if ( data.collisionHeight > outerHeight ) { + // element is initially over the top of within + if ( overTop > 0 && overBottom <= 0 ) { + newOverBottom = position.top + overTop + data.collisionHeight - outerHeight - withinOffset; + position.top += overTop - newOverBottom; + // element is initially over bottom of within + } else if ( overBottom > 0 && overTop <= 0 ) { + position.top = withinOffset; + // element is initially over both top and bottom of within + } else { + if ( overTop > overBottom ) { + position.top = withinOffset + outerHeight - data.collisionHeight; + } else { + position.top = withinOffset; + } + } + // too far up -> align with top + } else if ( overTop > 0 ) { + position.top += overTop; + // too far down -> align with bottom edge + } else if ( overBottom > 0 ) { + position.top -= overBottom; + // adjust based on position and margin + } else { + position.top = max( position.top - collisionPosTop, position.top ); + } + } + }, + flip: { + left: function( position, data ) { + var within = data.within, + withinOffset = within.offset.left + within.scrollLeft, + outerWidth = within.width, + offsetLeft = within.isWindow ? within.scrollLeft : within.offset.left, + collisionPosLeft = position.left - data.collisionPosition.marginLeft, + overLeft = collisionPosLeft - offsetLeft, + overRight = collisionPosLeft + data.collisionWidth - outerWidth - offsetLeft, + myOffset = data.my[ 0 ] === "left" ? + -data.elemWidth : + data.my[ 0 ] === "right" ? + data.elemWidth : + 0, + atOffset = data.at[ 0 ] === "left" ? + data.targetWidth : + data.at[ 0 ] === "right" ? 
+ -data.targetWidth : + 0, + offset = -2 * data.offset[ 0 ], + newOverRight, + newOverLeft; + + if ( overLeft < 0 ) { + newOverRight = position.left + myOffset + atOffset + offset + data.collisionWidth - outerWidth - withinOffset; + if ( newOverRight < 0 || newOverRight < abs( overLeft ) ) { + position.left += myOffset + atOffset + offset; + } + } + else if ( overRight > 0 ) { + newOverLeft = position.left - data.collisionPosition.marginLeft + myOffset + atOffset + offset - offsetLeft; + if ( newOverLeft > 0 || abs( newOverLeft ) < overRight ) { + position.left += myOffset + atOffset + offset; + } + } + }, + top: function( position, data ) { + var within = data.within, + withinOffset = within.offset.top + within.scrollTop, + outerHeight = within.height, + offsetTop = within.isWindow ? within.scrollTop : within.offset.top, + collisionPosTop = position.top - data.collisionPosition.marginTop, + overTop = collisionPosTop - offsetTop, + overBottom = collisionPosTop + data.collisionHeight - outerHeight - offsetTop, + top = data.my[ 1 ] === "top", + myOffset = top ? + -data.elemHeight : + data.my[ 1 ] === "bottom" ? + data.elemHeight : + 0, + atOffset = data.at[ 1 ] === "top" ? + data.targetHeight : + data.at[ 1 ] === "bottom" ? 
+ -data.targetHeight : + 0, + offset = -2 * data.offset[ 1 ], + newOverTop, + newOverBottom; + if ( overTop < 0 ) { + newOverBottom = position.top + myOffset + atOffset + offset + data.collisionHeight - outerHeight - withinOffset; + if ( ( position.top + myOffset + atOffset + offset) > overTop && ( newOverBottom < 0 || newOverBottom < abs( overTop ) ) ) { + position.top += myOffset + atOffset + offset; + } + } + else if ( overBottom > 0 ) { + newOverTop = position.top - data.collisionPosition.marginTop + myOffset + atOffset + offset - offsetTop; + if ( ( position.top + myOffset + atOffset + offset) > overBottom && ( newOverTop > 0 || abs( newOverTop ) < overBottom ) ) { + position.top += myOffset + atOffset + offset; + } + } + } + }, + flipfit: { + left: function() { + $.ui.position.flip.left.apply( this, arguments ); + $.ui.position.fit.left.apply( this, arguments ); + }, + top: function() { + $.ui.position.flip.top.apply( this, arguments ); + $.ui.position.fit.top.apply( this, arguments ); + } + } +}; + +// fraction support test +(function () { + var testElement, testElementParent, testElementStyle, offsetLeft, i, + body = document.getElementsByTagName( "body" )[ 0 ], + div = document.createElement( "div" ); + + //Create a "fake body" for testing based on method used in jQuery.support + testElement = document.createElement( body ? 
"div" : "body" ); + testElementStyle = { + visibility: "hidden", + width: 0, + height: 0, + border: 0, + margin: 0, + background: "none" + }; + if ( body ) { + $.extend( testElementStyle, { + position: "absolute", + left: "-1000px", + top: "-1000px" + }); + } + for ( i in testElementStyle ) { + testElement.style[ i ] = testElementStyle[ i ]; + } + testElement.appendChild( div ); + testElementParent = body || document.documentElement; + testElementParent.insertBefore( testElement, testElementParent.firstChild ); + + div.style.cssText = "position: absolute; left: 10.7432222px;"; + + offsetLeft = $( div ).offset().left; + $.support.offsetFractions = offsetLeft > 10 && offsetLeft < 11; + + testElement.innerHTML = ""; + testElementParent.removeChild( testElement ); +})(); + +}( jQuery ) ); +(function( $, undefined ) { + +var uid = 0, + hideProps = {}, + showProps = {}; + +hideProps.height = hideProps.paddingTop = hideProps.paddingBottom = + hideProps.borderTopWidth = hideProps.borderBottomWidth = "hide"; +showProps.height = showProps.paddingTop = showProps.paddingBottom = + showProps.borderTopWidth = showProps.borderBottomWidth = "show"; + +$.widget( "ui.accordion", { + version: "1.10.4", + options: { + active: 0, + animate: {}, + collapsible: false, + event: "click", + header: "> li > :first-child,> :not(li):even", + heightStyle: "auto", + icons: { + activeHeader: "ui-icon-triangle-1-s", + header: "ui-icon-triangle-1-e" + }, + + // callbacks + activate: null, + beforeActivate: null + }, + + _create: function() { + var options = this.options; + this.prevShow = this.prevHide = $(); + this.element.addClass( "ui-accordion ui-widget ui-helper-reset" ) + // ARIA + .attr( "role", "tablist" ); + + // don't allow collapsible: false and active: false / null + if ( !options.collapsible && (options.active === false || options.active == null) ) { + options.active = 0; + } + + this._processPanels(); + // handle negative values + if ( options.active < 0 ) { + options.active += 
this.headers.length; + } + this._refresh(); + }, + + _getCreateEventData: function() { + return { + header: this.active, + panel: !this.active.length ? $() : this.active.next(), + content: !this.active.length ? $() : this.active.next() + }; + }, + + _createIcons: function() { + var icons = this.options.icons; + if ( icons ) { + $( "" ) + .addClass( "ui-accordion-header-icon ui-icon " + icons.header ) + .prependTo( this.headers ); + this.active.children( ".ui-accordion-header-icon" ) + .removeClass( icons.header ) + .addClass( icons.activeHeader ); + this.headers.addClass( "ui-accordion-icons" ); + } + }, + + _destroyIcons: function() { + this.headers + .removeClass( "ui-accordion-icons" ) + .children( ".ui-accordion-header-icon" ) + .remove(); + }, + + _destroy: function() { + var contents; + + // clean up main element + this.element + .removeClass( "ui-accordion ui-widget ui-helper-reset" ) + .removeAttr( "role" ); + + // clean up headers + this.headers + .removeClass( "ui-accordion-header ui-accordion-header-active ui-helper-reset ui-state-default ui-corner-all ui-state-active ui-state-disabled ui-corner-top" ) + .removeAttr( "role" ) + .removeAttr( "aria-expanded" ) + .removeAttr( "aria-selected" ) + .removeAttr( "aria-controls" ) + .removeAttr( "tabIndex" ) + .each(function() { + if ( /^ui-accordion/.test( this.id ) ) { + this.removeAttribute( "id" ); + } + }); + this._destroyIcons(); + + // clean up content panels + contents = this.headers.next() + .css( "display", "" ) + .removeAttr( "role" ) + .removeAttr( "aria-hidden" ) + .removeAttr( "aria-labelledby" ) + .removeClass( "ui-helper-reset ui-widget-content ui-corner-bottom ui-accordion-content ui-accordion-content-active ui-state-disabled" ) + .each(function() { + if ( /^ui-accordion/.test( this.id ) ) { + this.removeAttribute( "id" ); + } + }); + if ( this.options.heightStyle !== "content" ) { + contents.css( "height", "" ); + } + }, + + _setOption: function( key, value ) { + if ( key === "active" ) { + // 
_activate() will handle invalid values and update this.options + this._activate( value ); + return; + } + + if ( key === "event" ) { + if ( this.options.event ) { + this._off( this.headers, this.options.event ); + } + this._setupEvents( value ); + } + + this._super( key, value ); + + // setting collapsible: false while collapsed; open first panel + if ( key === "collapsible" && !value && this.options.active === false ) { + this._activate( 0 ); + } + + if ( key === "icons" ) { + this._destroyIcons(); + if ( value ) { + this._createIcons(); + } + } + + // #5332 - opacity doesn't cascade to positioned elements in IE + // so we need to add the disabled class to the headers and panels + if ( key === "disabled" ) { + this.headers.add( this.headers.next() ) + .toggleClass( "ui-state-disabled", !!value ); + } + }, + + _keydown: function( event ) { + if ( event.altKey || event.ctrlKey ) { + return; + } + + var keyCode = $.ui.keyCode, + length = this.headers.length, + currentIndex = this.headers.index( event.target ), + toFocus = false; + + switch ( event.keyCode ) { + case keyCode.RIGHT: + case keyCode.DOWN: + toFocus = this.headers[ ( currentIndex + 1 ) % length ]; + break; + case keyCode.LEFT: + case keyCode.UP: + toFocus = this.headers[ ( currentIndex - 1 + length ) % length ]; + break; + case keyCode.SPACE: + case keyCode.ENTER: + this._eventHandler( event ); + break; + case keyCode.HOME: + toFocus = this.headers[ 0 ]; + break; + case keyCode.END: + toFocus = this.headers[ length - 1 ]; + break; + } + + if ( toFocus ) { + $( event.target ).attr( "tabIndex", -1 ); + $( toFocus ).attr( "tabIndex", 0 ); + toFocus.focus(); + event.preventDefault(); + } + }, + + _panelKeyDown : function( event ) { + if ( event.keyCode === $.ui.keyCode.UP && event.ctrlKey ) { + $( event.currentTarget ).prev().focus(); + } + }, + + refresh: function() { + var options = this.options; + this._processPanels(); + + // was collapsed or no panel + if ( ( options.active === false && 
options.collapsible === true ) || !this.headers.length ) { + options.active = false; + this.active = $(); + // active false only when collapsible is true + } else if ( options.active === false ) { + this._activate( 0 ); + // was active, but active panel is gone + } else if ( this.active.length && !$.contains( this.element[ 0 ], this.active[ 0 ] ) ) { + // all remaining panel are disabled + if ( this.headers.length === this.headers.find(".ui-state-disabled").length ) { + options.active = false; + this.active = $(); + // activate previous panel + } else { + this._activate( Math.max( 0, options.active - 1 ) ); + } + // was active, active panel still exists + } else { + // make sure active index is correct + options.active = this.headers.index( this.active ); + } + + this._destroyIcons(); + + this._refresh(); + }, + + _processPanels: function() { + this.headers = this.element.find( this.options.header ) + .addClass( "ui-accordion-header ui-helper-reset ui-state-default ui-corner-all" ); + + this.headers.next() + .addClass( "ui-accordion-content ui-helper-reset ui-widget-content ui-corner-bottom" ) + .filter(":not(.ui-accordion-content-active)") + .hide(); + }, + + _refresh: function() { + var maxHeight, + options = this.options, + heightStyle = options.heightStyle, + parent = this.element.parent(), + accordionId = this.accordionId = "ui-accordion-" + + (this.element.attr( "id" ) || ++uid); + + this.active = this._findActive( options.active ) + .addClass( "ui-accordion-header-active ui-state-active ui-corner-top" ) + .removeClass( "ui-corner-all" ); + this.active.next() + .addClass( "ui-accordion-content-active" ) + .show(); + + this.headers + .attr( "role", "tab" ) + .each(function( i ) { + var header = $( this ), + headerId = header.attr( "id" ), + panel = header.next(), + panelId = panel.attr( "id" ); + if ( !headerId ) { + headerId = accordionId + "-header-" + i; + header.attr( "id", headerId ); + } + if ( !panelId ) { + panelId = accordionId + "-panel-" + i; + 
panel.attr( "id", panelId ); + } + header.attr( "aria-controls", panelId ); + panel.attr( "aria-labelledby", headerId ); + }) + .next() + .attr( "role", "tabpanel" ); + + this.headers + .not( this.active ) + .attr({ + "aria-selected": "false", + "aria-expanded": "false", + tabIndex: -1 + }) + .next() + .attr({ + "aria-hidden": "true" + }) + .hide(); + + // make sure at least one header is in the tab order + if ( !this.active.length ) { + this.headers.eq( 0 ).attr( "tabIndex", 0 ); + } else { + this.active.attr({ + "aria-selected": "true", + "aria-expanded": "true", + tabIndex: 0 + }) + .next() + .attr({ + "aria-hidden": "false" + }); + } + + this._createIcons(); + + this._setupEvents( options.event ); + + if ( heightStyle === "fill" ) { + maxHeight = parent.height(); + this.element.siblings( ":visible" ).each(function() { + var elem = $( this ), + position = elem.css( "position" ); + + if ( position === "absolute" || position === "fixed" ) { + return; + } + maxHeight -= elem.outerHeight( true ); + }); + + this.headers.each(function() { + maxHeight -= $( this ).outerHeight( true ); + }); + + this.headers.next() + .each(function() { + $( this ).height( Math.max( 0, maxHeight - + $( this ).innerHeight() + $( this ).height() ) ); + }) + .css( "overflow", "auto" ); + } else if ( heightStyle === "auto" ) { + maxHeight = 0; + this.headers.next() + .each(function() { + maxHeight = Math.max( maxHeight, $( this ).css( "height", "" ).height() ); + }) + .height( maxHeight ); + } + }, + + _activate: function( index ) { + var active = this._findActive( index )[ 0 ]; + + // trying to activate the already active panel + if ( active === this.active[ 0 ] ) { + return; + } + + // trying to collapse, simulate a click on the currently active header + active = active || this.active[ 0 ]; + + this._eventHandler({ + target: active, + currentTarget: active, + preventDefault: $.noop + }); + }, + + _findActive: function( selector ) { + return typeof selector === "number" ? 
this.headers.eq( selector ) : $(); + }, + + _setupEvents: function( event ) { + var events = { + keydown: "_keydown" + }; + if ( event ) { + $.each( event.split(" "), function( index, eventName ) { + events[ eventName ] = "_eventHandler"; + }); + } + + this._off( this.headers.add( this.headers.next() ) ); + this._on( this.headers, events ); + this._on( this.headers.next(), { keydown: "_panelKeyDown" }); + this._hoverable( this.headers ); + this._focusable( this.headers ); + }, + + _eventHandler: function( event ) { + var options = this.options, + active = this.active, + clicked = $( event.currentTarget ), + clickedIsActive = clicked[ 0 ] === active[ 0 ], + collapsing = clickedIsActive && options.collapsible, + toShow = collapsing ? $() : clicked.next(), + toHide = active.next(), + eventData = { + oldHeader: active, + oldPanel: toHide, + newHeader: collapsing ? $() : clicked, + newPanel: toShow + }; + + event.preventDefault(); + + if ( + // click on active header, but not collapsible + ( clickedIsActive && !options.collapsible ) || + // allow canceling activation + ( this._trigger( "beforeActivate", event, eventData ) === false ) ) { + return; + } + + options.active = collapsing ? false : this.headers.index( clicked ); + + // when the call to ._toggle() comes after the class changes + // it causes a very odd bug in IE 8 (see #6720) + this.active = clickedIsActive ? 
$() : clicked; + this._toggle( eventData ); + + // switch classes + // corner classes on the previously active header stay after the animation + active.removeClass( "ui-accordion-header-active ui-state-active" ); + if ( options.icons ) { + active.children( ".ui-accordion-header-icon" ) + .removeClass( options.icons.activeHeader ) + .addClass( options.icons.header ); + } + + if ( !clickedIsActive ) { + clicked + .removeClass( "ui-corner-all" ) + .addClass( "ui-accordion-header-active ui-state-active ui-corner-top" ); + if ( options.icons ) { + clicked.children( ".ui-accordion-header-icon" ) + .removeClass( options.icons.header ) + .addClass( options.icons.activeHeader ); + } + + clicked + .next() + .addClass( "ui-accordion-content-active" ); + } + }, + + _toggle: function( data ) { + var toShow = data.newPanel, + toHide = this.prevShow.length ? this.prevShow : data.oldPanel; + + // handle activating a panel during the animation for another activation + this.prevShow.add( this.prevHide ).stop( true, true ); + this.prevShow = toShow; + this.prevHide = toHide; + + if ( this.options.animate ) { + this._animate( toShow, toHide, data ); + } else { + toHide.hide(); + toShow.show(); + this._toggleComplete( data ); + } + + toHide.attr({ + "aria-hidden": "true" + }); + toHide.prev().attr( "aria-selected", "false" ); + // if we're switching panels, remove the old header from the tab order + // if we're opening from collapsed state, remove the previous header from the tab order + // if we're collapsing, then keep the collapsing header in the tab order + if ( toShow.length && toHide.length ) { + toHide.prev().attr({ + "tabIndex": -1, + "aria-expanded": "false" + }); + } else if ( toShow.length ) { + this.headers.filter(function() { + return $( this ).attr( "tabIndex" ) === 0; + }) + .attr( "tabIndex", -1 ); + } + + toShow + .attr( "aria-hidden", "false" ) + .prev() + .attr({ + "aria-selected": "true", + tabIndex: 0, + "aria-expanded": "true" + }); + }, + + _animate: function( 
toShow, toHide, data ) { + var total, easing, duration, + that = this, + adjust = 0, + down = toShow.length && + ( !toHide.length || ( toShow.index() < toHide.index() ) ), + animate = this.options.animate || {}, + options = down && animate.down || animate, + complete = function() { + that._toggleComplete( data ); + }; + + if ( typeof options === "number" ) { + duration = options; + } + if ( typeof options === "string" ) { + easing = options; + } + // fall back from options to animation in case of partial down settings + easing = easing || options.easing || animate.easing; + duration = duration || options.duration || animate.duration; + + if ( !toHide.length ) { + return toShow.animate( showProps, duration, easing, complete ); + } + if ( !toShow.length ) { + return toHide.animate( hideProps, duration, easing, complete ); + } + + total = toShow.show().outerHeight(); + toHide.animate( hideProps, { + duration: duration, + easing: easing, + step: function( now, fx ) { + fx.now = Math.round( now ); + } + }); + toShow + .hide() + .animate( showProps, { + duration: duration, + easing: easing, + complete: complete, + step: function( now, fx ) { + fx.now = Math.round( now ); + if ( fx.prop !== "height" ) { + adjust += fx.now; + } else if ( that.options.heightStyle !== "content" ) { + fx.now = Math.round( total - toHide.outerHeight() - adjust ); + adjust = 0; + } + } + }); + }, + + _toggleComplete: function( data ) { + var toHide = data.oldPanel; + + toHide + .removeClass( "ui-accordion-content-active" ) + .prev() + .removeClass( "ui-corner-top" ) + .addClass( "ui-corner-all" ); + + // Work around for rendering bug in IE (#5421) + if ( toHide.length ) { + toHide.parent()[0].className = toHide.parent()[0].className; + } + this._trigger( "activate", null, data ); + } +}); + +})( jQuery ); +(function( $, undefined ) { + +$.widget( "ui.autocomplete", { + version: "1.10.4", + defaultElement: "", + options: { + appendTo: null, + autoFocus: false, + delay: 300, + minLength: 1, + 
position: { + my: "left top", + at: "left bottom", + collision: "none" + }, + source: null, + + // callbacks + change: null, + close: null, + focus: null, + open: null, + response: null, + search: null, + select: null + }, + + requestIndex: 0, + pending: 0, + + _create: function() { + // Some browsers only repeat keydown events, not keypress events, + // so we use the suppressKeyPress flag to determine if we've already + // handled the keydown event. #7269 + // Unfortunately the code for & in keypress is the same as the up arrow, + // so we use the suppressKeyPressRepeat flag to avoid handling keypress + // events when we know the keydown event was used to modify the + // search term. #7799 + var suppressKeyPress, suppressKeyPressRepeat, suppressInput, + nodeName = this.element[0].nodeName.toLowerCase(), + isTextarea = nodeName === "textarea", + isInput = nodeName === "input"; + + this.isMultiLine = + // Textareas are always multi-line + isTextarea ? true : + // Inputs are always single-line, even if inside a contentEditable element + // IE also treats inputs as contentEditable + isInput ? false : + // All other element types are determined by whether or not they're contentEditable + this.element.prop( "isContentEditable" ); + + this.valueMethod = this.element[ isTextarea || isInput ? 
"val" : "text" ]; + this.isNewMenu = true; + + this.element + .addClass( "ui-autocomplete-input" ) + .attr( "autocomplete", "off" ); + + this._on( this.element, { + keydown: function( event ) { + if ( this.element.prop( "readOnly" ) ) { + suppressKeyPress = true; + suppressInput = true; + suppressKeyPressRepeat = true; + return; + } + + suppressKeyPress = false; + suppressInput = false; + suppressKeyPressRepeat = false; + var keyCode = $.ui.keyCode; + switch( event.keyCode ) { + case keyCode.PAGE_UP: + suppressKeyPress = true; + this._move( "previousPage", event ); + break; + case keyCode.PAGE_DOWN: + suppressKeyPress = true; + this._move( "nextPage", event ); + break; + case keyCode.UP: + suppressKeyPress = true; + this._keyEvent( "previous", event ); + break; + case keyCode.DOWN: + suppressKeyPress = true; + this._keyEvent( "next", event ); + break; + case keyCode.ENTER: + case keyCode.NUMPAD_ENTER: + // when menu is open and has focus + if ( this.menu.active ) { + // #6055 - Opera still allows the keypress to occur + // which causes forms to submit + suppressKeyPress = true; + event.preventDefault(); + this.menu.select( event ); + } + break; + case keyCode.TAB: + if ( this.menu.active ) { + this.menu.select( event ); + } + break; + case keyCode.ESCAPE: + if ( this.menu.element.is( ":visible" ) ) { + this._value( this.term ); + this.close( event ); + // Different browsers have different default behavior for escape + // Single press can mean undo or clear + // Double press in IE means clear the whole form + event.preventDefault(); + } + break; + default: + suppressKeyPressRepeat = true; + // search timeout should be triggered before the input value is changed + this._searchTimeout( event ); + break; + } + }, + keypress: function( event ) { + if ( suppressKeyPress ) { + suppressKeyPress = false; + if ( !this.isMultiLine || this.menu.element.is( ":visible" ) ) { + event.preventDefault(); + } + return; + } + if ( suppressKeyPressRepeat ) { + return; + } + + // 
replicate some key handlers to allow them to repeat in Firefox and Opera + var keyCode = $.ui.keyCode; + switch( event.keyCode ) { + case keyCode.PAGE_UP: + this._move( "previousPage", event ); + break; + case keyCode.PAGE_DOWN: + this._move( "nextPage", event ); + break; + case keyCode.UP: + this._keyEvent( "previous", event ); + break; + case keyCode.DOWN: + this._keyEvent( "next", event ); + break; + } + }, + input: function( event ) { + if ( suppressInput ) { + suppressInput = false; + event.preventDefault(); + return; + } + this._searchTimeout( event ); + }, + focus: function() { + this.selectedItem = null; + this.previous = this._value(); + }, + blur: function( event ) { + if ( this.cancelBlur ) { + delete this.cancelBlur; + return; + } + + clearTimeout( this.searching ); + this.close( event ); + this._change( event ); + } + }); + + this._initSource(); + this.menu = $( "
    " ) + .addClass( "ui-autocomplete ui-front" ) + .appendTo( this._appendTo() ) + .menu({ + // disable ARIA support, the live region takes care of that + role: null + }) + .hide() + .data( "ui-menu" ); + + this._on( this.menu.element, { + mousedown: function( event ) { + // prevent moving focus out of the text field + event.preventDefault(); + + // IE doesn't prevent moving focus even with event.preventDefault() + // so we set a flag to know when we should ignore the blur event + this.cancelBlur = true; + this._delay(function() { + delete this.cancelBlur; + }); + + // clicking on the scrollbar causes focus to shift to the body + // but we can't detect a mouseup or a click immediately afterward + // so we have to track the next mousedown and close the menu if + // the user clicks somewhere outside of the autocomplete + var menuElement = this.menu.element[ 0 ]; + if ( !$( event.target ).closest( ".ui-menu-item" ).length ) { + this._delay(function() { + var that = this; + this.document.one( "mousedown", function( event ) { + if ( event.target !== that.element[ 0 ] && + event.target !== menuElement && + !$.contains( menuElement, event.target ) ) { + that.close(); + } + }); + }); + } + }, + menufocus: function( event, ui ) { + // support: Firefox + // Prevent accidental activation of menu items in Firefox (#7024 #9118) + if ( this.isNewMenu ) { + this.isNewMenu = false; + if ( event.originalEvent && /^mouse/.test( event.originalEvent.type ) ) { + this.menu.blur(); + + this.document.one( "mousemove", function() { + $( event.target ).trigger( event.originalEvent ); + }); + + return; + } + } + + var item = ui.item.data( "ui-autocomplete-item" ); + if ( false !== this._trigger( "focus", event, { item: item } ) ) { + // use value to match what will end up in the input, if it was a key event + if ( event.originalEvent && /^key/.test( event.originalEvent.type ) ) { + this._value( item.value ); + } + } else { + // Normally the input is populated with the item's value as the + 
// menu is navigated, causing screen readers to notice a change and + // announce the item. Since the focus event was canceled, this doesn't + // happen, so we update the live region so that screen readers can + // still notice the change and announce it. + this.liveRegion.text( item.value ); + } + }, + menuselect: function( event, ui ) { + var item = ui.item.data( "ui-autocomplete-item" ), + previous = this.previous; + + // only trigger when focus was lost (click on menu) + if ( this.element[0] !== this.document[0].activeElement ) { + this.element.focus(); + this.previous = previous; + // #6109 - IE triggers two focus events and the second + // is asynchronous, so we need to reset the previous + // term synchronously and asynchronously :-( + this._delay(function() { + this.previous = previous; + this.selectedItem = item; + }); + } + + if ( false !== this._trigger( "select", event, { item: item } ) ) { + this._value( item.value ); + } + // reset the term after the select event + // this allows custom select handling to work properly + this.term = this._value(); + + this.close( event ); + this.selectedItem = item; + } + }); + + this.liveRegion = $( "", { + role: "status", + "aria-live": "polite" + }) + .addClass( "ui-helper-hidden-accessible" ) + .insertBefore( this.element ); + + // turning off autocomplete prevents the browser from remembering the + // value when navigating through history, so we re-enable autocomplete + // if the page is unloaded before the widget is destroyed. 
#7790 + this._on( this.window, { + beforeunload: function() { + this.element.removeAttr( "autocomplete" ); + } + }); + }, + + _destroy: function() { + clearTimeout( this.searching ); + this.element + .removeClass( "ui-autocomplete-input" ) + .removeAttr( "autocomplete" ); + this.menu.element.remove(); + this.liveRegion.remove(); + }, + + _setOption: function( key, value ) { + this._super( key, value ); + if ( key === "source" ) { + this._initSource(); + } + if ( key === "appendTo" ) { + this.menu.element.appendTo( this._appendTo() ); + } + if ( key === "disabled" && value && this.xhr ) { + this.xhr.abort(); + } + }, + + _appendTo: function() { + var element = this.options.appendTo; + + if ( element ) { + element = element.jquery || element.nodeType ? + $( element ) : + this.document.find( element ).eq( 0 ); + } + + if ( !element ) { + element = this.element.closest( ".ui-front" ); + } + + if ( !element.length ) { + element = this.document[0].body; + } + + return element; + }, + + _initSource: function() { + var array, url, + that = this; + if ( $.isArray(this.options.source) ) { + array = this.options.source; + this.source = function( request, response ) { + response( $.ui.autocomplete.filter( array, request.term ) ); + }; + } else if ( typeof this.options.source === "string" ) { + url = this.options.source; + this.source = function( request, response ) { + if ( that.xhr ) { + that.xhr.abort(); + } + that.xhr = $.ajax({ + url: url, + data: request, + dataType: "json", + success: function( data ) { + response( data ); + }, + error: function() { + response( [] ); + } + }); + }; + } else { + this.source = this.options.source; + } + }, + + _searchTimeout: function( event ) { + clearTimeout( this.searching ); + this.searching = this._delay(function() { + // only search if the value has changed + if ( this.term !== this._value() ) { + this.selectedItem = null; + this.search( null, event ); + } + }, this.options.delay ); + }, + + search: function( value, event ) { + value 
= value != null ? value : this._value(); + + // always save the actual value, not the one passed as an argument + this.term = this._value(); + + if ( value.length < this.options.minLength ) { + return this.close( event ); + } + + if ( this._trigger( "search", event ) === false ) { + return; + } + + return this._search( value ); + }, + + _search: function( value ) { + this.pending++; + this.element.addClass( "ui-autocomplete-loading" ); + this.cancelSearch = false; + + this.source( { term: value }, this._response() ); + }, + + _response: function() { + var index = ++this.requestIndex; + + return $.proxy(function( content ) { + if ( index === this.requestIndex ) { + this.__response( content ); + } + + this.pending--; + if ( !this.pending ) { + this.element.removeClass( "ui-autocomplete-loading" ); + } + }, this ); + }, + + __response: function( content ) { + if ( content ) { + content = this._normalize( content ); + } + this._trigger( "response", null, { content: content } ); + if ( !this.options.disabled && content && content.length && !this.cancelSearch ) { + this._suggest( content ); + this._trigger( "open" ); + } else { + // use ._close() instead of .close() so we don't cancel future searches + this._close(); + } + }, + + close: function( event ) { + this.cancelSearch = true; + this._close( event ); + }, + + _close: function( event ) { + if ( this.menu.element.is( ":visible" ) ) { + this.menu.element.hide(); + this.menu.blur(); + this.isNewMenu = true; + this._trigger( "close", event ); + } + }, + + _change: function( event ) { + if ( this.previous !== this._value() ) { + this._trigger( "change", event, { item: this.selectedItem } ); + } + }, + + _normalize: function( items ) { + // assume all items have the right format when the first item is complete + if ( items.length && items[0].label && items[0].value ) { + return items; + } + return $.map( items, function( item ) { + if ( typeof item === "string" ) { + return { + label: item, + value: item + }; + } + 
return $.extend({ + label: item.label || item.value, + value: item.value || item.label + }, item ); + }); + }, + + _suggest: function( items ) { + var ul = this.menu.element.empty(); + this._renderMenu( ul, items ); + this.isNewMenu = true; + this.menu.refresh(); + + // size and position menu + ul.show(); + this._resizeMenu(); + ul.position( $.extend({ + of: this.element + }, this.options.position )); + + if ( this.options.autoFocus ) { + this.menu.next(); + } + }, + + _resizeMenu: function() { + var ul = this.menu.element; + ul.outerWidth( Math.max( + // Firefox wraps long text (possibly a rounding bug) + // so we add 1px to avoid the wrapping (#7513) + ul.width( "" ).outerWidth() + 1, + this.element.outerWidth() + ) ); + }, + + _renderMenu: function( ul, items ) { + var that = this; + $.each( items, function( index, item ) { + that._renderItemData( ul, item ); + }); + }, + + _renderItemData: function( ul, item ) { + return this._renderItem( ul, item ).data( "ui-autocomplete-item", item ); + }, + + _renderItem: function( ul, item ) { + return $( "
  • " ) + .append( $( "" ).text( item.label ) ) + .appendTo( ul ); + }, + + _move: function( direction, event ) { + if ( !this.menu.element.is( ":visible" ) ) { + this.search( null, event ); + return; + } + if ( this.menu.isFirstItem() && /^previous/.test( direction ) || + this.menu.isLastItem() && /^next/.test( direction ) ) { + this._value( this.term ); + this.menu.blur(); + return; + } + this.menu[ direction ]( event ); + }, + + widget: function() { + return this.menu.element; + }, + + _value: function() { + return this.valueMethod.apply( this.element, arguments ); + }, + + _keyEvent: function( keyEvent, event ) { + if ( !this.isMultiLine || this.menu.element.is( ":visible" ) ) { + this._move( keyEvent, event ); + + // prevents moving cursor to beginning/end of the text field in some browsers + event.preventDefault(); + } + } +}); + +$.extend( $.ui.autocomplete, { + escapeRegex: function( value ) { + return value.replace(/[\-\[\]{}()*+?.,\\\^$|#\s]/g, "\\$&"); + }, + filter: function(array, term) { + var matcher = new RegExp( $.ui.autocomplete.escapeRegex(term), "i" ); + return $.grep( array, function(value) { + return matcher.test( value.label || value.value || value ); + }); + } +}); + + +// live region extension, adding a `messages` option +// NOTE: This is an experimental API. We are still investigating +// a full solution for string manipulation and internationalization. +$.widget( "ui.autocomplete", $.ui.autocomplete, { + options: { + messages: { + noResults: "No search results.", + results: function( amount ) { + return amount + ( amount > 1 ? 
" results are" : " result is" ) + + " available, use up and down arrow keys to navigate."; + } + } + }, + + __response: function( content ) { + var message; + this._superApply( arguments ); + if ( this.options.disabled || this.cancelSearch ) { + return; + } + if ( content && content.length ) { + message = this.options.messages.results( content.length ); + } else { + message = this.options.messages.noResults; + } + this.liveRegion.text( message ); + } +}); + +}( jQuery )); +(function( $, undefined ) { + +var lastActive, + baseClasses = "ui-button ui-widget ui-state-default ui-corner-all", + typeClasses = "ui-button-icons-only ui-button-icon-only ui-button-text-icons ui-button-text-icon-primary ui-button-text-icon-secondary ui-button-text-only", + formResetHandler = function() { + var form = $( this ); + setTimeout(function() { + form.find( ":ui-button" ).button( "refresh" ); + }, 1 ); + }, + radioGroup = function( radio ) { + var name = radio.name, + form = radio.form, + radios = $( [] ); + if ( name ) { + name = name.replace( /'/g, "\\'" ); + if ( form ) { + radios = $( form ).find( "[name='" + name + "']" ); + } else { + radios = $( "[name='" + name + "']", radio.ownerDocument ) + .filter(function() { + return !this.form; + }); + } + } + return radios; + }; + +$.widget( "ui.button", { + version: "1.10.4", + defaultElement: "").addClass(this._triggerClass). + html(!buttonImage ? buttonText : $("").attr( + { src:buttonImage, alt:buttonText, title:buttonText }))); + input[isRTL ? "before" : "after"](inst.trigger); + inst.trigger.click(function() { + if ($.datepicker._datepickerShowing && $.datepicker._lastInput === input[0]) { + $.datepicker._hideDatepicker(); + } else if ($.datepicker._datepickerShowing && $.datepicker._lastInput !== input[0]) { + $.datepicker._hideDatepicker(); + $.datepicker._showDatepicker(input[0]); + } else { + $.datepicker._showDatepicker(input[0]); + } + return false; + }); + } + }, + + /* Apply the maximum length for the date format. 
*/ + _autoSize: function(inst) { + if (this._get(inst, "autoSize") && !inst.inline) { + var findMax, max, maxI, i, + date = new Date(2009, 12 - 1, 20), // Ensure double digits + dateFormat = this._get(inst, "dateFormat"); + + if (dateFormat.match(/[DM]/)) { + findMax = function(names) { + max = 0; + maxI = 0; + for (i = 0; i < names.length; i++) { + if (names[i].length > max) { + max = names[i].length; + maxI = i; + } + } + return maxI; + }; + date.setMonth(findMax(this._get(inst, (dateFormat.match(/MM/) ? + "monthNames" : "monthNamesShort")))); + date.setDate(findMax(this._get(inst, (dateFormat.match(/DD/) ? + "dayNames" : "dayNamesShort"))) + 20 - date.getDay()); + } + inst.input.attr("size", this._formatDate(inst, date).length); + } + }, + + /* Attach an inline date picker to a div. */ + _inlineDatepicker: function(target, inst) { + var divSpan = $(target); + if (divSpan.hasClass(this.markerClassName)) { + return; + } + divSpan.addClass(this.markerClassName).append(inst.dpDiv); + $.data(target, PROP_NAME, inst); + this._setDate(inst, this._getDefaultDate(inst), true); + this._updateDatepicker(inst); + this._updateAlternate(inst); + //If disabled option is true, disable the datepicker before showing it (see ticket #5665) + if( inst.settings.disabled ) { + this._disableDatepicker( target ); + } + // Set display:block in place of inst.dpDiv.show() which won't work on disconnected elements + // http://bugs.jqueryui.com/ticket/7552 - A Datepicker created on a detached div has zero height + inst.dpDiv.css( "display", "block" ); + }, + + /* Pop-up the date picker in a "dialog" box. 
+ * @param input element - ignored + * @param date string or Date - the initial date to display + * @param onSelect function - the function to call when a date is selected + * @param settings object - update the dialog date picker instance's settings (anonymous object) + * @param pos int[2] - coordinates for the dialog's position within the screen or + * event - with x/y coordinates or + * leave empty for default (screen centre) + * @return the manager object + */ + _dialogDatepicker: function(input, date, onSelect, settings, pos) { + var id, browserWidth, browserHeight, scrollX, scrollY, + inst = this._dialogInst; // internal instance + + if (!inst) { + this.uuid += 1; + id = "dp" + this.uuid; + this._dialogInput = $(""); + this._dialogInput.keydown(this._doKeyDown); + $("body").append(this._dialogInput); + inst = this._dialogInst = this._newInst(this._dialogInput, false); + inst.settings = {}; + $.data(this._dialogInput[0], PROP_NAME, inst); + } + extendRemove(inst.settings, settings || {}); + date = (date && date.constructor === Date ? this._formatDate(inst, date) : date); + this._dialogInput.val(date); + + this._pos = (pos ? (pos.length ? 
pos : [pos.pageX, pos.pageY]) : null); + if (!this._pos) { + browserWidth = document.documentElement.clientWidth; + browserHeight = document.documentElement.clientHeight; + scrollX = document.documentElement.scrollLeft || document.body.scrollLeft; + scrollY = document.documentElement.scrollTop || document.body.scrollTop; + this._pos = // should use actual width/height below + [(browserWidth / 2) - 100 + scrollX, (browserHeight / 2) - 150 + scrollY]; + } + + // move input on screen for focus, but hidden behind dialog + this._dialogInput.css("left", (this._pos[0] + 20) + "px").css("top", this._pos[1] + "px"); + inst.settings.onSelect = onSelect; + this._inDialog = true; + this.dpDiv.addClass(this._dialogClass); + this._showDatepicker(this._dialogInput[0]); + if ($.blockUI) { + $.blockUI(this.dpDiv); + } + $.data(this._dialogInput[0], PROP_NAME, inst); + return this; + }, + + /* Detach a datepicker from its control. + * @param target element - the target input field or division or span + */ + _destroyDatepicker: function(target) { + var nodeName, + $target = $(target), + inst = $.data(target, PROP_NAME); + + if (!$target.hasClass(this.markerClassName)) { + return; + } + + nodeName = target.nodeName.toLowerCase(); + $.removeData(target, PROP_NAME); + if (nodeName === "input") { + inst.append.remove(); + inst.trigger.remove(); + $target.removeClass(this.markerClassName). + unbind("focus", this._showDatepicker). + unbind("keydown", this._doKeyDown). + unbind("keypress", this._doKeyPress). + unbind("keyup", this._doKeyUp); + } else if (nodeName === "div" || nodeName === "span") { + $target.removeClass(this.markerClassName).empty(); + } + }, + + /* Enable the date picker to a jQuery selection. 
+ * @param target element - the target input field or division or span + */ + _enableDatepicker: function(target) { + var nodeName, inline, + $target = $(target), + inst = $.data(target, PROP_NAME); + + if (!$target.hasClass(this.markerClassName)) { + return; + } + + nodeName = target.nodeName.toLowerCase(); + if (nodeName === "input") { + target.disabled = false; + inst.trigger.filter("button"). + each(function() { this.disabled = false; }).end(). + filter("img").css({opacity: "1.0", cursor: ""}); + } else if (nodeName === "div" || nodeName === "span") { + inline = $target.children("." + this._inlineClass); + inline.children().removeClass("ui-state-disabled"); + inline.find("select.ui-datepicker-month, select.ui-datepicker-year"). + prop("disabled", false); + } + this._disabledInputs = $.map(this._disabledInputs, + function(value) { return (value === target ? null : value); }); // delete entry + }, + + /* Disable the date picker to a jQuery selection. + * @param target element - the target input field or division or span + */ + _disableDatepicker: function(target) { + var nodeName, inline, + $target = $(target), + inst = $.data(target, PROP_NAME); + + if (!$target.hasClass(this.markerClassName)) { + return; + } + + nodeName = target.nodeName.toLowerCase(); + if (nodeName === "input") { + target.disabled = true; + inst.trigger.filter("button"). + each(function() { this.disabled = true; }).end(). + filter("img").css({opacity: "0.5", cursor: "default"}); + } else if (nodeName === "div" || nodeName === "span") { + inline = $target.children("." + this._inlineClass); + inline.children().addClass("ui-state-disabled"); + inline.find("select.ui-datepicker-month, select.ui-datepicker-year"). + prop("disabled", true); + } + this._disabledInputs = $.map(this._disabledInputs, + function(value) { return (value === target ? 
null : value); }); // delete entry + this._disabledInputs[this._disabledInputs.length] = target; + }, + + /* Is the first field in a jQuery collection disabled as a datepicker? + * @param target element - the target input field or division or span + * @return boolean - true if disabled, false if enabled + */ + _isDisabledDatepicker: function(target) { + if (!target) { + return false; + } + for (var i = 0; i < this._disabledInputs.length; i++) { + if (this._disabledInputs[i] === target) { + return true; + } + } + return false; + }, + + /* Retrieve the instance data for the target control. + * @param target element - the target input field or division or span + * @return object - the associated instance data + * @throws error if a jQuery problem getting data + */ + _getInst: function(target) { + try { + return $.data(target, PROP_NAME); + } + catch (err) { + throw "Missing instance data for this datepicker"; + } + }, + + /* Update or retrieve the settings for a date picker attached to an input field or division. + * @param target element - the target input field or division or span + * @param name object - the new settings to update or + * string - the name of the setting to change or retrieve, + * when retrieving also "all" for all instance settings or + * "defaults" for all global defaults + * @param value any - the new value for the setting + * (omit if above is an object or to retrieve a value) + */ + _optionDatepicker: function(target, name, value) { + var settings, date, minDate, maxDate, + inst = this._getInst(target); + + if (arguments.length === 2 && typeof name === "string") { + return (name === "defaults" ? $.extend({}, $.datepicker._defaults) : + (inst ? (name === "all" ? 
$.extend({}, inst.settings) : + this._get(inst, name)) : null)); + } + + settings = name || {}; + if (typeof name === "string") { + settings = {}; + settings[name] = value; + } + + if (inst) { + if (this._curInst === inst) { + this._hideDatepicker(); + } + + date = this._getDateDatepicker(target, true); + minDate = this._getMinMaxDate(inst, "min"); + maxDate = this._getMinMaxDate(inst, "max"); + extendRemove(inst.settings, settings); + // reformat the old minDate/maxDate values if dateFormat changes and a new minDate/maxDate isn't provided + if (minDate !== null && settings.dateFormat !== undefined && settings.minDate === undefined) { + inst.settings.minDate = this._formatDate(inst, minDate); + } + if (maxDate !== null && settings.dateFormat !== undefined && settings.maxDate === undefined) { + inst.settings.maxDate = this._formatDate(inst, maxDate); + } + if ( "disabled" in settings ) { + if ( settings.disabled ) { + this._disableDatepicker(target); + } else { + this._enableDatepicker(target); + } + } + this._attachments($(target), inst); + this._autoSize(inst); + this._setDate(inst, date); + this._updateAlternate(inst); + this._updateDatepicker(inst); + } + }, + + // change method deprecated + _changeDatepicker: function(target, name, value) { + this._optionDatepicker(target, name, value); + }, + + /* Redraw the date picker attached to an input field or division. + * @param target element - the target input field or division or span + */ + _refreshDatepicker: function(target) { + var inst = this._getInst(target); + if (inst) { + this._updateDatepicker(inst); + } + }, + + /* Set the dates for a jQuery selection. 
+ * @param target element - the target input field or division or span + * @param date Date - the new date + */ + _setDateDatepicker: function(target, date) { + var inst = this._getInst(target); + if (inst) { + this._setDate(inst, date); + this._updateDatepicker(inst); + this._updateAlternate(inst); + } + }, + + /* Get the date(s) for the first entry in a jQuery selection. + * @param target element - the target input field or division or span + * @param noDefault boolean - true if no default date is to be used + * @return Date - the current date + */ + _getDateDatepicker: function(target, noDefault) { + var inst = this._getInst(target); + if (inst && !inst.inline) { + this._setDateFromField(inst, noDefault); + } + return (inst ? this._getDate(inst) : null); + }, + + /* Handle keystrokes. */ + _doKeyDown: function(event) { + var onSelect, dateStr, sel, + inst = $.datepicker._getInst(event.target), + handled = true, + isRTL = inst.dpDiv.is(".ui-datepicker-rtl"); + + inst._keyEvent = true; + if ($.datepicker._datepickerShowing) { + switch (event.keyCode) { + case 9: $.datepicker._hideDatepicker(); + handled = false; + break; // hide on tab out + case 13: sel = $("td." + $.datepicker._dayOverClass + ":not(." + + $.datepicker._currentClass + ")", inst.dpDiv); + if (sel[0]) { + $.datepicker._selectDay(event.target, inst.selectedMonth, inst.selectedYear, sel[0]); + } + + onSelect = $.datepicker._get(inst, "onSelect"); + if (onSelect) { + dateStr = $.datepicker._formatDate(inst); + + // trigger custom callback + onSelect.apply((inst.input ? inst.input[0] : null), [dateStr, inst]); + } else { + $.datepicker._hideDatepicker(); + } + + return false; // don't submit the form + case 27: $.datepicker._hideDatepicker(); + break; // hide on escape + case 33: $.datepicker._adjustDate(event.target, (event.ctrlKey ? 
+ -$.datepicker._get(inst, "stepBigMonths") : + -$.datepicker._get(inst, "stepMonths")), "M"); + break; // previous month/year on page up/+ ctrl + case 34: $.datepicker._adjustDate(event.target, (event.ctrlKey ? + +$.datepicker._get(inst, "stepBigMonths") : + +$.datepicker._get(inst, "stepMonths")), "M"); + break; // next month/year on page down/+ ctrl + case 35: if (event.ctrlKey || event.metaKey) { + $.datepicker._clearDate(event.target); + } + handled = event.ctrlKey || event.metaKey; + break; // clear on ctrl or command +end + case 36: if (event.ctrlKey || event.metaKey) { + $.datepicker._gotoToday(event.target); + } + handled = event.ctrlKey || event.metaKey; + break; // current on ctrl or command +home + case 37: if (event.ctrlKey || event.metaKey) { + $.datepicker._adjustDate(event.target, (isRTL ? +1 : -1), "D"); + } + handled = event.ctrlKey || event.metaKey; + // -1 day on ctrl or command +left + if (event.originalEvent.altKey) { + $.datepicker._adjustDate(event.target, (event.ctrlKey ? + -$.datepicker._get(inst, "stepBigMonths") : + -$.datepicker._get(inst, "stepMonths")), "M"); + } + // next month/year on alt +left on Mac + break; + case 38: if (event.ctrlKey || event.metaKey) { + $.datepicker._adjustDate(event.target, -7, "D"); + } + handled = event.ctrlKey || event.metaKey; + break; // -1 week on ctrl or command +up + case 39: if (event.ctrlKey || event.metaKey) { + $.datepicker._adjustDate(event.target, (isRTL ? -1 : +1), "D"); + } + handled = event.ctrlKey || event.metaKey; + // +1 day on ctrl or command +right + if (event.originalEvent.altKey) { + $.datepicker._adjustDate(event.target, (event.ctrlKey ? 
+ +$.datepicker._get(inst, "stepBigMonths") : + +$.datepicker._get(inst, "stepMonths")), "M"); + } + // next month/year on alt +right + break; + case 40: if (event.ctrlKey || event.metaKey) { + $.datepicker._adjustDate(event.target, +7, "D"); + } + handled = event.ctrlKey || event.metaKey; + break; // +1 week on ctrl or command +down + default: handled = false; + } + } else if (event.keyCode === 36 && event.ctrlKey) { // display the date picker on ctrl+home + $.datepicker._showDatepicker(this); + } else { + handled = false; + } + + if (handled) { + event.preventDefault(); + event.stopPropagation(); + } + }, + + /* Filter entered characters - based on date format. */ + _doKeyPress: function(event) { + var chars, chr, + inst = $.datepicker._getInst(event.target); + + if ($.datepicker._get(inst, "constrainInput")) { + chars = $.datepicker._possibleChars($.datepicker._get(inst, "dateFormat")); + chr = String.fromCharCode(event.charCode == null ? event.keyCode : event.charCode); + return event.ctrlKey || event.metaKey || (chr < " " || !chars || chars.indexOf(chr) > -1); + } + }, + + /* Synchronise manual entry and field/alternate field. */ + _doKeyUp: function(event) { + var date, + inst = $.datepicker._getInst(event.target); + + if (inst.input.val() !== inst.lastVal) { + try { + date = $.datepicker.parseDate($.datepicker._get(inst, "dateFormat"), + (inst.input ? inst.input.val() : null), + $.datepicker._getFormatConfig(inst)); + + if (date) { // only if valid + $.datepicker._setDateFromField(inst); + $.datepicker._updateAlternate(inst); + $.datepicker._updateDatepicker(inst); + } + } + catch (err) { + } + } + return true; + }, + + /* Pop-up the date picker for a given input field. + * If false returned from beforeShow event handler do not show. 
+ * @param input element - the input field attached to the date picker or + * event - if triggered by focus + */ + _showDatepicker: function(input) { + input = input.target || input; + if (input.nodeName.toLowerCase() !== "input") { // find from button/image trigger + input = $("input", input.parentNode)[0]; + } + + if ($.datepicker._isDisabledDatepicker(input) || $.datepicker._lastInput === input) { // already here + return; + } + + var inst, beforeShow, beforeShowSettings, isFixed, + offset, showAnim, duration; + + inst = $.datepicker._getInst(input); + if ($.datepicker._curInst && $.datepicker._curInst !== inst) { + $.datepicker._curInst.dpDiv.stop(true, true); + if ( inst && $.datepicker._datepickerShowing ) { + $.datepicker._hideDatepicker( $.datepicker._curInst.input[0] ); + } + } + + beforeShow = $.datepicker._get(inst, "beforeShow"); + beforeShowSettings = beforeShow ? beforeShow.apply(input, [input, inst]) : {}; + if(beforeShowSettings === false){ + return; + } + extendRemove(inst.settings, beforeShowSettings); + + inst.lastVal = null; + $.datepicker._lastInput = input; + $.datepicker._setDateFromField(inst); + + if ($.datepicker._inDialog) { // hide cursor + input.value = ""; + } + if (!$.datepicker._pos) { // position below input + $.datepicker._pos = $.datepicker._findPos(input); + $.datepicker._pos[1] += input.offsetHeight; // add the height + } + + isFixed = false; + $(input).parents().each(function() { + isFixed |= $(this).css("position") === "fixed"; + return !isFixed; + }); + + offset = {left: $.datepicker._pos[0], top: $.datepicker._pos[1]}; + $.datepicker._pos = null; + //to avoid flashes on Firefox + inst.dpDiv.empty(); + // determine sizing offscreen + inst.dpDiv.css({position: "absolute", display: "block", top: "-1000px"}); + $.datepicker._updateDatepicker(inst); + // fix width for dynamic number of date pickers + // and adjust position before showing + offset = $.datepicker._checkOffset(inst, offset, isFixed); + inst.dpDiv.css({position: 
($.datepicker._inDialog && $.blockUI ? + "static" : (isFixed ? "fixed" : "absolute")), display: "none", + left: offset.left + "px", top: offset.top + "px"}); + + if (!inst.inline) { + showAnim = $.datepicker._get(inst, "showAnim"); + duration = $.datepicker._get(inst, "duration"); + inst.dpDiv.zIndex($(input).zIndex()+1); + $.datepicker._datepickerShowing = true; + + if ( $.effects && $.effects.effect[ showAnim ] ) { + inst.dpDiv.show(showAnim, $.datepicker._get(inst, "showOptions"), duration); + } else { + inst.dpDiv[showAnim || "show"](showAnim ? duration : null); + } + + if ( $.datepicker._shouldFocusInput( inst ) ) { + inst.input.focus(); + } + + $.datepicker._curInst = inst; + } + }, + + /* Generate the date picker content. */ + _updateDatepicker: function(inst) { + this.maxRows = 4; //Reset the max number of rows being displayed (see #7043) + instActive = inst; // for delegate hover events + inst.dpDiv.empty().append(this._generateHTML(inst)); + this._attachHandlers(inst); + inst.dpDiv.find("." + this._dayOverClass + " a").mouseover(); + + var origyearshtml, + numMonths = this._getNumberOfMonths(inst), + cols = numMonths[1], + width = 17; + + inst.dpDiv.removeClass("ui-datepicker-multi-2 ui-datepicker-multi-3 ui-datepicker-multi-4").width(""); + if (cols > 1) { + inst.dpDiv.addClass("ui-datepicker-multi-" + cols).css("width", (width * cols) + "em"); + } + inst.dpDiv[(numMonths[0] !== 1 || numMonths[1] !== 1 ? "add" : "remove") + + "Class"]("ui-datepicker-multi"); + inst.dpDiv[(this._get(inst, "isRTL") ? "add" : "remove") + + "Class"]("ui-datepicker-rtl"); + + if (inst === $.datepicker._curInst && $.datepicker._datepickerShowing && $.datepicker._shouldFocusInput( inst ) ) { + inst.input.focus(); + } + + // deffered render of the years select (to avoid flashes on Firefox) + if( inst.yearshtml ){ + origyearshtml = inst.yearshtml; + setTimeout(function(){ + //assure that inst.yearshtml didn't change. 
+ if( origyearshtml === inst.yearshtml && inst.yearshtml ){ + inst.dpDiv.find("select.ui-datepicker-year:first").replaceWith(inst.yearshtml); + } + origyearshtml = inst.yearshtml = null; + }, 0); + } + }, + + // #6694 - don't focus the input if it's already focused + // this breaks the change event in IE + // Support: IE and jQuery <1.9 + _shouldFocusInput: function( inst ) { + return inst.input && inst.input.is( ":visible" ) && !inst.input.is( ":disabled" ) && !inst.input.is( ":focus" ); + }, + + /* Check positioning to remain on screen. */ + _checkOffset: function(inst, offset, isFixed) { + var dpWidth = inst.dpDiv.outerWidth(), + dpHeight = inst.dpDiv.outerHeight(), + inputWidth = inst.input ? inst.input.outerWidth() : 0, + inputHeight = inst.input ? inst.input.outerHeight() : 0, + viewWidth = document.documentElement.clientWidth + (isFixed ? 0 : $(document).scrollLeft()), + viewHeight = document.documentElement.clientHeight + (isFixed ? 0 : $(document).scrollTop()); + + offset.left -= (this._get(inst, "isRTL") ? (dpWidth - inputWidth) : 0); + offset.left -= (isFixed && offset.left === inst.input.offset().left) ? $(document).scrollLeft() : 0; + offset.top -= (isFixed && offset.top === (inst.input.offset().top + inputHeight)) ? $(document).scrollTop() : 0; + + // now check if datepicker is showing outside window viewport - move to a better place if so. + offset.left -= Math.min(offset.left, (offset.left + dpWidth > viewWidth && viewWidth > dpWidth) ? + Math.abs(offset.left + dpWidth - viewWidth) : 0); + offset.top -= Math.min(offset.top, (offset.top + dpHeight > viewHeight && viewHeight > dpHeight) ? + Math.abs(dpHeight + inputHeight) : 0); + + return offset; + }, + + /* Find an object's position on the screen. */ + _findPos: function(obj) { + var position, + inst = this._getInst(obj), + isRTL = this._get(inst, "isRTL"); + + while (obj && (obj.type === "hidden" || obj.nodeType !== 1 || $.expr.filters.hidden(obj))) { + obj = obj[isRTL ? 
"previousSibling" : "nextSibling"]; + } + + position = $(obj).offset(); + return [position.left, position.top]; + }, + + /* Hide the date picker from view. + * @param input element - the input field attached to the date picker + */ + _hideDatepicker: function(input) { + var showAnim, duration, postProcess, onClose, + inst = this._curInst; + + if (!inst || (input && inst !== $.data(input, PROP_NAME))) { + return; + } + + if (this._datepickerShowing) { + showAnim = this._get(inst, "showAnim"); + duration = this._get(inst, "duration"); + postProcess = function() { + $.datepicker._tidyDialog(inst); + }; + + // DEPRECATED: after BC for 1.8.x $.effects[ showAnim ] is not needed + if ( $.effects && ( $.effects.effect[ showAnim ] || $.effects[ showAnim ] ) ) { + inst.dpDiv.hide(showAnim, $.datepicker._get(inst, "showOptions"), duration, postProcess); + } else { + inst.dpDiv[(showAnim === "slideDown" ? "slideUp" : + (showAnim === "fadeIn" ? "fadeOut" : "hide"))]((showAnim ? duration : null), postProcess); + } + + if (!showAnim) { + postProcess(); + } + this._datepickerShowing = false; + + onClose = this._get(inst, "onClose"); + if (onClose) { + onClose.apply((inst.input ? inst.input[0] : null), [(inst.input ? inst.input.val() : ""), inst]); + } + + this._lastInput = null; + if (this._inDialog) { + this._dialogInput.css({ position: "absolute", left: "0", top: "-100px" }); + if ($.blockUI) { + $.unblockUI(); + $("body").append(this.dpDiv); + } + } + this._inDialog = false; + } + }, + + /* Tidy up after a dialog display. */ + _tidyDialog: function(inst) { + inst.dpDiv.removeClass(this._dialogClass).unbind(".ui-datepicker-calendar"); + }, + + /* Close date picker if clicked elsewhere. 
*/ + _checkExternalClick: function(event) { + if (!$.datepicker._curInst) { + return; + } + + var $target = $(event.target), + inst = $.datepicker._getInst($target[0]); + + if ( ( ( $target[0].id !== $.datepicker._mainDivId && + $target.parents("#" + $.datepicker._mainDivId).length === 0 && + !$target.hasClass($.datepicker.markerClassName) && + !$target.closest("." + $.datepicker._triggerClass).length && + $.datepicker._datepickerShowing && !($.datepicker._inDialog && $.blockUI) ) ) || + ( $target.hasClass($.datepicker.markerClassName) && $.datepicker._curInst !== inst ) ) { + $.datepicker._hideDatepicker(); + } + }, + + /* Adjust one of the date sub-fields. */ + _adjustDate: function(id, offset, period) { + var target = $(id), + inst = this._getInst(target[0]); + + if (this._isDisabledDatepicker(target[0])) { + return; + } + this._adjustInstDate(inst, offset + + (period === "M" ? this._get(inst, "showCurrentAtPos") : 0), // undo positioning + period); + this._updateDatepicker(inst); + }, + + /* Action for current link. */ + _gotoToday: function(id) { + var date, + target = $(id), + inst = this._getInst(target[0]); + + if (this._get(inst, "gotoCurrent") && inst.currentDay) { + inst.selectedDay = inst.currentDay; + inst.drawMonth = inst.selectedMonth = inst.currentMonth; + inst.drawYear = inst.selectedYear = inst.currentYear; + } else { + date = new Date(); + inst.selectedDay = date.getDate(); + inst.drawMonth = inst.selectedMonth = date.getMonth(); + inst.drawYear = inst.selectedYear = date.getFullYear(); + } + this._notifyChange(inst); + this._adjustDate(target); + }, + + /* Action for selecting a new month/year. */ + _selectMonthYear: function(id, select, period) { + var target = $(id), + inst = this._getInst(target[0]); + + inst["selected" + (period === "M" ? "Month" : "Year")] = + inst["draw" + (period === "M" ? 
"Month" : "Year")] = + parseInt(select.options[select.selectedIndex].value,10); + + this._notifyChange(inst); + this._adjustDate(target); + }, + + /* Action for selecting a day. */ + _selectDay: function(id, month, year, td) { + var inst, + target = $(id); + + if ($(td).hasClass(this._unselectableClass) || this._isDisabledDatepicker(target[0])) { + return; + } + + inst = this._getInst(target[0]); + inst.selectedDay = inst.currentDay = $("a", td).html(); + inst.selectedMonth = inst.currentMonth = month; + inst.selectedYear = inst.currentYear = year; + this._selectDate(id, this._formatDate(inst, + inst.currentDay, inst.currentMonth, inst.currentYear)); + }, + + /* Erase the input field and hide the date picker. */ + _clearDate: function(id) { + var target = $(id); + this._selectDate(target, ""); + }, + + /* Update the input field with the selected date. */ + _selectDate: function(id, dateStr) { + var onSelect, + target = $(id), + inst = this._getInst(target[0]); + + dateStr = (dateStr != null ? dateStr : this._formatDate(inst)); + if (inst.input) { + inst.input.val(dateStr); + } + this._updateAlternate(inst); + + onSelect = this._get(inst, "onSelect"); + if (onSelect) { + onSelect.apply((inst.input ? inst.input[0] : null), [dateStr, inst]); // trigger custom callback + } else if (inst.input) { + inst.input.trigger("change"); // fire the change event + } + + if (inst.inline){ + this._updateDatepicker(inst); + } else { + this._hideDatepicker(); + this._lastInput = inst.input[0]; + if (typeof(inst.input[0]) !== "object") { + inst.input.focus(); // restore focus + } + this._lastInput = null; + } + }, + + /* Update any alternate field to synchronise with the main field. 
*/ + _updateAlternate: function(inst) { + var altFormat, date, dateStr, + altField = this._get(inst, "altField"); + + if (altField) { // update alternate field too + altFormat = this._get(inst, "altFormat") || this._get(inst, "dateFormat"); + date = this._getDate(inst); + dateStr = this.formatDate(altFormat, date, this._getFormatConfig(inst)); + $(altField).each(function() { $(this).val(dateStr); }); + } + }, + + /* Set as beforeShowDay function to prevent selection of weekends. + * @param date Date - the date to customise + * @return [boolean, string] - is this date selectable?, what is its CSS class? + */ + noWeekends: function(date) { + var day = date.getDay(); + return [(day > 0 && day < 6), ""]; + }, + + /* Set as calculateWeek to determine the week of the year based on the ISO 8601 definition. + * @param date Date - the date to get the week for + * @return number - the number of the week within the year that contains this date + */ + iso8601Week: function(date) { + var time, + checkDate = new Date(date.getTime()); + + // Find Thursday of this week starting on Monday + checkDate.setDate(checkDate.getDate() + 4 - (checkDate.getDay() || 7)); + + time = checkDate.getTime(); + checkDate.setMonth(0); // Compare with Jan 1 + checkDate.setDate(1); + return Math.floor(Math.round((time - checkDate) / 86400000) / 7) + 1; + }, + + /* Parse a string value into a date object. + * See formatDate below for the possible formats. 
+ * + * @param format string - the expected format of the date + * @param value string - the date in the above format + * @param settings Object - attributes include: + * shortYearCutoff number - the cutoff year for determining the century (optional) + * dayNamesShort string[7] - abbreviated names of the days from Sunday (optional) + * dayNames string[7] - names of the days from Sunday (optional) + * monthNamesShort string[12] - abbreviated names of the months (optional) + * monthNames string[12] - names of the months (optional) + * @return Date - the extracted date value or null if value is blank + */ + parseDate: function (format, value, settings) { + if (format == null || value == null) { + throw "Invalid arguments"; + } + + value = (typeof value === "object" ? value.toString() : value + ""); + if (value === "") { + return null; + } + + var iFormat, dim, extra, + iValue = 0, + shortYearCutoffTemp = (settings ? settings.shortYearCutoff : null) || this._defaults.shortYearCutoff, + shortYearCutoff = (typeof shortYearCutoffTemp !== "string" ? shortYearCutoffTemp : + new Date().getFullYear() % 100 + parseInt(shortYearCutoffTemp, 10)), + dayNamesShort = (settings ? settings.dayNamesShort : null) || this._defaults.dayNamesShort, + dayNames = (settings ? settings.dayNames : null) || this._defaults.dayNames, + monthNamesShort = (settings ? settings.monthNamesShort : null) || this._defaults.monthNamesShort, + monthNames = (settings ? settings.monthNames : null) || this._defaults.monthNames, + year = -1, + month = -1, + day = -1, + doy = -1, + literal = false, + date, + // Check whether a format character is doubled + lookAhead = function(match) { + var matches = (iFormat + 1 < format.length && format.charAt(iFormat + 1) === match); + if (matches) { + iFormat++; + } + return matches; + }, + // Extract a number from the string value + getNumber = function(match) { + var isDoubled = lookAhead(match), + size = (match === "@" ? 14 : (match === "!" ? 
20 : + (match === "y" && isDoubled ? 4 : (match === "o" ? 3 : 2)))), + digits = new RegExp("^\\d{1," + size + "}"), + num = value.substring(iValue).match(digits); + if (!num) { + throw "Missing number at position " + iValue; + } + iValue += num[0].length; + return parseInt(num[0], 10); + }, + // Extract a name from the string value and convert to an index + getName = function(match, shortNames, longNames) { + var index = -1, + names = $.map(lookAhead(match) ? longNames : shortNames, function (v, k) { + return [ [k, v] ]; + }).sort(function (a, b) { + return -(a[1].length - b[1].length); + }); + + $.each(names, function (i, pair) { + var name = pair[1]; + if (value.substr(iValue, name.length).toLowerCase() === name.toLowerCase()) { + index = pair[0]; + iValue += name.length; + return false; + } + }); + if (index !== -1) { + return index + 1; + } else { + throw "Unknown name at position " + iValue; + } + }, + // Confirm that a literal character matches the string value + checkLiteral = function() { + if (value.charAt(iValue) !== format.charAt(iFormat)) { + throw "Unexpected literal at position " + iValue; + } + iValue++; + }; + + for (iFormat = 0; iFormat < format.length; iFormat++) { + if (literal) { + if (format.charAt(iFormat) === "'" && !lookAhead("'")) { + literal = false; + } else { + checkLiteral(); + } + } else { + switch (format.charAt(iFormat)) { + case "d": + day = getNumber("d"); + break; + case "D": + getName("D", dayNamesShort, dayNames); + break; + case "o": + doy = getNumber("o"); + break; + case "m": + month = getNumber("m"); + break; + case "M": + month = getName("M", monthNamesShort, monthNames); + break; + case "y": + year = getNumber("y"); + break; + case "@": + date = new Date(getNumber("@")); + year = date.getFullYear(); + month = date.getMonth() + 1; + day = date.getDate(); + break; + case "!": + date = new Date((getNumber("!") - this._ticksTo1970) / 10000); + year = date.getFullYear(); + month = date.getMonth() + 1; + day = date.getDate(); + 
break; + case "'": + if (lookAhead("'")){ + checkLiteral(); + } else { + literal = true; + } + break; + default: + checkLiteral(); + } + } + } + + if (iValue < value.length){ + extra = value.substr(iValue); + if (!/^\s+/.test(extra)) { + throw "Extra/unparsed characters found in date: " + extra; + } + } + + if (year === -1) { + year = new Date().getFullYear(); + } else if (year < 100) { + year += new Date().getFullYear() - new Date().getFullYear() % 100 + + (year <= shortYearCutoff ? 0 : -100); + } + + if (doy > -1) { + month = 1; + day = doy; + do { + dim = this._getDaysInMonth(year, month - 1); + if (day <= dim) { + break; + } + month++; + day -= dim; + } while (true); + } + + date = this._daylightSavingAdjust(new Date(year, month - 1, day)); + if (date.getFullYear() !== year || date.getMonth() + 1 !== month || date.getDate() !== day) { + throw "Invalid date"; // E.g. 31/02/00 + } + return date; + }, + + /* Standard date formats. */ + ATOM: "yy-mm-dd", // RFC 3339 (ISO 8601) + COOKIE: "D, dd M yy", + ISO_8601: "yy-mm-dd", + RFC_822: "D, d M y", + RFC_850: "DD, dd-M-y", + RFC_1036: "D, d M y", + RFC_1123: "D, d M yy", + RFC_2822: "D, d M yy", + RSS: "D, d M y", // RFC 822 + TICKS: "!", + TIMESTAMP: "@", + W3C: "yy-mm-dd", // ISO 8601 + + _ticksTo1970: (((1970 - 1) * 365 + Math.floor(1970 / 4) - Math.floor(1970 / 100) + + Math.floor(1970 / 400)) * 24 * 60 * 60 * 10000000), + + /* Format a date object into a string value. + * The format can be combinations of the following: + * d - day of month (no leading zero) + * dd - day of month (two digit) + * o - day of year (no leading zeros) + * oo - day of year (three digit) + * D - day name short + * DD - day name long + * m - month of year (no leading zero) + * mm - month of year (two digit) + * M - month name short + * MM - month name long + * y - year (two digit) + * yy - year (four digit) + * @ - Unix timestamp (ms since 01/01/1970) + * ! - Windows ticks (100ns since 01/01/0001) + * "..." 
- literal text + * '' - single quote + * + * @param format string - the desired format of the date + * @param date Date - the date value to format + * @param settings Object - attributes include: + * dayNamesShort string[7] - abbreviated names of the days from Sunday (optional) + * dayNames string[7] - names of the days from Sunday (optional) + * monthNamesShort string[12] - abbreviated names of the months (optional) + * monthNames string[12] - names of the months (optional) + * @return string - the date in the above format + */ + formatDate: function (format, date, settings) { + if (!date) { + return ""; + } + + var iFormat, + dayNamesShort = (settings ? settings.dayNamesShort : null) || this._defaults.dayNamesShort, + dayNames = (settings ? settings.dayNames : null) || this._defaults.dayNames, + monthNamesShort = (settings ? settings.monthNamesShort : null) || this._defaults.monthNamesShort, + monthNames = (settings ? settings.monthNames : null) || this._defaults.monthNames, + // Check whether a format character is doubled + lookAhead = function(match) { + var matches = (iFormat + 1 < format.length && format.charAt(iFormat + 1) === match); + if (matches) { + iFormat++; + } + return matches; + }, + // Format a number, with leading zero if necessary + formatNumber = function(match, value, len) { + var num = "" + value; + if (lookAhead(match)) { + while (num.length < len) { + num = "0" + num; + } + } + return num; + }, + // Format a name, short or long as requested + formatName = function(match, value, shortNames, longNames) { + return (lookAhead(match) ? 
longNames[value] : shortNames[value]); + }, + output = "", + literal = false; + + if (date) { + for (iFormat = 0; iFormat < format.length; iFormat++) { + if (literal) { + if (format.charAt(iFormat) === "'" && !lookAhead("'")) { + literal = false; + } else { + output += format.charAt(iFormat); + } + } else { + switch (format.charAt(iFormat)) { + case "d": + output += formatNumber("d", date.getDate(), 2); + break; + case "D": + output += formatName("D", date.getDay(), dayNamesShort, dayNames); + break; + case "o": + output += formatNumber("o", + Math.round((new Date(date.getFullYear(), date.getMonth(), date.getDate()).getTime() - new Date(date.getFullYear(), 0, 0).getTime()) / 86400000), 3); + break; + case "m": + output += formatNumber("m", date.getMonth() + 1, 2); + break; + case "M": + output += formatName("M", date.getMonth(), monthNamesShort, monthNames); + break; + case "y": + output += (lookAhead("y") ? date.getFullYear() : + (date.getYear() % 100 < 10 ? "0" : "") + date.getYear() % 100); + break; + case "@": + output += date.getTime(); + break; + case "!": + output += date.getTime() * 10000 + this._ticksTo1970; + break; + case "'": + if (lookAhead("'")) { + output += "'"; + } else { + literal = true; + } + break; + default: + output += format.charAt(iFormat); + } + } + } + } + return output; + }, + + /* Extract all possible characters from the date format. 
*/ + _possibleChars: function (format) { + var iFormat, + chars = "", + literal = false, + // Check whether a format character is doubled + lookAhead = function(match) { + var matches = (iFormat + 1 < format.length && format.charAt(iFormat + 1) === match); + if (matches) { + iFormat++; + } + return matches; + }; + + for (iFormat = 0; iFormat < format.length; iFormat++) { + if (literal) { + if (format.charAt(iFormat) === "'" && !lookAhead("'")) { + literal = false; + } else { + chars += format.charAt(iFormat); + } + } else { + switch (format.charAt(iFormat)) { + case "d": case "m": case "y": case "@": + chars += "0123456789"; + break; + case "D": case "M": + return null; // Accept anything + case "'": + if (lookAhead("'")) { + chars += "'"; + } else { + literal = true; + } + break; + default: + chars += format.charAt(iFormat); + } + } + } + return chars; + }, + + /* Get a setting value, defaulting if necessary. */ + _get: function(inst, name) { + return inst.settings[name] !== undefined ? + inst.settings[name] : this._defaults[name]; + }, + + /* Parse existing date and initialise date picker. */ + _setDateFromField: function(inst, noDefault) { + if (inst.input.val() === inst.lastVal) { + return; + } + + var dateFormat = this._get(inst, "dateFormat"), + dates = inst.lastVal = inst.input ? inst.input.val() : null, + defaultDate = this._getDefaultDate(inst), + date = defaultDate, + settings = this._getFormatConfig(inst); + + try { + date = this.parseDate(dateFormat, dates, settings) || defaultDate; + } catch (event) { + dates = (noDefault ? "" : dates); + } + inst.selectedDay = date.getDate(); + inst.drawMonth = inst.selectedMonth = date.getMonth(); + inst.drawYear = inst.selectedYear = date.getFullYear(); + inst.currentDay = (dates ? date.getDate() : 0); + inst.currentMonth = (dates ? date.getMonth() : 0); + inst.currentYear = (dates ? date.getFullYear() : 0); + this._adjustInstDate(inst); + }, + + /* Retrieve the default date shown on opening. 
*/ + _getDefaultDate: function(inst) { + return this._restrictMinMax(inst, + this._determineDate(inst, this._get(inst, "defaultDate"), new Date())); + }, + + /* A date may be specified as an exact value or a relative one. */ + _determineDate: function(inst, date, defaultDate) { + var offsetNumeric = function(offset) { + var date = new Date(); + date.setDate(date.getDate() + offset); + return date; + }, + offsetString = function(offset) { + try { + return $.datepicker.parseDate($.datepicker._get(inst, "dateFormat"), + offset, $.datepicker._getFormatConfig(inst)); + } + catch (e) { + // Ignore + } + + var date = (offset.toLowerCase().match(/^c/) ? + $.datepicker._getDate(inst) : null) || new Date(), + year = date.getFullYear(), + month = date.getMonth(), + day = date.getDate(), + pattern = /([+\-]?[0-9]+)\s*(d|D|w|W|m|M|y|Y)?/g, + matches = pattern.exec(offset); + + while (matches) { + switch (matches[2] || "d") { + case "d" : case "D" : + day += parseInt(matches[1],10); break; + case "w" : case "W" : + day += parseInt(matches[1],10) * 7; break; + case "m" : case "M" : + month += parseInt(matches[1],10); + day = Math.min(day, $.datepicker._getDaysInMonth(year, month)); + break; + case "y": case "Y" : + year += parseInt(matches[1],10); + day = Math.min(day, $.datepicker._getDaysInMonth(year, month)); + break; + } + matches = pattern.exec(offset); + } + return new Date(year, month, day); + }, + newDate = (date == null || date === "" ? defaultDate : (typeof date === "string" ? offsetString(date) : + (typeof date === "number" ? (isNaN(date) ? defaultDate : offsetNumeric(date)) : new Date(date.getTime())))); + + newDate = (newDate && newDate.toString() === "Invalid Date" ? defaultDate : newDate); + if (newDate) { + newDate.setHours(0); + newDate.setMinutes(0); + newDate.setSeconds(0); + newDate.setMilliseconds(0); + } + return this._daylightSavingAdjust(newDate); + }, + + /* Handle switch to/from daylight saving. 
+ * Hours may be non-zero on daylight saving cut-over: + * > 12 when midnight changeover, but then cannot generate + * midnight datetime, so jump to 1AM, otherwise reset. + * @param date (Date) the date to check + * @return (Date) the corrected date + */ + _daylightSavingAdjust: function(date) { + if (!date) { + return null; + } + date.setHours(date.getHours() > 12 ? date.getHours() + 2 : 0); + return date; + }, + + /* Set the date(s) directly. */ + _setDate: function(inst, date, noChange) { + var clear = !date, + origMonth = inst.selectedMonth, + origYear = inst.selectedYear, + newDate = this._restrictMinMax(inst, this._determineDate(inst, date, new Date())); + + inst.selectedDay = inst.currentDay = newDate.getDate(); + inst.drawMonth = inst.selectedMonth = inst.currentMonth = newDate.getMonth(); + inst.drawYear = inst.selectedYear = inst.currentYear = newDate.getFullYear(); + if ((origMonth !== inst.selectedMonth || origYear !== inst.selectedYear) && !noChange) { + this._notifyChange(inst); + } + this._adjustInstDate(inst); + if (inst.input) { + inst.input.val(clear ? "" : this._formatDate(inst)); + } + }, + + /* Retrieve the date(s) directly. */ + _getDate: function(inst) { + var startDate = (!inst.currentYear || (inst.input && inst.input.val() === "") ? null : + this._daylightSavingAdjust(new Date( + inst.currentYear, inst.currentMonth, inst.currentDay))); + return startDate; + }, + + /* Attach the onxxx handlers. These are declared statically so + * they work with static code transformers like Caja. 
+ */ + _attachHandlers: function(inst) { + var stepMonths = this._get(inst, "stepMonths"), + id = "#" + inst.id.replace( /\\\\/g, "\\" ); + inst.dpDiv.find("[data-handler]").map(function () { + var handler = { + prev: function () { + $.datepicker._adjustDate(id, -stepMonths, "M"); + }, + next: function () { + $.datepicker._adjustDate(id, +stepMonths, "M"); + }, + hide: function () { + $.datepicker._hideDatepicker(); + }, + today: function () { + $.datepicker._gotoToday(id); + }, + selectDay: function () { + $.datepicker._selectDay(id, +this.getAttribute("data-month"), +this.getAttribute("data-year"), this); + return false; + }, + selectMonth: function () { + $.datepicker._selectMonthYear(id, this, "M"); + return false; + }, + selectYear: function () { + $.datepicker._selectMonthYear(id, this, "Y"); + return false; + } + }; + $(this).bind(this.getAttribute("data-event"), handler[this.getAttribute("data-handler")]); + }); + }, + + /* Generate the HTML for the current state of the date picker. 
*/ + _generateHTML: function(inst) { + var maxDraw, prevText, prev, nextText, next, currentText, gotoDate, + controls, buttonPanel, firstDay, showWeek, dayNames, dayNamesMin, + monthNames, monthNamesShort, beforeShowDay, showOtherMonths, + selectOtherMonths, defaultDate, html, dow, row, group, col, selectedDate, + cornerClass, calender, thead, day, daysInMonth, leadDays, curRows, numRows, + printDate, dRow, tbody, daySettings, otherMonth, unselectable, + tempDate = new Date(), + today = this._daylightSavingAdjust( + new Date(tempDate.getFullYear(), tempDate.getMonth(), tempDate.getDate())), // clear time + isRTL = this._get(inst, "isRTL"), + showButtonPanel = this._get(inst, "showButtonPanel"), + hideIfNoPrevNext = this._get(inst, "hideIfNoPrevNext"), + navigationAsDateFormat = this._get(inst, "navigationAsDateFormat"), + numMonths = this._getNumberOfMonths(inst), + showCurrentAtPos = this._get(inst, "showCurrentAtPos"), + stepMonths = this._get(inst, "stepMonths"), + isMultiMonth = (numMonths[0] !== 1 || numMonths[1] !== 1), + currentDate = this._daylightSavingAdjust((!inst.currentDay ? new Date(9999, 9, 9) : + new Date(inst.currentYear, inst.currentMonth, inst.currentDay))), + minDate = this._getMinMaxDate(inst, "min"), + maxDate = this._getMinMaxDate(inst, "max"), + drawMonth = inst.drawMonth - showCurrentAtPos, + drawYear = inst.drawYear; + + if (drawMonth < 0) { + drawMonth += 12; + drawYear--; + } + if (maxDate) { + maxDraw = this._daylightSavingAdjust(new Date(maxDate.getFullYear(), + maxDate.getMonth() - (numMonths[0] * numMonths[1]) + 1, maxDate.getDate())); + maxDraw = (minDate && maxDraw < minDate ? minDate : maxDraw); + while (this._daylightSavingAdjust(new Date(drawYear, drawMonth, 1)) > maxDraw) { + drawMonth--; + if (drawMonth < 0) { + drawMonth = 11; + drawYear--; + } + } + } + inst.drawMonth = drawMonth; + inst.drawYear = drawYear; + + prevText = this._get(inst, "prevText"); + prevText = (!navigationAsDateFormat ? 
prevText : this.formatDate(prevText, + this._daylightSavingAdjust(new Date(drawYear, drawMonth - stepMonths, 1)), + this._getFormatConfig(inst))); + + prev = (this._canAdjustMonth(inst, -1, drawYear, drawMonth) ? + "" + prevText + "" : + (hideIfNoPrevNext ? "" : "" + prevText + "")); + + nextText = this._get(inst, "nextText"); + nextText = (!navigationAsDateFormat ? nextText : this.formatDate(nextText, + this._daylightSavingAdjust(new Date(drawYear, drawMonth + stepMonths, 1)), + this._getFormatConfig(inst))); + + next = (this._canAdjustMonth(inst, +1, drawYear, drawMonth) ? + "" + nextText + "" : + (hideIfNoPrevNext ? "" : "" + nextText + "")); + + currentText = this._get(inst, "currentText"); + gotoDate = (this._get(inst, "gotoCurrent") && inst.currentDay ? currentDate : today); + currentText = (!navigationAsDateFormat ? currentText : + this.formatDate(currentText, gotoDate, this._getFormatConfig(inst))); + + controls = (!inst.inline ? "" : ""); + + buttonPanel = (showButtonPanel) ? "
    " + (isRTL ? controls : "") + + (this._isInRange(inst, gotoDate) ? "" : "") + (isRTL ? "" : controls) + "
    " : ""; + + firstDay = parseInt(this._get(inst, "firstDay"),10); + firstDay = (isNaN(firstDay) ? 0 : firstDay); + + showWeek = this._get(inst, "showWeek"); + dayNames = this._get(inst, "dayNames"); + dayNamesMin = this._get(inst, "dayNamesMin"); + monthNames = this._get(inst, "monthNames"); + monthNamesShort = this._get(inst, "monthNamesShort"); + beforeShowDay = this._get(inst, "beforeShowDay"); + showOtherMonths = this._get(inst, "showOtherMonths"); + selectOtherMonths = this._get(inst, "selectOtherMonths"); + defaultDate = this._getDefaultDate(inst); + html = ""; + dow; + for (row = 0; row < numMonths[0]; row++) { + group = ""; + this.maxRows = 4; + for (col = 0; col < numMonths[1]; col++) { + selectedDate = this._daylightSavingAdjust(new Date(drawYear, drawMonth, inst.selectedDay)); + cornerClass = " ui-corner-all"; + calender = ""; + if (isMultiMonth) { + calender += "
    "; + } + calender += "
    " + + (/all|left/.test(cornerClass) && row === 0 ? (isRTL ? next : prev) : "") + + (/all|right/.test(cornerClass) && row === 0 ? (isRTL ? prev : next) : "") + + this._generateMonthYearHeader(inst, drawMonth, drawYear, minDate, maxDate, + row > 0 || col > 0, monthNames, monthNamesShort) + // draw month headers + "
    " + + ""; + thead = (showWeek ? "" : ""); + for (dow = 0; dow < 7; dow++) { // days of the week + day = (dow + firstDay) % 7; + thead += "= 5 ? " class='ui-datepicker-week-end'" : "") + ">" + + "" + dayNamesMin[day] + ""; + } + calender += thead + ""; + daysInMonth = this._getDaysInMonth(drawYear, drawMonth); + if (drawYear === inst.selectedYear && drawMonth === inst.selectedMonth) { + inst.selectedDay = Math.min(inst.selectedDay, daysInMonth); + } + leadDays = (this._getFirstDayOfMonth(drawYear, drawMonth) - firstDay + 7) % 7; + curRows = Math.ceil((leadDays + daysInMonth) / 7); // calculate the number of rows to generate + numRows = (isMultiMonth ? this.maxRows > curRows ? this.maxRows : curRows : curRows); //If multiple months, use the higher number of rows (see #7043) + this.maxRows = numRows; + printDate = this._daylightSavingAdjust(new Date(drawYear, drawMonth, 1 - leadDays)); + for (dRow = 0; dRow < numRows; dRow++) { // create date picker rows + calender += ""; + tbody = (!showWeek ? "" : ""); + for (dow = 0; dow < 7; dow++) { // create date picker days + daySettings = (beforeShowDay ? + beforeShowDay.apply((inst.input ? inst.input[0] : null), [printDate]) : [true, ""]); + otherMonth = (printDate.getMonth() !== drawMonth); + unselectable = (otherMonth && !selectOtherMonths) || !daySettings[0] || + (minDate && printDate < minDate) || (maxDate && printDate > maxDate); + tbody += ""; // display selectable date + printDate.setDate(printDate.getDate() + 1); + printDate = this._daylightSavingAdjust(printDate); + } + calender += tbody + ""; + } + drawMonth++; + if (drawMonth > 11) { + drawMonth = 0; + drawYear++; + } + calender += "
    " + this._get(inst, "weekHeader") + "
    " + + this._get(inst, "calculateWeek")(printDate) + "" + // actions + (otherMonth && !showOtherMonths ? " " : // display for other months + (unselectable ? "" + printDate.getDate() + "" : "" + printDate.getDate() + "")) + "
    " + (isMultiMonth ? "
    " + + ((numMonths[0] > 0 && col === numMonths[1]-1) ? "
    " : "") : ""); + group += calender; + } + html += group; + } + html += buttonPanel; + inst._keyEvent = false; + return html; + }, + + /* Generate the month and year header. */ + _generateMonthYearHeader: function(inst, drawMonth, drawYear, minDate, maxDate, + secondary, monthNames, monthNamesShort) { + + var inMinYear, inMaxYear, month, years, thisYear, determineYear, year, endYear, + changeMonth = this._get(inst, "changeMonth"), + changeYear = this._get(inst, "changeYear"), + showMonthAfterYear = this._get(inst, "showMonthAfterYear"), + html = "
    ", + monthHtml = ""; + + // month selection + if (secondary || !changeMonth) { + monthHtml += "" + monthNames[drawMonth] + ""; + } else { + inMinYear = (minDate && minDate.getFullYear() === drawYear); + inMaxYear = (maxDate && maxDate.getFullYear() === drawYear); + monthHtml += ""; + } + + if (!showMonthAfterYear) { + html += monthHtml + (secondary || !(changeMonth && changeYear) ? " " : ""); + } + + // year selection + if ( !inst.yearshtml ) { + inst.yearshtml = ""; + if (secondary || !changeYear) { + html += "" + drawYear + ""; + } else { + // determine range of years to display + years = this._get(inst, "yearRange").split(":"); + thisYear = new Date().getFullYear(); + determineYear = function(value) { + var year = (value.match(/c[+\-].*/) ? drawYear + parseInt(value.substring(1), 10) : + (value.match(/[+\-].*/) ? thisYear + parseInt(value, 10) : + parseInt(value, 10))); + return (isNaN(year) ? thisYear : year); + }; + year = determineYear(years[0]); + endYear = Math.max(year, determineYear(years[1] || "")); + year = (minDate ? Math.max(year, minDate.getFullYear()) : year); + endYear = (maxDate ? Math.min(endYear, maxDate.getFullYear()) : endYear); + inst.yearshtml += ""; + + html += inst.yearshtml; + inst.yearshtml = null; + } + } + + html += this._get(inst, "yearSuffix"); + if (showMonthAfterYear) { + html += (secondary || !(changeMonth && changeYear) ? " " : "") + monthHtml; + } + html += "
    "; // Close datepicker_header + return html; + }, + + /* Adjust one of the date sub-fields. */ + _adjustInstDate: function(inst, offset, period) { + var year = inst.drawYear + (period === "Y" ? offset : 0), + month = inst.drawMonth + (period === "M" ? offset : 0), + day = Math.min(inst.selectedDay, this._getDaysInMonth(year, month)) + (period === "D" ? offset : 0), + date = this._restrictMinMax(inst, this._daylightSavingAdjust(new Date(year, month, day))); + + inst.selectedDay = date.getDate(); + inst.drawMonth = inst.selectedMonth = date.getMonth(); + inst.drawYear = inst.selectedYear = date.getFullYear(); + if (period === "M" || period === "Y") { + this._notifyChange(inst); + } + }, + + /* Ensure a date is within any min/max bounds. */ + _restrictMinMax: function(inst, date) { + var minDate = this._getMinMaxDate(inst, "min"), + maxDate = this._getMinMaxDate(inst, "max"), + newDate = (minDate && date < minDate ? minDate : date); + return (maxDate && newDate > maxDate ? maxDate : newDate); + }, + + /* Notify change of month/year. */ + _notifyChange: function(inst) { + var onChange = this._get(inst, "onChangeMonthYear"); + if (onChange) { + onChange.apply((inst.input ? inst.input[0] : null), + [inst.selectedYear, inst.selectedMonth + 1, inst]); + } + }, + + /* Determine the number of months to show. */ + _getNumberOfMonths: function(inst) { + var numMonths = this._get(inst, "numberOfMonths"); + return (numMonths == null ? [1, 1] : (typeof numMonths === "number" ? [1, numMonths] : numMonths)); + }, + + /* Determine the current maximum date - ensure no time components are set. */ + _getMinMaxDate: function(inst, minMax) { + return this._determineDate(inst, this._get(inst, minMax + "Date"), null); + }, + + /* Find the number of days in a given month. */ + _getDaysInMonth: function(year, month) { + return 32 - this._daylightSavingAdjust(new Date(year, month, 32)).getDate(); + }, + + /* Find the day of the week of the first of a month. 
*/ + _getFirstDayOfMonth: function(year, month) { + return new Date(year, month, 1).getDay(); + }, + + /* Determines if we should allow a "next/prev" month display change. */ + _canAdjustMonth: function(inst, offset, curYear, curMonth) { + var numMonths = this._getNumberOfMonths(inst), + date = this._daylightSavingAdjust(new Date(curYear, + curMonth + (offset < 0 ? offset : numMonths[0] * numMonths[1]), 1)); + + if (offset < 0) { + date.setDate(this._getDaysInMonth(date.getFullYear(), date.getMonth())); + } + return this._isInRange(inst, date); + }, + + /* Is the given date in the accepted range? */ + _isInRange: function(inst, date) { + var yearSplit, currentYear, + minDate = this._getMinMaxDate(inst, "min"), + maxDate = this._getMinMaxDate(inst, "max"), + minYear = null, + maxYear = null, + years = this._get(inst, "yearRange"); + if (years){ + yearSplit = years.split(":"); + currentYear = new Date().getFullYear(); + minYear = parseInt(yearSplit[0], 10); + maxYear = parseInt(yearSplit[1], 10); + if ( yearSplit[0].match(/[+\-].*/) ) { + minYear += currentYear; + } + if ( yearSplit[1].match(/[+\-].*/) ) { + maxYear += currentYear; + } + } + + return ((!minDate || date.getTime() >= minDate.getTime()) && + (!maxDate || date.getTime() <= maxDate.getTime()) && + (!minYear || date.getFullYear() >= minYear) && + (!maxYear || date.getFullYear() <= maxYear)); + }, + + /* Provide the configuration settings for formatting/parsing. */ + _getFormatConfig: function(inst) { + var shortYearCutoff = this._get(inst, "shortYearCutoff"); + shortYearCutoff = (typeof shortYearCutoff !== "string" ? shortYearCutoff : + new Date().getFullYear() % 100 + parseInt(shortYearCutoff, 10)); + return {shortYearCutoff: shortYearCutoff, + dayNamesShort: this._get(inst, "dayNamesShort"), dayNames: this._get(inst, "dayNames"), + monthNamesShort: this._get(inst, "monthNamesShort"), monthNames: this._get(inst, "monthNames")}; + }, + + /* Format the given date for display. 
*/ + _formatDate: function(inst, day, month, year) { + if (!day) { + inst.currentDay = inst.selectedDay; + inst.currentMonth = inst.selectedMonth; + inst.currentYear = inst.selectedYear; + } + var date = (day ? (typeof day === "object" ? day : + this._daylightSavingAdjust(new Date(year, month, day))) : + this._daylightSavingAdjust(new Date(inst.currentYear, inst.currentMonth, inst.currentDay))); + return this.formatDate(this._get(inst, "dateFormat"), date, this._getFormatConfig(inst)); + } +}); + +/* + * Bind hover events for datepicker elements. + * Done via delegate so the binding only occurs once in the lifetime of the parent div. + * Global instActive, set by _updateDatepicker allows the handlers to find their way back to the active picker. + */ +function bindHover(dpDiv) { + var selector = "button, .ui-datepicker-prev, .ui-datepicker-next, .ui-datepicker-calendar td a"; + return dpDiv.delegate(selector, "mouseout", function() { + $(this).removeClass("ui-state-hover"); + if (this.className.indexOf("ui-datepicker-prev") !== -1) { + $(this).removeClass("ui-datepicker-prev-hover"); + } + if (this.className.indexOf("ui-datepicker-next") !== -1) { + $(this).removeClass("ui-datepicker-next-hover"); + } + }) + .delegate(selector, "mouseover", function(){ + if (!$.datepicker._isDisabledDatepicker( instActive.inline ? dpDiv.parent()[0] : instActive.input[0])) { + $(this).parents(".ui-datepicker-calendar").find("a").removeClass("ui-state-hover"); + $(this).addClass("ui-state-hover"); + if (this.className.indexOf("ui-datepicker-prev") !== -1) { + $(this).addClass("ui-datepicker-prev-hover"); + } + if (this.className.indexOf("ui-datepicker-next") !== -1) { + $(this).addClass("ui-datepicker-next-hover"); + } + } + }); +} + +/* jQuery extend now ignores nulls! 
*/ +function extendRemove(target, props) { + $.extend(target, props); + for (var name in props) { + if (props[name] == null) { + target[name] = props[name]; + } + } + return target; +} + +/* Invoke the datepicker functionality. + @param options string - a command, optionally followed by additional parameters or + Object - settings for attaching new datepicker functionality + @return jQuery object */ +$.fn.datepicker = function(options){ + + /* Verify an empty collection wasn't passed - Fixes #6976 */ + if ( !this.length ) { + return this; + } + + /* Initialise the date picker. */ + if (!$.datepicker.initialized) { + $(document).mousedown($.datepicker._checkExternalClick); + $.datepicker.initialized = true; + } + + /* Append datepicker main container to body if not exist. */ + if ($("#"+$.datepicker._mainDivId).length === 0) { + $("body").append($.datepicker.dpDiv); + } + + var otherArgs = Array.prototype.slice.call(arguments, 1); + if (typeof options === "string" && (options === "isDisabled" || options === "getDate" || options === "widget")) { + return $.datepicker["_" + options + "Datepicker"]. + apply($.datepicker, [this[0]].concat(otherArgs)); + } + if (options === "option" && arguments.length === 2 && typeof arguments[1] === "string") { + return $.datepicker["_" + options + "Datepicker"]. + apply($.datepicker, [this[0]].concat(otherArgs)); + } + return this.each(function() { + typeof options === "string" ? + $.datepicker["_" + options + "Datepicker"]. 
+ apply($.datepicker, [this].concat(otherArgs)) : + $.datepicker._attachDatepicker(this, options); + }); +}; + +$.datepicker = new Datepicker(); // singleton instance +$.datepicker.initialized = false; +$.datepicker.uuid = new Date().getTime(); +$.datepicker.version = "1.10.4"; + +})(jQuery); +(function( $, undefined ) { + +var sizeRelatedOptions = { + buttons: true, + height: true, + maxHeight: true, + maxWidth: true, + minHeight: true, + minWidth: true, + width: true + }, + resizableRelatedOptions = { + maxHeight: true, + maxWidth: true, + minHeight: true, + minWidth: true + }; + +$.widget( "ui.dialog", { + version: "1.10.4", + options: { + appendTo: "body", + autoOpen: true, + buttons: [], + closeOnEscape: true, + closeText: "close", + dialogClass: "", + draggable: true, + hide: null, + height: "auto", + maxHeight: null, + maxWidth: null, + minHeight: 150, + minWidth: 150, + modal: false, + position: { + my: "center", + at: "center", + of: window, + collision: "fit", + // Ensure the titlebar is always visible + using: function( pos ) { + var topOffset = $( this ).css( pos ).offset().top; + if ( topOffset < 0 ) { + $( this ).css( "top", pos.top - topOffset ); + } + } + }, + resizable: true, + show: null, + title: null, + width: 300, + + // callbacks + beforeClose: null, + close: null, + drag: null, + dragStart: null, + dragStop: null, + focus: null, + open: null, + resize: null, + resizeStart: null, + resizeStop: null + }, + + _create: function() { + this.originalCss = { + display: this.element[0].style.display, + width: this.element[0].style.width, + minHeight: this.element[0].style.minHeight, + maxHeight: this.element[0].style.maxHeight, + height: this.element[0].style.height + }; + this.originalPosition = { + parent: this.element.parent(), + index: this.element.parent().children().index( this.element ) + }; + this.originalTitle = this.element.attr("title"); + this.options.title = this.options.title || this.originalTitle; + + this._createWrapper(); + + 
this.element + .show() + .removeAttr("title") + .addClass("ui-dialog-content ui-widget-content") + .appendTo( this.uiDialog ); + + this._createTitlebar(); + this._createButtonPane(); + + if ( this.options.draggable && $.fn.draggable ) { + this._makeDraggable(); + } + if ( this.options.resizable && $.fn.resizable ) { + this._makeResizable(); + } + + this._isOpen = false; + }, + + _init: function() { + if ( this.options.autoOpen ) { + this.open(); + } + }, + + _appendTo: function() { + var element = this.options.appendTo; + if ( element && (element.jquery || element.nodeType) ) { + return $( element ); + } + return this.document.find( element || "body" ).eq( 0 ); + }, + + _destroy: function() { + var next, + originalPosition = this.originalPosition; + + this._destroyOverlay(); + + this.element + .removeUniqueId() + .removeClass("ui-dialog-content ui-widget-content") + .css( this.originalCss ) + // Without detaching first, the following becomes really slow + .detach(); + + this.uiDialog.stop( true, true ).remove(); + + if ( this.originalTitle ) { + this.element.attr( "title", this.originalTitle ); + } + + next = originalPosition.parent.children().eq( originalPosition.index ); + // Don't try to place the dialog next to itself (#8613) + if ( next.length && next[0] !== this.element[0] ) { + next.before( this.element ); + } else { + originalPosition.parent.append( this.element ); + } + }, + + widget: function() { + return this.uiDialog; + }, + + disable: $.noop, + enable: $.noop, + + close: function( event ) { + var activeElement, + that = this; + + if ( !this._isOpen || this._trigger( "beforeClose", event ) === false ) { + return; + } + + this._isOpen = false; + this._destroyOverlay(); + + if ( !this.opener.filter(":focusable").focus().length ) { + + // support: IE9 + // IE9 throws an "Unspecified error" accessing document.activeElement from an