diff --git a/.gitignore b/.gitignore index a92c3ab39d..5c29ccd9b5 100644 --- a/.gitignore +++ b/.gitignore @@ -40,3 +40,13 @@ coverage.txt btcec/coverage.txt btcutil/coverage.txt btcutil/psbt/coverage.txt + +# vim +*.swp + +# Binaries produced by "make build" +/addblock +/btcctl +/btcd +/findcheckpoint +/gencerts diff --git a/Dockerfile b/Dockerfile index 58e4b59aec..5ed3e63c10 100644 --- a/Dockerfile +++ b/Dockerfile @@ -24,7 +24,6 @@ ARG ARCH=amd64 FROM golang@sha256:c80567372be0d486766593cc722d3401038e2f150a0f6c5c719caa63afb4026a AS build-container ARG ARCH -ENV GO111MODULE=on ADD . /app WORKDIR /app @@ -35,7 +34,7 @@ RUN set -ex \ && echo "Compiling for $GOARCH" \ && go install -v . ./cmd/... -FROM $ARCH/alpine:3.12 +FROM $ARCH/alpine:3.16 COPY --from=build-container /go/bin /bin diff --git a/LICENSE b/LICENSE index 23190babb7..46dcd39508 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ ISC License -Copyright (c) 2013-2022 The btcsuite developers +Copyright (c) 2013-2023 The btcsuite developers Copyright (c) 2015-2016 The Decred developers Permission to use, copy, modify, and distribute this software for any diff --git a/Makefile b/Makefile index 2e967ba0f6..5bfb1aa6a5 100644 --- a/Makefile +++ b/Makefile @@ -11,12 +11,12 @@ GOACC_BIN := $(GO_BIN)/go-acc LINT_COMMIT := v1.18.0 GOACC_COMMIT := 80342ae2e0fcf265e99e76bcc4efd022c7c3811b -DEPGET := cd /tmp && GO111MODULE=on go get -v -GOBUILD := GO111MODULE=on go build -v -GOINSTALL := GO111MODULE=on go install -v +DEPGET := cd /tmp && go get -v +GOBUILD := go build -v +GOINSTALL := go install -v DEV_TAGS := rpctest -GOTEST_DEV = GO111MODULE=on go test -v -tags=$(DEV_TAGS) -GOTEST := GO111MODULE=on go test -v +GOTEST_DEV = go test -v -tags=$(DEV_TAGS) +GOTEST := go test -v GOFILES_NOVENDOR = $(shell find . 
-type f -name '*.go' -not -path "./vendor/*") @@ -71,6 +71,19 @@ build: $(GOBUILD) $(PKG)/cmd/findcheckpoint $(GOBUILD) $(PKG)/cmd/addblock +install: + @$(call print, "Installing all binaries") + $(GOINSTALL) $(PKG) + $(GOINSTALL) $(PKG)/cmd/btcctl + $(GOINSTALL) $(PKG)/cmd/gencerts + $(GOINSTALL) $(PKG)/cmd/findcheckpoint + $(GOINSTALL) $(PKG)/cmd/addblock + +release-install: + @$(call print, "Installing btcd and btcctl release binaries") + env CGO_ENABLED=0 $(GOINSTALL) -trimpath -ldflags="-s -w -buildid=" $(PKG) + env CGO_ENABLED=0 $(GOINSTALL) -trimpath -ldflags="-s -w -buildid=" $(PKG)/cmd/btcctl + # ======= # TESTING # ======= diff --git a/README.md b/README.md index 5ec1454fcc..f70f3f9145 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ which are both under active development. ## Requirements -[Go](http://golang.org) 1.16 or newer. +[Go](http://golang.org) 1.17 or newer. ## Installation @@ -63,7 +63,7 @@ recommended that `GOPATH` is set to a directory in your home directory such as ```bash $ cd $GOPATH/src/github.com/btcsuite/btcd -$ GO111MODULE=on go install -v . ./cmd/... +$ go install -v . ./cmd/... ``` - btcd (and utilities) will now be installed in ```$GOPATH/bin```. If you did @@ -79,7 +79,7 @@ $ GO111MODULE=on go install -v . ./cmd/... ```bash $ cd $GOPATH/src/github.com/btcsuite/btcd $ git pull -$ GO111MODULE=on go install -v . ./cmd/... +$ go install -v . ./cmd/... ``` ## Getting Started diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..e06625c87a --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,15 @@ +# Security Policy + +## Supported Versions + +The last major `btcd` release is to be considered the current support version. +Given an issue severe enough, a backport will be issued either to the prior +major release or the set of releases considered utilized enough. + +## Reporting a Vulnerability + +To report security issues, send an email to security@lightning.engineering +(this list isn't to be used for support). 
+ +The following key can be used to communicate sensitive information: `91FE 464C +D751 01DA 6B6B AB60 555C 6465 E5BC B3AF`. diff --git a/addrmgr/doc.go b/addrmgr/doc.go index 8ddc8bfdfb..c500fbb5be 100644 --- a/addrmgr/doc.go +++ b/addrmgr/doc.go @@ -5,7 +5,7 @@ /* Package addrmgr implements concurrency safe Bitcoin address manager. -Address Manager Overview +# Address Manager Overview In order maintain the peer-to-peer Bitcoin network, there needs to be a source of addresses to connect to as nodes come and go. The Bitcoin protocol provides diff --git a/addrmgr/network.go b/addrmgr/network.go index 7f30901b21..95555a69c5 100644 --- a/addrmgr/network.go +++ b/addrmgr/network.go @@ -20,7 +20,7 @@ var ( ipNet("192.168.0.0", 16, 32), } - // rfc2544Net specifies the the IPv4 block as defined by RFC2544 + // rfc2544Net specifies the IPv4 block as defined by RFC2544 // (198.18.0.0/15) rfc2544Net = ipNet("198.18.0.0", 15, 32) diff --git a/blockchain/accept.go b/blockchain/accept.go index 44ccbf997a..935963148f 100644 --- a/blockchain/accept.go +++ b/blockchain/accept.go @@ -7,8 +7,8 @@ package blockchain import ( "fmt" - "github.com/btcsuite/btcd/database" "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/database" ) // maybeAcceptBlock potentially accepts a block into the block chain and, if diff --git a/blockchain/bench_test.go b/blockchain/bench_test.go index eee4340bc8..db6f415013 100644 --- a/blockchain/bench_test.go +++ b/blockchain/bench_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/wire" ) // BenchmarkIsCoinBase performs a simple benchmark against the IsCoinBase @@ -29,3 +30,46 @@ func BenchmarkIsCoinBaseTx(b *testing.B) { IsCoinBaseTx(tx) } } + +func BenchmarkUtxoFetchMap(b *testing.B) { + block := Block100000 + transactions := block.Transactions + b.ResetTimer() + + for i := 0; i < b.N; i++ { + needed := make(map[wire.OutPoint]struct{}, len(transactions)) + for _, tx := range 
transactions[1:] { + for _, txIn := range tx.TxIn { + needed[txIn.PreviousOutPoint] = struct{}{} + } + } + } +} + +func BenchmarkUtxoFetchSlices(b *testing.B) { + block := Block100000 + transactions := block.Transactions + b.ResetTimer() + + for i := 0; i < b.N; i++ { + needed := make([]wire.OutPoint, 0, len(transactions)) + for _, tx := range transactions[1:] { + for _, txIn := range tx.TxIn { + needed = append(needed, txIn.PreviousOutPoint) + } + } + } +} + +func BenchmarkAncestor(b *testing.B) { + height := 1 << 19 + blockNodes := chainedNodes(nil, height) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + blockNodes[len(blockNodes)-1].Ancestor(0) + for j := 0; j <= 19; j++ { + blockNodes[len(blockNodes)-1].Ancestor(1 << j) + } + } +} diff --git a/blockchain/blockindex.go b/blockchain/blockindex.go index 2ff2fa27c4..ca3235f79f 100644 --- a/blockchain/blockindex.go +++ b/blockchain/blockindex.go @@ -74,6 +74,9 @@ type blockNode struct { // parent is the parent block for this node. parent *blockNode + // ancestor is a block that is more than one block back from this node. + ancestor *blockNode + // hash is the double sha 256 of the block. hash chainhash.Hash @@ -119,6 +122,7 @@ func initBlockNode(node *blockNode, blockHeader *wire.BlockHeader, parent *block node.parent = parent node.height = parent.height + 1 node.workSum = node.workSum.Add(parent.workSum, node.workSum) + node.buildAncestor() } } @@ -150,6 +154,26 @@ func (node *blockNode) Header() wire.BlockHeader { } } +// invertLowestOne turns the lowest 1 bit in the binary representation of a number into a 0. +func invertLowestOne(n int32) int32 { + return n & (n - 1) +} + +// getAncestorHeight returns a suitable ancestor for the node at the given height. +func getAncestorHeight(height int32) int32 { + // We pop off two 1 bits of the height. + // This results in a maximum of 330 steps to go back to an ancestor + // from height 1<<29. 
+ return invertLowestOne(invertLowestOne(height)) +} + +// buildAncestor sets an ancestor for the given blocknode. +func (node *blockNode) buildAncestor() { + if node.parent != nil { + node.ancestor = node.parent.Ancestor(getAncestorHeight(node.height)) + } +} + // Ancestor returns the ancestor block node at the provided height by following // the chain backwards from this node. The returned block will be nil when a // height is requested that is after the height of the passed node or is less @@ -161,14 +185,81 @@ func (node *blockNode) Ancestor(height int32) *blockNode { return nil } + // Traverse back until we find the desired node. n := node - for ; n != nil && n.height != height; n = n.parent { - // Intentionally left blank + for n != nil && n.height != height { + // If there's an ancestor available, use it. Otherwise, just + // follow the parent. + if n.ancestor != nil { + // Calculate the height for this ancestor and + // check if we can take the ancestor skip. + if getAncestorHeight(n.height) >= height { + n = n.ancestor + continue + } + } + + // We couldn't take the ancestor skip so traverse back to the parent. + n = n.parent } return n } +// Height returns the blockNode's height in the chain. +// +// NOTE: Part of the HeaderCtx interface. +func (node *blockNode) Height() int32 { + return node.height +} + +// Bits returns the blockNode's nBits. +// +// NOTE: Part of the HeaderCtx interface. +func (node *blockNode) Bits() uint32 { + return node.bits +} + +// Timestamp returns the blockNode's timestamp. +// +// NOTE: Part of the HeaderCtx interface. +func (node *blockNode) Timestamp() int64 { + return node.timestamp +} + +// Parent returns the blockNode's parent. +// +// NOTE: Part of the HeaderCtx interface. +func (node *blockNode) Parent() HeaderCtx { + if node.parent == nil { + // This is required since node.parent is a *blockNode and if we + // do not explicitly return nil here, the caller may fail when + // nil-checking this. 
+ return nil + } + + return node.parent +} + +// RelativeAncestorCtx returns the blockNode's ancestor that is distance blocks +// before it in the chain. This is equivalent to the RelativeAncestor function +// below except that the return type is different. +// +// This function is safe for concurrent access. +// +// NOTE: Part of the HeaderCtx interface. +func (node *blockNode) RelativeAncestorCtx(distance int32) HeaderCtx { + ancestor := node.RelativeAncestor(distance) + if ancestor == nil { + // This is required since RelativeAncestor returns a *blockNode + // and if we do not explicitly return nil here, the caller may + // fail when nil-checking this. + return nil + } + + return ancestor +} + // RelativeAncestor returns the ancestor block node a relative 'distance' blocks // before this node. This is equivalent to calling Ancestor with the node's // height minus provided distance. @@ -182,17 +273,17 @@ func (node *blockNode) RelativeAncestor(distance int32) *blockNode { // prior to, and including, the block node. // // This function is safe for concurrent access. -func (node *blockNode) CalcPastMedianTime() time.Time { +func CalcPastMedianTime(node HeaderCtx) time.Time { // Create a slice of the previous few block timestamps used to calculate // the median per the number defined by the constant medianTimeBlocks. timestamps := make([]int64, medianTimeBlocks) numNodes := 0 iterNode := node for i := 0; i < medianTimeBlocks && iterNode != nil; i++ { - timestamps[i] = iterNode.timestamp + timestamps[i] = iterNode.Timestamp() numNodes++ - iterNode = iterNode.parent + iterNode = iterNode.Parent() } // Prune the slice to the actual number of available timestamps which @@ -217,6 +308,10 @@ func (node *blockNode) CalcPastMedianTime() time.Time { return time.Unix(medianTimestamp, 0) } +// A compile-time assertion to ensure blockNode implements the HeaderCtx +// interface. 
+var _ HeaderCtx = (*blockNode)(nil) + // blockIndex provides facilities for keeping track of an in-memory index of the // block chain. Although the name block chain suggests a single chain of // blocks, it is actually a tree-shaped structure where any node can have @@ -319,6 +414,44 @@ func (bi *blockIndex) UnsetStatusFlags(node *blockNode, flags blockStatus) { bi.Unlock() } +// InactiveTips returns all the block nodes that aren't in the best chain. +// +// This function is safe for concurrent access. +func (bi *blockIndex) InactiveTips(bestChain *chainView) []*blockNode { + bi.RLock() + defer bi.RUnlock() + + // Look through the entire blockindex and look for nodes that aren't in + // the best chain. We're gonna keep track of all the orphans and the parents + // of the orphans. + orphans := make(map[chainhash.Hash]*blockNode) + orphanParent := make(map[chainhash.Hash]*blockNode) + for hash, node := range bi.index { + found := bestChain.Contains(node) + if !found { + orphans[hash] = node + orphanParent[node.parent.hash] = node.parent + } + } + + // If an orphan isn't pointed to by another orphan, it is a chain tip. + // + // We can check this by looking for the orphan in the orphan parent map. + // If the orphan exists in the orphan parent map, it means that another + // orphan is pointing to it. + tips := make([]*blockNode, 0, len(orphans)) + for hash, orphan := range orphans { + _, found := orphanParent[hash] + if !found { + tips = append(tips, orphan) + } + + delete(orphanParent, hash) + } + + return tips +} + // flushToDB writes all dirty block nodes to the database. If all writes // succeed, this clears the dirty set. 
func (bi *blockIndex) flushToDB() error { diff --git a/blockchain/blockindex_test.go b/blockchain/blockindex_test.go new file mode 100644 index 0000000000..cd08969f14 --- /dev/null +++ b/blockchain/blockindex_test.go @@ -0,0 +1,42 @@ +// Copyright (c) 2023 The utreexo developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package blockchain + +import ( + "math/rand" + "testing" +) + +func TestAncestor(t *testing.T) { + height := 500_000 + blockNodes := chainedNodes(nil, height) + + for i, blockNode := range blockNodes { + // Grab a random node that's a child of this node + // and try to fetch the current blockNode with Ancestor. + randNode := blockNodes[rand.Intn(height-i)+i] + got := randNode.Ancestor(blockNode.height) + + // See if we got the right one. + if got.hash != blockNode.hash { + t.Fatalf("expected ancestor at height %d "+ + "but got a node at height %d", + blockNode.height, got.height) + } + + // Genesis doesn't have ancestors so skip the check below. + if blockNode.height == 0 { + continue + } + + // The ancestors are deterministic so check that this node's + // ancestor is the correct one. + if blockNode.ancestor.height != getAncestorHeight(blockNode.height) { + t.Fatalf("expected ancestor at height %d, but it was at %d", + getAncestorHeight(blockNode.height), + blockNode.ancestor.height) + } + } +} diff --git a/blockchain/chain.go b/blockchain/chain.go index 4d1a839441..60420022ac 100644 --- a/blockchain/chain.go +++ b/blockchain/chain.go @@ -34,8 +34,9 @@ const ( // from the block being located. // // For example, assume a block chain with a side chain as depicted below: -// genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18 -// \-> 16a -> 17a +// +// genesis -> 1 -> 2 -> ... 
-> 15 -> 16 -> 17 -> 18 +// \-> 16a -> 17a // // The block locator for block 17a would be the hashes of blocks: // [17a 16a 15 14 13 12 11 10 9 8 7 6 4 genesis] @@ -114,6 +115,10 @@ type BlockChain struct { // fields in this struct below this point. chainLock sync.RWMutex + // pruneTarget is the size in bytes the database targets for when the node + // is pruned. + pruneTarget uint64 + // These fields are related to the memory block index. They both have // their own locks, however they are often also protected by the chain // lock to help prevent logic races when blocks are being processed. @@ -126,6 +131,10 @@ type BlockChain struct { index *blockIndex bestChain *chainView + // The UTXO state holds a cached view of the UTXO state of the chain. + // It is protected by the chain lock. + utxoCache *utxoCache + // These fields are related to handling of orphan blocks. They are // protected by a combination of the chain lock and the orphan lock. orphanLock sync.RWMutex @@ -386,7 +395,7 @@ func (b *BlockChain) calcSequenceLock(node *blockNode, tx *btcutil.Tx, utxoView // return sequence lock values of -1 indicating that this transaction // can be included within a block at any given height or time. 
mTx := tx.MsgTx() - sequenceLockActive := mTx.Version >= 2 && csvSoftforkActive + sequenceLockActive := uint32(mTx.Version) >= 2 && csvSoftforkActive if !sequenceLockActive || IsCoinBase(tx) { return sequenceLock, nil } @@ -436,7 +445,7 @@ func (b *BlockChain) calcSequenceLock(node *blockNode, tx *btcutil.Tx, utxoView prevInputHeight = 0 } blockNode := node.Ancestor(prevInputHeight) - medianTime := blockNode.CalcPastMedianTime() + medianTime := CalcPastMedianTime(blockNode) // Time based relative time-locks as defined by BIP 68 // have a time granularity of RelativeLockSeconds, so @@ -468,7 +477,7 @@ func (b *BlockChain) calcSequenceLock(node *blockNode, tx *btcutil.Tx, utxoView // LockTimeToSequence converts the passed relative locktime to a sequence // number in accordance to BIP-68. // See: https://github.com/bitcoin/bips/blob/master/bip-0068.mediawiki -// * (Compatibility) +// - (Compatibility) func LockTimeToSequence(isSeconds bool, locktime uint32) uint32 { // If we're expressing the relative lock time in blocks, then the // corresponding sequence number is simply the desired input age. @@ -546,9 +555,14 @@ func (b *BlockChain) getReorganizeNodes(node *blockNode) (*list.List, *list.List // connectBlock handles connecting the passed node/block to the end of the main // (best) chain. // -// This passed utxo view must have all referenced txos the block spends marked -// as spent and all of the new txos the block creates added to it. In addition, -// the passed stxos slice must be populated with all of the information for the +// Passing in a utxo view is optional. If the passed in utxo view is nil, +// connectBlock will assume that the utxo cache has already connected all the +// txs in the block being connected. +// If a utxo view is passed in, this passed utxo view must have all referenced +// txos the block spends marked as spent and all of the new txos the block creates +// added to it. 
+// +// The passed stxos slice must be populated with all of the information for the // spent txos. This approach is used because the connection validation that // must happen prior to calling this function requires the same details, so // it would be inefficient to repeat it. @@ -594,10 +608,61 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, blockSize := uint64(block.MsgBlock().SerializeSize()) blockWeight := uint64(GetBlockWeight(block)) state := newBestState(node, blockSize, blockWeight, numTxns, - curTotalTxns+numTxns, node.CalcPastMedianTime()) + curTotalTxns+numTxns, CalcPastMedianTime(node), + ) + + // If a utxoviewpoint was passed in, we'll be writing that viewpoint + // directly to the database on disk. In order for the database to be + // consistent, we must flush the cache before writing the viewpoint. + if view != nil { + err = b.db.Update(func(dbTx database.Tx) error { + return b.utxoCache.flush(dbTx, FlushRequired, state) + }) + if err != nil { + return err + } + } // Atomically insert info into the database. err = b.db.Update(func(dbTx database.Tx) error { + // If the pruneTarget isn't 0, we should attempt to delete older blocks + // from the database. + if b.pruneTarget != 0 { + // When the total block size is under the prune target, prune blocks is + // a no-op and the deleted hashes are nil. + deletedHashes, err := dbTx.PruneBlocks(b.pruneTarget) + if err != nil { + return err + } + + // Only attempt to delete if we have any deleted blocks. + if len(deletedHashes) != 0 { + // Delete the spend journals of the pruned blocks. + err = dbPruneSpendJournalEntry(dbTx, deletedHashes) + if err != nil { + return err + } + + // We may need to flush if the prune will delete blocks that + // are past our last flush block. + // + // NOTE: the database will never be inconsistent here as the + // actual blocks are not deleted until the db.Update returns. 
+ needsFlush, err := b.flushNeededAfterPrune(deletedHashes) + if err != nil { + return err + } + if needsFlush { + // Since the deleted hashes are past our last + // flush block, flush the utxo cache now. + err = b.utxoCache.flush(dbTx, FlushRequired, state) + if err != nil { + return err + } + } + } + } + // Update best block state. err := dbPutBestState(dbTx, state, node.workSum) if err != nil { @@ -614,6 +679,8 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, // Update the utxo set using the state of the utxo view. This // entails removing all of the utxos spent and adding the new // ones created by the block. + // + // A nil viewpoint is a no-op. err = dbPutUtxoView(dbTx, view) if err != nil { return err @@ -644,7 +711,9 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, // Prune fully spent entries and mark all entries in the view unmodified // now that the modifications have been committed to the database. - view.commit() + if view != nil { + view.commit() + } // This node is now the end of the best chain. b.bestChain.SetTip(node) @@ -665,7 +734,11 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, b.sendNotification(NTBlockConnected, block) b.chainLock.Lock() - return nil + // Since we may have changed the UTXO cache, we make sure it didn't exceed its + // maximum size. If we're pruned and have flushed already, this will be a no-op. 
+ return b.db.Update(func(dbTx database.Tx) error { + return b.utxoCache.flush(dbTx, FlushIfNeeded, state) + }) } // disconnectBlock handles disconnecting the passed node/block from the end of @@ -707,7 +780,7 @@ func (b *BlockChain) disconnectBlock(node *blockNode, block *btcutil.Block, view blockWeight := uint64(GetBlockWeight(prevBlock)) newTotalTxns := curTotalTxns - uint64(len(block.MsgBlock().Transactions)) state := newBestState(prevNode, blockSize, blockWeight, numTxns, - newTotalTxns, prevNode.CalcPastMedianTime()) + newTotalTxns, CalcPastMedianTime(prevNode)) err = b.db.Update(func(dbTx database.Tx) error { // Update best block state. @@ -814,6 +887,15 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error return nil } + // The rest of the reorg depends on all STXOs already being in the database + // so we flush before reorg. + err := b.db.Update(func(dbTx database.Tx) error { + return b.utxoCache.flush(dbTx, FlushRequired, b.BestSnapshot()) + }) + if err != nil { + return err + } + // Ensure the provided nodes match the current best chain. tip := b.bestChain.Tip() if detachNodes.Len() != 0 { @@ -875,7 +957,7 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error // Load all of the utxos referenced by the block that aren't // already in the view. - err = view.fetchInputUtxos(b.db, block) + err = view.fetchInputUtxos(b.db, nil, block) if err != nil { return err } @@ -942,7 +1024,7 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error // checkConnectBlock gets skipped, we still need to update the UTXO // view. if b.index.NodeStatus(n).KnownValid() { - err = view.fetchInputUtxos(b.db, block) + err = view.fetchInputUtxos(b.db, nil, block) if err != nil { return err } @@ -994,7 +1076,7 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error // Load all of the utxos referenced by the block that aren't // already in the view. 
- err := view.fetchInputUtxos(b.db, block) + err := view.fetchInputUtxos(b.db, nil, block) if err != nil { return err } @@ -1021,7 +1103,7 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error // Load all of the utxos referenced by the block that aren't // already in the view. - err := view.fetchInputUtxos(b.db, block) + err := view.fetchInputUtxos(b.db, nil, block) if err != nil { return err } @@ -1043,6 +1125,15 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error } } + // We call the flush at the end to update the last flush hash to the new + // best tip. + err = b.db.Update(func(dbTx database.Tx) error { + return b.utxoCache.flush(dbTx, FlushRequired, b.BestSnapshot()) + }) + if err != nil { + return err + } + // Log the point where the chain forked and old and new best chain // heads. if forkNode != nil { @@ -1067,8 +1158,8 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error // a reorganization to become the main chain). // // The flags modify the behavior of this function as follows: -// - BFFastAdd: Avoids several expensive transaction validation operations. -// This is useful when using checkpoints. +// - BFFastAdd: Avoids several expensive transaction validation operations. +// This is useful when using checkpoints. // // This function MUST be called with the chain state lock held (for writes). func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, flags BehaviorFlags) (bool, error) { @@ -1095,11 +1186,21 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, fla // Perform several checks to verify the block can be connected // to the main chain without violating any rules and without // actually connecting the block. 
- view := NewUtxoViewpoint() - view.SetBestHash(parentHash) - stxos := make([]SpentTxOut, 0, countSpentOutputs(block)) if !fastAdd { - err := b.checkConnectBlock(node, block, view, &stxos) + // We create a viewpoint here to avoid spending or adding new + // coins to the utxo cache. + // + // checkConnectBlock spends and adds utxos before doing the + // signature validation and if the signature validation fails, + // we would be forced to undo the utxo cache. + // + // TODO (kcalvinalvin): Doing all of the validation before connecting + // the tx inside check connect block would allow us to pass the utxo + // cache directly to the check connect block. This would save on the + // expensive memory allocation done by fetch input utxos. + view := NewUtxoViewpoint() + view.SetBestHash(parentHash) + err := b.checkConnectBlock(node, block, view, nil) if err == nil { b.index.SetStatusFlags(node, statusValid) } else if _, ok := err.(RuleError); ok { @@ -1115,23 +1216,16 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, fla } } - // In the fast add case the code to check the block connection - // was skipped, so the utxo view needs to load the referenced - // utxos, spend them, and add the new utxos being created by - // this block. - if fastAdd { - err := view.fetchInputUtxos(b.db, block) - if err != nil { - return false, err - } - err = view.connectTransactions(block, &stxos) - if err != nil { - return false, err - } + // Connect the transactions to the cache. All the txs are considered valid + // at this point as they have passed validation or was considered valid already. + stxos := make([]SpentTxOut, 0, countSpentOutputs(block)) + err := b.utxoCache.connectTransactions(block, &stxos) + if err != nil { + return false, err } // Connect the block to the main chain. 
- err := b.connectBlock(node, block, view, stxos) + err = b.connectBlock(node, block, nil, stxos) if err != nil { // If we got hit with a rule error, then we'll mark // that status of the block as invalid and flush the @@ -1207,8 +1301,8 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, fla // isCurrent returns whether or not the chain believes it is current. Several // factors are used to guess, but the key factors that allow the chain to // believe it is current are: -// - Latest block height is after the latest checkpoint (if enabled) -// - Latest block has a timestamp newer than 24 hours ago +// - Latest block height is after the latest checkpoint (if enabled) +// - Latest block has a timestamp newer than 24 hours ago // // This function MUST be called with the chain state lock held (for reads). func (b *BlockChain) isCurrent() bool { @@ -1231,8 +1325,8 @@ func (b *BlockChain) isCurrent() bool { // IsCurrent returns whether or not the chain believes it is current. Several // factors are used to guess, but the key factors that allow the chain to // believe it is current are: -// - Latest block height is after the latest checkpoint (if enabled) -// - Latest block has a timestamp newer than 24 hours ago +// - Latest block height is after the latest checkpoint (if enabled) +// - Latest block has a timestamp newer than 24 hours ago // // This function is safe for concurrent access. func (b *BlockChain) IsCurrent() bool { @@ -1254,6 +1348,119 @@ func (b *BlockChain) BestSnapshot() *BestState { return snapshot } +// TipStatus is the status of a chain tip. +type TipStatus byte + +const ( + // StatusUnknown indicates that the tip status isn't any of the defined + // statuses. + StatusUnknown TipStatus = iota + + // StatusActive indicates that the tip is considered active and is in + // the best chain. + StatusActive + + // StatusInvalid indicates that this tip or any of the ancestors of this + // tip are invalid. 
+ StatusInvalid + + // StatusValidFork is given if: + // 1: Not a part of the best chain. + // 2: Is not invalid. + // 3: Has the block data stored to disk. + StatusValidFork +) + +// String returns the status flags as string. +func (ts TipStatus) String() string { + switch ts { + case StatusActive: + return "active" + case StatusInvalid: + return "invalid" + case StatusValidFork: + return "valid-fork" + } + return fmt.Sprintf("unknown: %b", ts) +} + +// ChainTip represents the last block in a branch of the block tree. +type ChainTip struct { + // Height of the tip. + Height int32 + + // BlockHash hash of the tip. + BlockHash chainhash.Hash + + // BranchLen is length of the fork point of this chain from the main chain. + // Returns 0 if the chain tip is a part of the best chain. + BranchLen int32 + + // Status is the validity status of the branch this tip is in. + Status TipStatus +} + +// ChainTips returns all the chain tips the node itself is aware of. Each tip is +// represented by its height, block hash, branch length, and status. +// +// This function is safe for concurrent access. +func (b *BlockChain) ChainTips() []ChainTip { + b.chainLock.RLock() + defer b.chainLock.RUnlock() + + // Grab all the inactive tips. + tips := b.index.InactiveTips(b.bestChain) + + // Add the current tip. + tips = append(tips, b.bestChain.Tip()) + + chainTips := make([]ChainTip, 0, len(tips)) + + // Go through all the tips and grab the height, hash, branch length, and the block + // status. + for _, tip := range tips { + var status TipStatus + switch { + // The tip is considered active if it's in the best chain. + case b.bestChain.Contains(tip): + status = StatusActive + + // This block or any of the ancestors of this block are invalid. + case tip.status.KnownInvalid(): + status = StatusInvalid + + // If the tip meets the following criteria: + // 1: Not a part of the best chain. + // 2: Is not invalid. + // 3: Has the block data stored to disk. 
+ // + // The tip is considered a valid fork. + // + // We can check if a tip is a valid-fork by checking that + // its data is available. Since the behavior is to give a + // block node the statusDataStored status once it passes + // the proof of work checks and basic chain validity checks. + // + // We can't use the KnownValid status since it's only given + // to blocks that passed the validation AND were a part of + // the bestChain. + case tip.status.HaveData(): + status = StatusValidFork + } + + chainTip := ChainTip{ + Height: tip.height, + BlockHash: tip.hash, + BranchLen: tip.height - b.bestChain.FindFork(tip).height, + Status: status, + } + + chainTips = append(chainTips, chainTip) + } + + return chainTips +} + // HeaderByHash returns the block header identified by the given hash or an // error if it doesn't exist. Note that this will return headers from both the // main and side chains. @@ -1467,11 +1674,11 @@ func (b *BlockChain) IntervalBlockHashes(endHash *chainhash.Hash, interval int, // // In addition, there are two special cases: // -// - When no locators are provided, the stop hash is treated as a request for -// that block, so it will either return the node associated with the stop hash -// if it is known, or nil if it is unknown -// - When locators are provided, but none of them are known, nodes starting -// after the genesis block will be returned +// - When no locators are provided, the stop hash is treated as a request for +// that block, so it will either return the node associated with the stop hash +// if it is known, or nil if it is unknown +// - When locators are provided, but none of them are known, nodes starting +// after the genesis block will be returned // // This is primarily a helper function for the locateBlocks and locateHeaders // functions. 
@@ -1555,11 +1762,11 @@ func (b *BlockChain) locateBlocks(locator BlockLocator, hashStop *chainhash.Hash // // In addition, there are two special cases: // -// - When no locators are provided, the stop hash is treated as a request for -// that block, so it will either return the stop hash itself if it is known, -// or nil if it is unknown -// - When locators are provided, but none of them are known, hashes starting -// after the genesis block will be returned +// - When no locators are provided, the stop hash is treated as a request for +// that block, so it will either return the stop hash itself if it is known, +// or nil if it is unknown +// - When locators are provided, but none of them are known, hashes starting +// after the genesis block will be returned // // This function is safe for concurrent access. func (b *BlockChain) LocateBlocks(locator BlockLocator, hashStop *chainhash.Hash, maxHashes uint32) []chainhash.Hash { @@ -1600,11 +1807,11 @@ func (b *BlockChain) locateHeaders(locator BlockLocator, hashStop *chainhash.Has // // In addition, there are two special cases: // -// - When no locators are provided, the stop hash is treated as a request for -// that header, so it will either return the header for the stop hash itself -// if it is known, or nil if it is unknown -// - When locators are provided, but none of them are known, headers starting -// after the genesis block will be returned +// - When no locators are provided, the stop hash is treated as a request for +// that header, so it will either return the header for the stop hash itself +// if it is known, or nil if it is unknown +// - When locators are provided, but none of them are known, headers starting +// after the genesis block will be returned // // This function is safe for concurrent access. func (b *BlockChain) LocateHeaders(locator BlockLocator, hashStop *chainhash.Hash) []wire.BlockHeader { @@ -1646,6 +1853,11 @@ type Config struct { // This field is required. 
DB database.DB
 
+	// The maximum size in bytes of the UTXO cache.
+	//
+	// This field is required.
+	UtxoCacheMaxSize uint64
+
 	// Interrupt specifies a channel the caller can close to signal that
 	// long running operations, such as catching up indexes or performing
 	// database migrations, should be interrupted.
@@ -1700,6 +1912,11 @@ type Config struct {
 	// This field can be nil if the caller is not interested in using a
 	// signature cache.
 	HashCache *txscript.HashCache
+
+	// Prune specifies the target database usage (in bytes) the database
+	// will target with block files. Prune at 0 specifies that no
+	// blocks will be deleted.
+	Prune uint64
 }
 
 // New returns a BlockChain instance using the provided configuration details.
@@ -1749,12 +1966,14 @@ func New(config *Config) (*BlockChain, error) {
 		maxRetargetTimespan: targetTimespan * adjustmentFactor,
 		blocksPerRetarget:   int32(targetTimespan / targetTimePerBlock),
 		index:               newBlockIndex(config.DB, params),
+		utxoCache:           newUtxoCache(config.DB, config.UtxoCacheMaxSize),
 		hashCache:           config.HashCache,
 		bestChain:           newChainView(nil),
 		orphans:             make(map[chainhash.Hash]*orphanBlock),
 		prevOrphans:         make(map[chainhash.Hash][]*orphanBlock),
 		warningCaches:       newThresholdCaches(vbNumBits),
 		deploymentCaches:    newThresholdCaches(chaincfg.DefinedDeployments),
+		pruneTarget:         config.Prune,
 	}
 
 	// Ensure all the deployments are synchronized with our clock if
@@ -1797,10 +2016,23 @@ func New(config *Config) (*BlockChain, error) {
 		return nil, err
 	}
 
+	// Make sure the utxo state is caught up if it was left in an inconsistent
+	// state.
 	bestNode := b.bestChain.Tip()
+	if err := b.InitConsistentState(bestNode, config.Interrupt); err != nil {
+		return nil, err
+	}
 
 	log.Infof("Chain state (height %d, hash %v, totaltx %d, work %v)",
 		bestNode.height, bestNode.hash, b.stateSnapshot.TotalTxns,
 		bestNode.workSum)
 
 	return &b, nil
}
+
+// CachedStateSize returns the total size of the cached state of the blockchain
+// in bytes.
+func (b *BlockChain) CachedStateSize() uint64 { + b.chainLock.Lock() + defer b.chainLock.Unlock() + return b.utxoCache.totalMemoryUsage() +} diff --git a/blockchain/chain_test.go b/blockchain/chain_test.go index 34356326b9..1ac08f9a76 100644 --- a/blockchain/chain_test.go +++ b/blockchain/chain_test.go @@ -5,14 +5,15 @@ package blockchain import ( + "fmt" "reflect" "testing" "time" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) // TestHaveBlock tests the HaveBlock API to ensure proper functionality. @@ -163,13 +164,13 @@ func TestCalcSequenceLock(t *testing.T) { // Obtain the median time past from the PoV of the input created above. // The MTP for the input is the MTP from the PoV of the block *prior* // to the one that included it. - medianTime := node.RelativeAncestor(5).CalcPastMedianTime().Unix() + medianTime := CalcPastMedianTime(node.RelativeAncestor(5)).Unix() // The median time calculated from the PoV of the best block in the // test chain. For unconfirmed inputs, this value will be used since // the MTP will be calculated from the PoV of the yet-to-be-mined // block. - nextMedianTime := node.CalcPastMedianTime().Unix() + nextMedianTime := CalcPastMedianTime(node).Unix() nextBlockHeight := int32(numBlocksToActivate) + 1 // Add an additional transaction which will serve as our unconfirmed @@ -964,3 +965,193 @@ func TestIntervalBlockHashes(t *testing.T) { } } } + +func TestChainTips(t *testing.T) { + tests := []struct { + name string + chainTipGen func() (*BlockChain, map[chainhash.Hash]ChainTip) + }{ + { + name: "one active chain tip", + chainTipGen: func() (*BlockChain, map[chainhash.Hash]ChainTip) { + // Construct a synthetic block chain with a block index consisting of + // the following structure. 
+ // genesis -> 1 -> 2 -> 3 + tip := tstTip + chain := newFakeChain(&chaincfg.MainNetParams) + branch0Nodes := chainedNodes(chain.bestChain.Genesis(), 3) + for _, node := range branch0Nodes { + chain.index.SetStatusFlags(node, statusDataStored) + chain.index.SetStatusFlags(node, statusValid) + chain.index.AddNode(node) + } + chain.bestChain.SetTip(tip(branch0Nodes)) + + activeTip := ChainTip{ + Height: 3, + BlockHash: (tip(branch0Nodes)).hash, + BranchLen: 0, + Status: StatusActive, + } + chainTips := make(map[chainhash.Hash]ChainTip) + chainTips[activeTip.BlockHash] = activeTip + + return chain, chainTips + }, + }, + { + name: "one active chain tip, one unknown chain tip", + chainTipGen: func() (*BlockChain, map[chainhash.Hash]ChainTip) { + // Construct a synthetic block chain with a block index consisting of + // the following structure. + // genesis -> 1 -> 2 -> 3 ... -> 10 -> 11 -> 12 -> 13 (active) + // \-> 11a -> 12a (unknown) + tip := tstTip + chain := newFakeChain(&chaincfg.MainNetParams) + branch0Nodes := chainedNodes(chain.bestChain.Genesis(), 13) + for _, node := range branch0Nodes { + chain.index.SetStatusFlags(node, statusDataStored) + chain.index.SetStatusFlags(node, statusValid) + chain.index.AddNode(node) + } + chain.bestChain.SetTip(tip(branch0Nodes)) + + branch1Nodes := chainedNodes(branch0Nodes[9], 2) + for _, node := range branch1Nodes { + chain.index.AddNode(node) + } + + activeTip := ChainTip{ + Height: 13, + BlockHash: (tip(branch0Nodes)).hash, + BranchLen: 0, + Status: StatusActive, + } + unknownTip := ChainTip{ + Height: 12, + BlockHash: (tip(branch1Nodes)).hash, + BranchLen: 2, + Status: StatusUnknown, + } + chainTips := make(map[chainhash.Hash]ChainTip) + chainTips[activeTip.BlockHash] = activeTip + chainTips[unknownTip.BlockHash] = unknownTip + + return chain, chainTips + }, + }, + { + name: "1 inactive tip, 1 invalid tip, 1 active tip", + chainTipGen: func() (*BlockChain, map[chainhash.Hash]ChainTip) { + // Construct a synthetic block 
chain with a block index consisting of + // the following structure. + // genesis -> 1 -> 2 -> 3 (active) + // \ -> 1a (valid-fork) + // \ -> 1b (invalid) + tip := tstTip + chain := newFakeChain(&chaincfg.MainNetParams) + branch0Nodes := chainedNodes(chain.bestChain.Genesis(), 3) + for _, node := range branch0Nodes { + chain.index.SetStatusFlags(node, statusDataStored) + chain.index.SetStatusFlags(node, statusValid) + chain.index.AddNode(node) + } + chain.bestChain.SetTip(tip(branch0Nodes)) + + branch1Nodes := chainedNodes(chain.bestChain.Genesis(), 1) + for _, node := range branch1Nodes { + chain.index.SetStatusFlags(node, statusDataStored) + chain.index.SetStatusFlags(node, statusValid) + chain.index.AddNode(node) + } + + branch2Nodes := chainedNodes(chain.bestChain.Genesis(), 1) + for _, node := range branch2Nodes { + chain.index.SetStatusFlags(node, statusDataStored) + chain.index.SetStatusFlags(node, statusValidateFailed) + chain.index.AddNode(node) + } + + activeTip := ChainTip{ + Height: tip(branch0Nodes).height, + BlockHash: (tip(branch0Nodes)).hash, + BranchLen: 0, + Status: StatusActive, + } + + inactiveTip := ChainTip{ + Height: tip(branch1Nodes).height, + BlockHash: (tip(branch1Nodes)).hash, + BranchLen: 1, + Status: StatusValidFork, + } + + invalidTip := ChainTip{ + Height: tip(branch2Nodes).height, + BlockHash: (tip(branch2Nodes)).hash, + BranchLen: 1, + Status: StatusInvalid, + } + + chainTips := make(map[chainhash.Hash]ChainTip) + chainTips[activeTip.BlockHash] = activeTip + chainTips[inactiveTip.BlockHash] = inactiveTip + chainTips[invalidTip.BlockHash] = invalidTip + + return chain, chainTips + }, + }, + } + + for _, test := range tests { + chain, expectedChainTips := test.chainTipGen() + gotChainTips := chain.ChainTips() + if len(gotChainTips) != len(expectedChainTips) { + t.Errorf("TestChainTips Failed test %s. 
Expected %d "+ + "chain tips, got %d", test.name, len(expectedChainTips), len(gotChainTips)) + } + + for _, gotChainTip := range gotChainTips { + testChainTip, found := expectedChainTips[gotChainTip.BlockHash] + if !found { + t.Errorf("TestChainTips Failed test %s. Couldn't find an expected "+ + "chain tip with height %d, hash %s, branchlen %d, status \"%s\"", + test.name, testChainTip.Height, testChainTip.BlockHash.String(), + testChainTip.BranchLen, testChainTip.Status.String()) + } + + if !reflect.DeepEqual(testChainTip, gotChainTip) { + t.Errorf("TestChainTips Failed test %s. Expected chain tip with "+ + "height %d, hash %s, branchlen %d, status \"%s\" but got "+ + "height %d, hash %s, branchlen %d, status \"%s\"", test.name, + testChainTip.Height, testChainTip.BlockHash.String(), + testChainTip.BranchLen, testChainTip.Status.String(), + gotChainTip.Height, gotChainTip.BlockHash.String(), + gotChainTip.BranchLen, gotChainTip.Status.String()) + } + + switch testChainTip.Status { + case StatusActive: + if testChainTip.Status.String() != "active" { + t.Errorf("TestChainTips Fail: Expected string of \"active\", got \"%s\"", + testChainTip.Status.String()) + } + case StatusInvalid: + if testChainTip.Status.String() != "invalid" { + t.Errorf("TestChainTips Fail: Expected string of \"invalid\", got \"%s\"", + testChainTip.Status.String()) + } + case StatusValidFork: + if testChainTip.Status.String() != "valid-fork" { + t.Errorf("TestChainTips Fail: Expected string of \"valid-fork\", got \"%s\"", + testChainTip.Status.String()) + } + case StatusUnknown: + if testChainTip.Status.String() != fmt.Sprintf("unknown: %b", testChainTip.Status) { + t.Errorf("TestChainTips Fail: Expected string of \"unknown\", got \"%s\"", + testChainTip.Status.String()) + } + } + } + } +} diff --git a/blockchain/chainio.go b/blockchain/chainio.go index fa41254da6..75474021f8 100644 --- a/blockchain/chainio.go +++ b/blockchain/chainio.go @@ -12,10 +12,10 @@ import ( "sync" "time" + 
"github.com/btcsuite/btcd/btcutil"
 	"github.com/btcsuite/btcd/chaincfg/chainhash"
 	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/wire"
-	"github.com/btcsuite/btcd/btcutil"
 )
 
 const (
@@ -51,6 +51,10 @@ var (
 	// chain state.
 	chainStateKeyName = []byte("chainstate")
 
+	// utxoStateConsistencyKeyName is the name of the db key used to store the
+	// consistency status of the utxo state.
+	utxoStateConsistencyKeyName = []byte("utxostateconsistency")
+
 	// spendJournalVersionKeyName is the name of the db key used to store
 	// the version of the spend journal currently in the database.
 	spendJournalVersionKeyName = []byte("spendjournalversion")
@@ -243,10 +247,10 @@ type SpentTxOut struct {
 	// Amount is the amount of the output.
 	Amount int64
 
-	// PkScipt is the the public key script for the output.
+	// PkScript is the public key script for the output.
 	PkScript []byte
 
-	// Height is the height of the the block containing the creating tx.
+	// Height is the height of the block containing the creating tx.
 	Height int32
 
 	// Denotes if the creating tx is a coinbase.
@@ -494,6 +498,21 @@ func dbRemoveSpendJournalEntry(dbTx database.Tx, blockHash *chainhash.Hash) erro
 	return spendBucket.Delete(blockHash[:])
 }
 
+// dbPruneSpendJournalEntry uses an existing database transaction to remove all
+// the spend journal entries for the pruned blocks.
+func dbPruneSpendJournalEntry(dbTx database.Tx, blockHashes []chainhash.Hash) error { + spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName) + + for _, blockHash := range blockHashes { + err := spendBucket.Delete(blockHash[:]) + if err != nil { + return err + } + } + + return nil +} + // ----------------------------------------------------------------------------- // The unspent transaction output (utxo) set consists of an entry for each // unspent output using a format that is optimized to reduce space using domain @@ -729,11 +748,12 @@ func dbFetchUtxoEntryByHash(dbTx database.Tx, hash *chainhash.Hash) (*UtxoEntry, // // When there is no entry for the provided output, nil will be returned for both // the entry and the error. -func dbFetchUtxoEntry(dbTx database.Tx, outpoint wire.OutPoint) (*UtxoEntry, error) { +func dbFetchUtxoEntry(dbTx database.Tx, utxoBucket database.Bucket, + outpoint wire.OutPoint) (*UtxoEntry, error) { + // Fetch the unspent transaction output information for the passed // transaction output. Return now when there is no entry. key := outpointKey(outpoint) - utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName) serializedUtxo := utxoBucket.Get(*key) recycleOutpointKey(key) if serializedUtxo == nil { @@ -771,6 +791,11 @@ func dbFetchUtxoEntry(dbTx database.Tx, outpoint wire.OutPoint) (*UtxoEntry, err // particular, only the entries that have been marked as modified are written // to the database. func dbPutUtxoView(dbTx database.Tx, view *UtxoViewpoint) error { + // Return early if the view is nil. + if view == nil { + return nil + } + utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName) for outpoint, entry := range view.entries { // No need to update the database if the entry was not modified. @@ -780,32 +805,54 @@ func dbPutUtxoView(dbTx database.Tx, view *UtxoViewpoint) error { // Remove the utxo entry if it is spent. 
if entry.IsSpent() { - key := outpointKey(outpoint) - err := utxoBucket.Delete(*key) - recycleOutpointKey(key) + err := dbDeleteUtxoEntry(utxoBucket, outpoint) + if err != nil { + return err + } + } else { + err := dbPutUtxoEntry(utxoBucket, outpoint, entry) if err != nil { return err } - - continue } + } - // Serialize and store the utxo entry. - serialized, err := serializeUtxoEntry(entry) - if err != nil { - return err - } - key := outpointKey(outpoint) - err = utxoBucket.Put(*key, serialized) - // NOTE: The key is intentionally not recycled here since the - // database interface contract prohibits modifications. It will - // be garbage collected normally when the database is done with - // it. - if err != nil { - return err - } + return nil +} + +// dbDeleteUtxoEntry uses an existing database transaction to delete the utxo +// entry from the database. +func dbDeleteUtxoEntry(utxoBucket database.Bucket, outpoint wire.OutPoint) error { + key := outpointKey(outpoint) + err := utxoBucket.Delete(*key) + recycleOutpointKey(key) + return err +} + +// dbPutUtxoEntry uses an existing database transaction to update the utxo entry +// in the database. +func dbPutUtxoEntry(utxoBucket database.Bucket, outpoint wire.OutPoint, + entry *UtxoEntry) error { + + if entry == nil || entry.IsSpent() { + return AssertError("trying to store nil or spent entry") } + // Serialize and store the utxo entry. + serialized, err := serializeUtxoEntry(entry) + if err != nil { + return err + } + key := outpointKey(outpoint) + err = utxoBucket.Put(*key, serialized) + if err != nil { + return err + } + + // NOTE: The key is intentionally not recycled here since the + // database interface contract prohibits modifications. It will + // be garbage collected normally when the database is done with + // it. 
return nil } @@ -999,6 +1046,21 @@ func dbPutBestState(dbTx database.Tx, snapshot *BestState, workSum *big.Int) err return dbTx.Metadata().Put(chainStateKeyName, serializedData) } +// dbPutUtxoStateConsistency uses an existing database transaction to +// update the utxo state consistency status with the given parameters. +func dbPutUtxoStateConsistency(dbTx database.Tx, hash *chainhash.Hash) error { + // Store the utxo state consistency status into the database. + return dbTx.Metadata().Put(utxoStateConsistencyKeyName, hash[:]) +} + +// dbFetchUtxoStateConsistency uses an existing database transaction to retrieve +// the utxo state consistency status from the database. The code is 0 when +// nothing was found. +func dbFetchUtxoStateConsistency(dbTx database.Tx) []byte { + // Fetch the serialized data from the database. + return dbTx.Metadata().Get(utxoStateConsistencyKeyName) +} + // createChainState initializes both the database and the chain state to the // genesis block. This includes creating the necessary buckets and inserting // the genesis block, so it must only be called on an uninitialized database. @@ -1236,7 +1298,7 @@ func (b *BlockChain) initChainState() error { blockWeight := uint64(GetBlockWeight(btcutil.NewBlock(&block))) numTxns := uint64(len(block.Transactions)) b.stateSnapshot = newBestState(tip, blockSize, blockWeight, - numTxns, state.totalTxns, tip.CalcPastMedianTime()) + numTxns, state.totalTxns, CalcPastMedianTime(tip)) return nil }) diff --git a/blockchain/chainview.go b/blockchain/chainview.go index a4c3692cd6..dd70ab2d01 100644 --- a/blockchain/chainview.go +++ b/blockchain/chainview.go @@ -36,11 +36,13 @@ func fastLog2Floor(n uint32) uint8 { // for comparing chains. 
// // For example, assume a block chain with a side chain as depicted below: -// genesis -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -// \-> 4a -> 5a -> 6a +// +// genesis -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 +// \-> 4a -> 5a -> 6a // // The chain view for the branch ending in 6a consists of: -// genesis -> 1 -> 2 -> 3 -> 4a -> 5a -> 6a +// +// genesis -> 1 -> 2 -> 3 -> 4a -> 5a -> 6a type chainView struct { mtx sync.Mutex nodes []*blockNode @@ -258,12 +260,14 @@ func (c *chainView) next(node *blockNode) *blockNode { // view. // // For example, assume a block chain with a side chain as depicted below: -// genesis -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -// \-> 4a -> 5a -> 6a +// +// genesis -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 +// \-> 4a -> 5a -> 6a // // Further, assume the view is for the longer chain depicted above. That is to // say it consists of: -// genesis -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 +// +// genesis -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 // // Invoking this function with block node 5 would return block node 6 while // invoking it with block node 5a would return nil since that node is not part @@ -321,12 +325,14 @@ func (c *chainView) findFork(node *blockNode) *blockNode { // the chain view. It will return nil if there is no common block. // // For example, assume a block chain with a side chain as depicted below: -// genesis -> 1 -> 2 -> ... -> 5 -> 6 -> 7 -> 8 -// \-> 6a -> 7a +// +// genesis -> 1 -> 2 -> ... -> 5 -> 6 -> 7 -> 8 +// \-> 6a -> 7a // // Further, assume the view is for the longer chain depicted above. That is to // say it consists of: -// genesis -> 1 -> 2 -> ... -> 5 -> 6 -> 7 -> 8. +// +// genesis -> 1 -> 2 -> ... -> 5 -> 6 -> 7 -> 8. 
// // Invoking this function with block node 7a would return block node 5 while // invoking it with block node 7 would return itself since it is already part of diff --git a/blockchain/checkpoints.go b/blockchain/checkpoints.go index dbfa9d146d..74fc23bacb 100644 --- a/blockchain/checkpoints.go +++ b/blockchain/checkpoints.go @@ -8,10 +8,10 @@ import ( "fmt" "time" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/btcutil" ) // CheckpointConfirmations is the number of blocks before the end of the current @@ -184,14 +184,14 @@ func isNonstandardTransaction(tx *btcutil.Tx) bool { // checkpoint candidate. // // The factors used to determine a good checkpoint are: -// - The block must be in the main chain -// - The block must be at least 'CheckpointConfirmations' blocks prior to the -// current end of the main chain -// - The timestamps for the blocks before and after the checkpoint must have -// timestamps which are also before and after the checkpoint, respectively -// (due to the median time allowance this is not always the case) -// - The block must not contain any strange transaction such as those with -// nonstandard scripts +// - The block must be in the main chain +// - The block must be at least 'CheckpointConfirmations' blocks prior to the +// current end of the main chain +// - The timestamps for the blocks before and after the checkpoint must have +// timestamps which are also before and after the checkpoint, respectively +// (due to the median time allowance this is not always the case) +// - The block must not contain any strange transaction such as those with +// nonstandard scripts // // The intent is that candidates are reviewed by a developer to make the final // decision and then manually added to the list of checkpoints for a network. 
diff --git a/blockchain/difficulty.go b/blockchain/difficulty.go index 05f78a3ed1..1fa850cc37 100644 --- a/blockchain/difficulty.go +++ b/blockchain/difficulty.go @@ -42,9 +42,9 @@ func HashToBig(hash *chainhash.Hash) *big.Int { // Like IEEE754 floating point, there are three basic components: the sign, // the exponent, and the mantissa. They are broken out as follows: // -// * the most significant 8 bits represent the unsigned base 256 exponent -// * bit 23 (the 24th bit) represents the sign bit -// * the least significant 23 bits represent the mantissa +// - the most significant 8 bits represent the unsigned base 256 exponent +// - bit 23 (the 24th bit) represents the sign bit +// - the least significant 23 bits represent the mantissa // // ------------------------------------------------- // | Exponent | Sign | Mantissa | @@ -53,7 +53,8 @@ func HashToBig(hash *chainhash.Hash) *big.Int { // ------------------------------------------------- // // The formula to calculate N is: -// N = (-1^sign) * mantissa * 256^(exponent-3) +// +// N = (-1^sign) * mantissa * 256^(exponent-3) // // This compact form is only used in bitcoin to encode unsigned 256-bit numbers // which represent difficulty targets, thus there really is not a need for a @@ -192,80 +193,87 @@ func (b *BlockChain) calcEasiestDifficulty(bits uint32, duration time.Duration) // findPrevTestNetDifficulty returns the difficulty of the previous block which // did not have the special testnet minimum difficulty rule applied. -// -// This function MUST be called with the chain state lock held (for writes). -func (b *BlockChain) findPrevTestNetDifficulty(startNode *blockNode) uint32 { +func findPrevTestNetDifficulty(startNode HeaderCtx, c ChainCtx) uint32 { // Search backwards through the chain for the last block without // the special rule applied. 
iterNode := startNode - for iterNode != nil && iterNode.height%b.blocksPerRetarget != 0 && - iterNode.bits == b.chainParams.PowLimitBits { + for iterNode != nil && iterNode.Height()%c.BlocksPerRetarget() != 0 && + iterNode.Bits() == c.ChainParams().PowLimitBits { - iterNode = iterNode.parent + iterNode = iterNode.Parent() } // Return the found difficulty or the minimum difficulty if no // appropriate block was found. - lastBits := b.chainParams.PowLimitBits + lastBits := c.ChainParams().PowLimitBits if iterNode != nil { - lastBits = iterNode.bits + lastBits = iterNode.Bits() } return lastBits } // calcNextRequiredDifficulty calculates the required difficulty for the block -// after the passed previous block node based on the difficulty retarget rules. +// after the passed previous HeaderCtx based on the difficulty retarget rules. // This function differs from the exported CalcNextRequiredDifficulty in that -// the exported version uses the current best chain as the previous block node -// while this function accepts any block node. -func (b *BlockChain) calcNextRequiredDifficulty(lastNode *blockNode, newBlockTime time.Time) (uint32, error) { +// the exported version uses the current best chain as the previous HeaderCtx +// while this function accepts any block node. This function accepts a ChainCtx +// parameter that gives the necessary difficulty context variables. +func calcNextRequiredDifficulty(lastNode HeaderCtx, newBlockTime time.Time, + c ChainCtx) (uint32, error) { + + // Emulate the same behavior as Bitcoin Core that for regtest there is + // no difficulty retargeting. + if c.ChainParams().PoWNoRetargeting { + return c.ChainParams().PowLimitBits, nil + } + // Genesis block. if lastNode == nil { - return b.chainParams.PowLimitBits, nil + return c.ChainParams().PowLimitBits, nil } // Return the previous block's difficulty requirements if this block // is not at a difficulty retarget interval. 
- if (lastNode.height+1)%b.blocksPerRetarget != 0 { + if (lastNode.Height()+1)%c.BlocksPerRetarget() != 0 { // For networks that support it, allow special reduction of the // required difficulty once too much time has elapsed without // mining a block. - if b.chainParams.ReduceMinDifficulty { + if c.ChainParams().ReduceMinDifficulty { // Return minimum difficulty when more than the desired // amount of time has elapsed without mining a block. - reductionTime := int64(b.chainParams.MinDiffReductionTime / + reductionTime := int64(c.ChainParams().MinDiffReductionTime / time.Second) - allowMinTime := lastNode.timestamp + reductionTime + allowMinTime := lastNode.Timestamp() + reductionTime if newBlockTime.Unix() > allowMinTime { - return b.chainParams.PowLimitBits, nil + return c.ChainParams().PowLimitBits, nil } // The block was mined within the desired timeframe, so // return the difficulty for the last block which did // not have the special minimum difficulty rule applied. - return b.findPrevTestNetDifficulty(lastNode), nil + return findPrevTestNetDifficulty(lastNode, c), nil } // For the main network (or any unrecognized networks), simply // return the previous block's difficulty requirements. - return lastNode.bits, nil + return lastNode.Bits(), nil } // Get the block node at the previous retarget (targetTimespan days // worth of blocks). - firstNode := lastNode.RelativeAncestor(b.blocksPerRetarget - 1) + firstNode := lastNode.RelativeAncestorCtx(c.BlocksPerRetarget() - 1) if firstNode == nil { return 0, AssertError("unable to obtain previous retarget block") } // Limit the amount of adjustment that can occur to the previous // difficulty. 
- actualTimespan := lastNode.timestamp - firstNode.timestamp + actualTimespan := lastNode.Timestamp() - firstNode.Timestamp() adjustedTimespan := actualTimespan - if actualTimespan < b.minRetargetTimespan { - adjustedTimespan = b.minRetargetTimespan - } else if actualTimespan > b.maxRetargetTimespan { - adjustedTimespan = b.maxRetargetTimespan + if actualTimespan < c.MinRetargetTimespan() { + adjustedTimespan = c.MinRetargetTimespan() + } else if actualTimespan > c.MaxRetargetTimespan() { + adjustedTimespan = c.MaxRetargetTimespan() } // Calculate new target difficulty as: @@ -273,14 +281,14 @@ func (b *BlockChain) calcNextRequiredDifficulty(lastNode *blockNode, newBlockTim // The result uses integer division which means it will be slightly // rounded down. Bitcoind also uses integer division to calculate this // result. - oldTarget := CompactToBig(lastNode.bits) + oldTarget := CompactToBig(lastNode.Bits()) newTarget := new(big.Int).Mul(oldTarget, big.NewInt(adjustedTimespan)) - targetTimeSpan := int64(b.chainParams.TargetTimespan / time.Second) + targetTimeSpan := int64(c.ChainParams().TargetTimespan / time.Second) newTarget.Div(newTarget, big.NewInt(targetTimeSpan)) // Limit new value to the proof of work limit. - if newTarget.Cmp(b.chainParams.PowLimit) > 0 { - newTarget.Set(b.chainParams.PowLimit) + if newTarget.Cmp(c.ChainParams().PowLimit) > 0 { + newTarget.Set(c.ChainParams().PowLimit) } // Log new target difficulty and return it. The new target logging is @@ -288,13 +296,13 @@ func (b *BlockChain) calcNextRequiredDifficulty(lastNode *blockNode, newBlockTim // newTarget since conversion to the compact representation loses // precision. 
newTargetBits := BigToCompact(newTarget) - log.Debugf("Difficulty retarget at block height %d", lastNode.height+1) - log.Debugf("Old target %08x (%064x)", lastNode.bits, oldTarget) + log.Debugf("Difficulty retarget at block height %d", lastNode.Height()+1) + log.Debugf("Old target %08x (%064x)", lastNode.Bits(), oldTarget) log.Debugf("New target %08x (%064x)", newTargetBits, CompactToBig(newTargetBits)) log.Debugf("Actual timespan %v, adjusted timespan %v, target timespan %v", time.Duration(actualTimespan)*time.Second, time.Duration(adjustedTimespan)*time.Second, - b.chainParams.TargetTimespan) + c.ChainParams().TargetTimespan) return newTargetBits, nil } @@ -306,7 +314,7 @@ func (b *BlockChain) calcNextRequiredDifficulty(lastNode *blockNode, newBlockTim // This function is safe for concurrent access. func (b *BlockChain) CalcNextRequiredDifficulty(timestamp time.Time) (uint32, error) { b.chainLock.Lock() - difficulty, err := b.calcNextRequiredDifficulty(b.bestChain.Tip(), timestamp) + difficulty, err := calcNextRequiredDifficulty(b.bestChain.Tip(), timestamp, b) b.chainLock.Unlock() return difficulty, err } diff --git a/blockchain/doc.go b/blockchain/doc.go index 244175414a..d57acc29c9 100644 --- a/blockchain/doc.go +++ b/blockchain/doc.go @@ -26,42 +26,42 @@ caller a high level of flexibility in how they want to react to certain events such as orphan blocks which need their parents requested and newly connected main chain blocks which might result in wallet updates. -Bitcoin Chain Processing Overview +# Bitcoin Chain Processing Overview Before a block is allowed into the block chain, it must go through an intensive series of validation rules. 
The following list serves as a general outline of those rules to provide some intuition into what is going on under the hood, but is by no means exhaustive: - - Reject duplicate blocks - - Perform a series of sanity checks on the block and its transactions such as - verifying proof of work, timestamps, number and character of transactions, - transaction amounts, script complexity, and merkle root calculations - - Compare the block against predetermined checkpoints for expected timestamps - and difficulty based on elapsed time since the checkpoint - - Save the most recent orphan blocks for a limited time in case their parent - blocks become available - - Stop processing if the block is an orphan as the rest of the processing - depends on the block's position within the block chain - - Perform a series of more thorough checks that depend on the block's position - within the block chain such as verifying block difficulties adhere to - difficulty retarget rules, timestamps are after the median of the last - several blocks, all transactions are finalized, checkpoint blocks match, and - block versions are in line with the previous blocks - - Determine how the block fits into the chain and perform different actions - accordingly in order to ensure any side chains which have higher difficulty - than the main chain become the new main chain - - When a block is being connected to the main chain (either through - reorganization of a side chain to the main chain or just extending the - main chain), perform further checks on the block's transactions such as - verifying transaction duplicates, script complexity for the combination of - connected scripts, coinbase maturity, double spends, and connected - transaction values - - Run the transaction scripts to verify the spender is allowed to spend the - coins - - Insert the block into the block database + - Reject duplicate blocks + - Perform a series of sanity checks on the block and its transactions such as + verifying proof of 
work, timestamps, number and character of transactions, + transaction amounts, script complexity, and merkle root calculations + - Compare the block against predetermined checkpoints for expected timestamps + and difficulty based on elapsed time since the checkpoint + - Save the most recent orphan blocks for a limited time in case their parent + blocks become available + - Stop processing if the block is an orphan as the rest of the processing + depends on the block's position within the block chain + - Perform a series of more thorough checks that depend on the block's position + within the block chain such as verifying block difficulties adhere to + difficulty retarget rules, timestamps are after the median of the last + several blocks, all transactions are finalized, checkpoint blocks match, and + block versions are in line with the previous blocks + - Determine how the block fits into the chain and perform different actions + accordingly in order to ensure any side chains which have higher difficulty + than the main chain become the new main chain + - When a block is being connected to the main chain (either through + reorganization of a side chain to the main chain or just extending the + main chain), perform further checks on the block's transactions such as + verifying transaction duplicates, script complexity for the combination of + connected scripts, coinbase maturity, double spends, and connected + transaction values + - Run the transaction scripts to verify the spender is allowed to spend the + coins + - Insert the block into the block database -Errors +# Errors Errors returned by this package are either the raw errors provided by underlying calls or of type blockchain.RuleError. This allows the caller to differentiate @@ -70,12 +70,12 @@ violations through type assertions. In addition, callers can programmatically determine the specific rule violation by examining the ErrorCode field of the type asserted blockchain.RuleError. 
-Bitcoin Improvement Proposals +# Bitcoin Improvement Proposals This package includes spec changes outlined by the following BIPs: - BIP0016 (https://en.bitcoin.it/wiki/BIP_0016) - BIP0030 (https://en.bitcoin.it/wiki/BIP_0030) - BIP0034 (https://en.bitcoin.it/wiki/BIP_0034) + BIP0016 (https://en.bitcoin.it/wiki/BIP_0016) + BIP0030 (https://en.bitcoin.it/wiki/BIP_0030) + BIP0034 (https://en.bitcoin.it/wiki/BIP_0034) */ package blockchain diff --git a/blockchain/example_test.go b/blockchain/example_test.go index 7f15e59bc6..8db570273d 100644 --- a/blockchain/example_test.go +++ b/blockchain/example_test.go @@ -11,10 +11,10 @@ import ( "path/filepath" "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/database" _ "github.com/btcsuite/btcd/database/ffldb" - "github.com/btcsuite/btcd/btcutil" ) // This example demonstrates how to create a new chain instance and use diff --git a/blockchain/fullblocks_test.go b/blockchain/fullblocks_test.go index 3cc7c87b70..d6bcf799af 100644 --- a/blockchain/fullblocks_test.go +++ b/blockchain/fullblocks_test.go @@ -14,13 +14,13 @@ import ( "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/blockchain/fullblocktests" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/database" _ "github.com/btcsuite/btcd/database/ffldb" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) const ( diff --git a/blockchain/fullblocktests/generate.go b/blockchain/fullblocktests/generate.go index 964986dbcf..4c551c05e0 100644 --- a/blockchain/fullblocktests/generate.go +++ b/blockchain/fullblocktests/generate.go @@ -20,11 +20,11 @@ import ( "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" 
"github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) const ( @@ -309,8 +309,7 @@ func calcMerkleRoot(txns []*wire.MsgTx) chainhash.Hash { for _, tx := range txns { utilTxns = append(utilTxns, btcutil.NewTx(tx)) } - merkles := blockchain.BuildMerkleTreeStore(utilTxns, false) - return *merkles[len(merkles)-1] + return blockchain.CalcMerkleRoot(utilTxns, false) } // solveBlock attempts to find a nonce which makes the passed block header hash @@ -423,7 +422,7 @@ func replaceCoinbaseSigScript(script []byte) func(*wire.MsgBlock) { } // additionalTx returns a function that itself takes a block and modifies it by -// adding the the provided transaction. +// adding the provided transaction. func additionalTx(tx *wire.MsgTx) func(*wire.MsgBlock) { return func(b *wire.MsgBlock) { b.AddTransaction(tx) @@ -466,9 +465,9 @@ func createSpendTxForTx(tx *wire.MsgTx, fee btcutil.Amount) *wire.MsgTx { // - A coinbase that pays the required subsidy to an OP_TRUE script // - When a spendable output is provided: // - A transaction that spends from the provided output the following outputs: -// - One that pays the inputs amount minus 1 atom to an OP_TRUE script -// - One that contains an OP_RETURN output with a random uint64 in order to -// ensure the transaction has a unique hash +// - One that pays the inputs amount minus 1 atom to an OP_TRUE script +// - One that contains an OP_RETURN output with a random uint64 in order to +// ensure the transaction has a unique hash // // Additionally, if one or more munge functions are specified, they will be // invoked with the block prior to solving it. 
This provides callers with the diff --git a/blockchain/indexers/addrindex.go b/blockchain/indexers/addrindex.go index 7e9f36f104..7eaaab06b7 100644 --- a/blockchain/indexers/addrindex.go +++ b/blockchain/indexers/addrindex.go @@ -64,7 +64,7 @@ const ( addrKeyTypeWitnessScriptHash = 3 // addrKeyTypeTaprootPubKey is the address type in an address key that - // represnts a pay-to-taproot adress. We use this to denote addresses + // represents a pay-to-taproot address. We use this to denote addresses // related to the segwit v1 that are encoded in the bech32m format. addrKeyTypeTaprootPubKey = 4 @@ -991,3 +991,15 @@ func NewAddrIndex(db database.DB, chainParams *chaincfg.Params) *AddrIndex { func DropAddrIndex(db database.DB, interrupt <-chan struct{}) error { return dropIndex(db, addrIndexKey, addrIndexName, interrupt) } + +// AddrIndexInitialized returns true if the address index has been created previously. +func AddrIndexInitialized(db database.DB) bool { + var exists bool + db.View(func(dbTx database.Tx) error { + bucket := dbTx.Metadata().Bucket(addrIndexKey) + exists = bucket != nil + return nil + }) + + return exists +} diff --git a/blockchain/indexers/blocklogger.go b/blockchain/indexers/blocklogger.go index 3671c0162a..960a51d2c1 100644 --- a/blockchain/indexers/blocklogger.go +++ b/blockchain/indexers/blocklogger.go @@ -8,8 +8,8 @@ import ( "sync" "time" - "github.com/btcsuite/btclog" "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btclog" ) // blockProgressLogger provides periodic logging for other services in order @@ -27,8 +27,9 @@ type blockProgressLogger struct { // newBlockProgressLogger returns a new block progress logger.
// The progress message is templated as follows: -// {progressAction} {numProcessed} {blocks|block} in the last {timePeriod} -// ({numTxs}, height {lastBlockHeight}, {lastBlockTimeStamp}) +// +// {progressAction} {numProcessed} {blocks|block} in the last {timePeriod} +// ({numTxs}, height {lastBlockHeight}, {lastBlockTimeStamp}) func newBlockProgressLogger(progressMessage string, logger btclog.Logger) *blockProgressLogger { return &blockProgressLogger{ lastBlockLogTime: time.Now(), diff --git a/blockchain/indexers/cfindex.go b/blockchain/indexers/cfindex.go index fa0ee3c0a6..1af1d0a421 100644 --- a/blockchain/indexers/cfindex.go +++ b/blockchain/indexers/cfindex.go @@ -8,13 +8,13 @@ import ( "errors" "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/btcutil/gcs" + "github.com/btcsuite/btcd/btcutil/gcs/builder" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/database" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" - "github.com/btcsuite/btcd/btcutil/gcs" - "github.com/btcsuite/btcd/btcutil/gcs/builder" ) const ( @@ -355,3 +355,15 @@ func NewCfIndex(db database.DB, chainParams *chaincfg.Params) *CfIndex { func DropCfIndex(db database.DB, interrupt <-chan struct{}) error { return dropIndex(db, cfIndexParentBucketKey, cfIndexName, interrupt) } + +// CfIndexInitialized returns true if the cfindex has been created previously. 
+func CfIndexInitialized(db database.DB) bool { + var exists bool + db.View(func(dbTx database.Tx) error { + bucket := dbTx.Metadata().Bucket(cfIndexParentBucketKey) + exists = bucket != nil + return nil + }) + + return exists +} diff --git a/blockchain/indexers/common.go b/blockchain/indexers/common.go index 07b2feca5d..89ce6720b5 100644 --- a/blockchain/indexers/common.go +++ b/blockchain/indexers/common.go @@ -12,8 +12,8 @@ import ( "errors" "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/database" "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/database" ) var ( diff --git a/blockchain/indexers/manager.go b/blockchain/indexers/manager.go index 8c87ca0771..b4487e60fd 100644 --- a/blockchain/indexers/manager.go +++ b/blockchain/indexers/manager.go @@ -9,10 +9,10 @@ import ( "fmt" "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/database" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) var ( diff --git a/blockchain/indexers/txindex.go b/blockchain/indexers/txindex.go index f1d734e06b..3d4e914677 100644 --- a/blockchain/indexers/txindex.go +++ b/blockchain/indexers/txindex.go @@ -9,10 +9,10 @@ import ( "fmt" "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/database" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) const ( @@ -481,3 +481,15 @@ func DropTxIndex(db database.DB, interrupt <-chan struct{}) error { return dropIndex(db, txIndexKey, txIndexName, interrupt) } + +// TxIndexInitialized returns true if the tx index has been created previously. 
+func TxIndexInitialized(db database.DB) bool { + var exists bool + db.View(func(dbTx database.Tx) error { + bucket := dbTx.Metadata().Bucket(txIndexKey) + exists = bucket != nil + return nil + }) + + return exists +} diff --git a/blockchain/interfaces.go b/blockchain/interfaces.go new file mode 100644 index 0000000000..cae9b3b9f0 --- /dev/null +++ b/blockchain/interfaces.go @@ -0,0 +1,55 @@ +package blockchain + +import ( + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" +) + +// ChainCtx is an interface that abstracts away blockchain parameters. +type ChainCtx interface { + // ChainParams returns the chain's configured chaincfg.Params. + ChainParams() *chaincfg.Params + + // BlocksPerRetarget returns the number of blocks before retargeting + // occurs. + BlocksPerRetarget() int32 + + // MinRetargetTimespan returns the minimum amount of time to use in the + // difficulty calculation. + MinRetargetTimespan() int64 + + // MaxRetargetTimespan returns the maximum amount of time to use in the + // difficulty calculation. + MaxRetargetTimespan() int64 + + // VerifyCheckpoint returns whether the passed height and hash match + // the checkpoint data. Not all instances of VerifyCheckpoint will use + // this function for validation. + VerifyCheckpoint(height int32, hash *chainhash.Hash) bool + + // FindPreviousCheckpoint returns the most recent checkpoint that we + // have validated. Not all instances of FindPreviousCheckpoint will use + // this function for validation. + FindPreviousCheckpoint() (HeaderCtx, error) +} + +// HeaderCtx is an interface that describes information about a block. This is +// used so that external libraries can provide their own context (the header's +// parent, bits, etc.) when attempting to contextually validate a header. +type HeaderCtx interface { + // Height returns the header's height. + Height() int32 + + // Bits returns the header's bits. + Bits() uint32 + + // Timestamp returns the header's timestamp. 
+ Timestamp() int64 + + // Parent returns the header's parent. + Parent() HeaderCtx + + // RelativeAncestorCtx returns the header's ancestor that is distance + // blocks before it in the chain. + RelativeAncestorCtx(distance int32) HeaderCtx +} diff --git a/blockchain/merkle.go b/blockchain/merkle.go index d7e567b283..b89b518505 100644 --- a/blockchain/merkle.go +++ b/blockchain/merkle.go @@ -7,11 +7,12 @@ package blockchain import ( "bytes" "fmt" + "io" "math" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/btcutil" ) const ( @@ -58,14 +59,16 @@ func nextPowerOfTwo(n int) int { // HashMerkleBranches takes two hashes, treated as the left and right tree // nodes, and returns the hash of their concatenation. This is a helper // function used to aid in the generation of a merkle tree. -func HashMerkleBranches(left *chainhash.Hash, right *chainhash.Hash) *chainhash.Hash { +func HashMerkleBranches(left, right *chainhash.Hash) chainhash.Hash { // Concatenate the left and right nodes. var hash [chainhash.HashSize * 2]byte copy(hash[:chainhash.HashSize], left[:]) copy(hash[chainhash.HashSize:], right[:]) - newHash := chainhash.DoubleHashH(hash[:]) - return &newHash + return chainhash.DoubleHashRaw(func(w io.Writer) error { + _, err := w.Write(hash[:]) + return err + }) } // BuildMerkleTreeStore creates a merkle tree from a slice of transactions, @@ -86,7 +89,7 @@ func HashMerkleBranches(left *chainhash.Hash, right *chainhash.Hash) *chainhash. // // The above stored as a linear array is as follows: // -// [h1 h2 h3 h4 h12 h34 root] +// [h1 h2 h3 h4 h12 h34 root] // // As the above shows, the merkle root is always the last element in the array. // @@ -140,13 +143,13 @@ func BuildMerkleTreeStore(transactions []*btcutil.Tx, witness bool) []*chainhash // hashing the concatenation of the left child with itself. 
case merkles[i+1] == nil: newHash := HashMerkleBranches(merkles[i], merkles[i]) - merkles[offset] = newHash + merkles[offset] = &newHash // The normal case sets the parent node to the double sha256 // of the concatentation of the left and right children. default: newHash := HashMerkleBranches(merkles[i], merkles[i+1]) - merkles[offset] = newHash + merkles[offset] = &newHash } offset++ } @@ -154,6 +157,36 @@ func BuildMerkleTreeStore(transactions []*btcutil.Tx, witness bool) []*chainhash return merkles } +// CalcMerkleRoot computes the merkle root over a set of hashed leaves. The +// interior nodes are computed opportunistically as the leaves are added to the +// abstract tree to reduce the total number of allocations. Throughout the +// computation, this computation only requires storing O(log n) interior +// nodes. +// +// This method differs from BuildMerkleTreeStore in that the interior nodes are +// discarded instead of being returned along with the root. CalcMerkleRoot is +// slightly faster than BuildMerkleTreeStore and requires significantly less +// memory and fewer allocations. +// +// A merkle tree is a tree in which every non-leaf node is the hash of its +// children nodes. A diagram depicting how this works for bitcoin transactions +// where h(x) is a double sha256 follows: +// +// root = h1234 = h(h12 + h34) +// / \ +// h12 = h(h1 + h2) h34 = h(h3 + h4) +// / \ / \ +// h1 = h(tx1) h2 = h(tx2) h3 = h(tx3) h4 = h(tx4) +// +// The additional bool parameter indicates if we are generating the merkle tree +// using witness transaction id's rather than regular transaction id's. This +// also presents an additional case wherein the wtxid of the coinbase transaction +// is the zeroHash. 
+func CalcMerkleRoot(transactions []*btcutil.Tx, witness bool) chainhash.Hash { + s := newRollingMerkleTreeStore(uint64(len(transactions))) + return s.calcMerkleRoot(transactions, witness) +} + // ExtractWitnessCommitment attempts to locate, and return the witness // commitment for a block. The witness commitment is of the form: // SHA256(witness root || witness nonce). The function additionally returns a @@ -246,8 +279,7 @@ func ValidateWitnessCommitment(blk *btcutil.Block) error { // the extracted witnessCommitment is equal to: // SHA256(witnessMerkleRoot || witnessNonce). Where witnessNonce is the // coinbase transaction's only witness item. - witnessMerkleTree := BuildMerkleTreeStore(blk.Transactions(), true) - witnessMerkleRoot := witnessMerkleTree[len(witnessMerkleTree)-1] + witnessMerkleRoot := CalcMerkleRoot(blk.Transactions(), true) var witnessPreimage [chainhash.HashSize * 2]byte copy(witnessPreimage[:], witnessMerkleRoot[:]) diff --git a/blockchain/merkle_test.go b/blockchain/merkle_test.go index 1a224586fa..06eb7012a2 100644 --- a/blockchain/merkle_test.go +++ b/blockchain/merkle_test.go @@ -5,19 +5,105 @@ package blockchain import ( + "fmt" "testing" "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/stretchr/testify/require" ) // TestMerkle tests the BuildMerkleTreeStore API. 
func TestMerkle(t *testing.T) { block := btcutil.NewBlock(&Block100000) - merkles := BuildMerkleTreeStore(block.Transactions(), false) - calculatedMerkleRoot := merkles[len(merkles)-1] + calcMerkleRoot := CalcMerkleRoot(block.Transactions(), false) + merkleStoreTree := BuildMerkleTreeStore(block.Transactions(), false) + merkleStoreRoot := merkleStoreTree[len(merkleStoreTree)-1] + + require.Equal(t, *merkleStoreRoot, calcMerkleRoot) + wantMerkle := &Block100000.Header.MerkleRoot - if !wantMerkle.IsEqual(calculatedMerkleRoot) { + if !wantMerkle.IsEqual(&calcMerkleRoot) { t.Errorf("BuildMerkleTreeStore: merkle root mismatch - "+ - "got %v, want %v", calculatedMerkleRoot, wantMerkle) + "got %v, want %v", calcMerkleRoot, wantMerkle) + } +} + +func makeHashes(size int) []*chainhash.Hash { + var hashes = make([]*chainhash.Hash, size) + for i := range hashes { + hashes[i] = new(chainhash.Hash) + } + return hashes +} + +func makeTxs(size int) []*btcutil.Tx { + var txs = make([]*btcutil.Tx, size) + for i := range txs { + tx := btcutil.NewTx(wire.NewMsgTx(2)) + tx.Hash() + txs[i] = tx + } + return txs +} + +// BenchmarkRollingMerkle benches the RollingMerkleTree while varying the number +// of leaves pushed to the tree. +func BenchmarkRollingMerkle(b *testing.B) { + sizes := []int{ + 1000, + 2000, + 4000, + 8000, + 16000, + 32000, + } + + for _, size := range sizes { + txs := makeTxs(size) + name := fmt.Sprintf("%d", size) + b.Run(name, func(b *testing.B) { + benchmarkRollingMerkle(b, txs) + }) + } +} + +// BenchmarkMerkle benches the BuildMerkleTreeStore while varying the number +// of leaves pushed to the tree. 
+func BenchmarkMerkle(b *testing.B) { + sizes := []int{ + 1000, + 2000, + 4000, + 8000, + 16000, + 32000, + } + + for _, size := range sizes { + txs := makeTxs(size) + name := fmt.Sprintf("%d", size) + b.Run(name, func(b *testing.B) { + benchmarkMerkle(b, txs) + }) + } +} + +func benchmarkRollingMerkle(b *testing.B, txs []*btcutil.Tx) { + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + CalcMerkleRoot(txs, false) + } +} + +func benchmarkMerkle(b *testing.B, txs []*btcutil.Tx) { + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + BuildMerkleTreeStore(txs, false) } } diff --git a/blockchain/notifications.go b/blockchain/notifications.go index 25cc4f1f03..5139e89edf 100644 --- a/blockchain/notifications.go +++ b/blockchain/notifications.go @@ -50,9 +50,9 @@ func (n NotificationType) String() string { // Notification defines notification that is sent to the caller via the callback // function provided during the call to New and consists of a notification type // as well as associated data that depends on the type as follows: -// - NTBlockAccepted: *btcutil.Block -// - NTBlockConnected: *btcutil.Block -// - NTBlockDisconnected: *btcutil.Block +// - NTBlockAccepted: *btcutil.Block +// - NTBlockConnected: *btcutil.Block +// - NTBlockDisconnected: *btcutil.Block type Notification struct { Type NotificationType Data interface{} diff --git a/blockchain/process.go b/blockchain/process.go index c367b4ceff..64d5c1e14f 100644 --- a/blockchain/process.go +++ b/blockchain/process.go @@ -8,9 +8,9 @@ import ( "fmt" "time" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/btcutil" ) // BehaviorFlags is a bitmask defining tweaks to the normal behavior when diff --git a/blockchain/rolling_merkle.go b/blockchain/rolling_merkle.go new file mode 100644 index 0000000000..cd2c2ec7e6 --- /dev/null +++ b/blockchain/rolling_merkle.go @@ -0,0 +1,136 @@ +package 
blockchain + +import ( + "math/bits" + + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg/chainhash" +) + +// rollingMerkleTreeStore calculates the merkle root by only allocating O(logN) +// memory where N is the total amount of leaves being included in the tree. +type rollingMerkleTreeStore struct { + // roots are where the temporary merkle roots get stored while the + // merkle root is being calculated. + roots []chainhash.Hash + + // numLeaves is the total leaves the store has processed. numLeaves + // is required for the root calculation algorithm to work. + numLeaves uint64 +} + +// newRollingMerkleTreeStore returns a rollingMerkleTreeStore with the roots +// allocated based on the passed in size. +// +// NOTE: If more elements are added in than the passed in size, there will be +// additional allocations which in turn hurts performance. +func newRollingMerkleTreeStore(size uint64) rollingMerkleTreeStore { + var alloc int + if size != 0 { + alloc = bits.Len64(size - 1) + } + return rollingMerkleTreeStore{roots: make([]chainhash.Hash, 0, alloc)} +} + +// add adds a single hash to the merkle tree store. Refer to algorithm 1 "AddOne" in +// the utreexo paper (https://eprint.iacr.org/2019/611.pdf) for the exact algorithm. +func (s *rollingMerkleTreeStore) add(add chainhash.Hash) { + // We can tell where the roots are by looking at the binary representation + // of the numLeaves. Wherever there's a 1, there's a root. + // + // numLeaves of 8 will be '1000' in binary, so there will be one root at + // row 3. numLeaves of 3 will be '11' in binary, so there's two roots. One at + // row 0 and one at row 1. Row 0 is the leaf row. + // + // In this loop below, we're looking for these roots by checking if there's + // a '1', starting from the LSB. If there is a '1', we'll hash the root being + // added with that root until we hit a '0'. + newRoot := add + for h := uint8(0); (s.numLeaves>>h)&1 == 1; h++ { + // Pop off the last root. 
+ var root chainhash.Hash + root, s.roots = s.roots[len(s.roots)-1], s.roots[:len(s.roots)-1] + + // Calculate the hash of the new root and append it. + newRoot = HashMerkleBranches(&root, &newRoot) + } + s.roots = append(s.roots, newRoot) + s.numLeaves++ +} + +// calcMerkleRoot returns the merkle root for the passed in transactions. +func (s *rollingMerkleTreeStore) calcMerkleRoot(adds []*btcutil.Tx, witness bool) chainhash.Hash { + for i := range adds { + // If we're computing a witness merkle root, instead of the + // regular txid, we use the modified wtxid which includes a + // transaction's witness data within the digest. Additionally, + // the coinbase's wtxid is all zeroes. + switch { + case witness && i == 0: + var zeroHash chainhash.Hash + s.add(zeroHash) + case witness: + s.add(*adds[i].WitnessHash()) + default: + s.add(*adds[i].Hash()) + } + } + + // If we only have one leaf, then the hash of that tx is the merkle root. + if s.numLeaves == 1 { + return s.roots[0] + } + + // Add on the last tx again if there's an odd number of txs. + if len(adds) > 0 && len(adds)%2 != 0 { + switch { + case witness: + s.add(*adds[len(adds)-1].WitnessHash()) + default: + s.add(*adds[len(adds)-1].Hash()) + } + } + + // If we still have more than 1 root after adding on the last tx again, + // we need to do the same for the upper rows. + // + // For example, the below tree has 6 leaves. For row 1, you'll need to + // hash 'F' with itself to create 'C' so you have something to hash with + // 'B'. For bigger trees we may need to do the same in rows 2 or 3 as + // well. + // + // row :3 A + // / \ + // row :2 B C + // / \ / \ + // row :1 D E F F + // / \ / \ / \ + // row :0 1 2 3 4 5 6 + for len(s.roots) > 1 { + // If we have to keep adding the last node in the set, bitshift + // the num leaves right by 1. This effectively moves the row up + // for calculation. We do this until we reach a row where there's + // an odd number of leaves. 
+ // + // row :3 A + // / \ + // row :2 B C D + // / \ / \ / \ + // row :1 E F G H I J + // / \ / \ / \ / \ / \ / \ + // row :0 1 2 3 4 5 6 7 8 9 10 11 12 + // + // In the above tree, 12 leaves were added and there's an odd amount + // of leaves at row 2. Because of this, we'll bitshift right twice. + currentLeaves := s.numLeaves + for h := uint8(0); (currentLeaves>>h)&1 == 0; h++ { + s.numLeaves >>= 1 + } + + // Add the last root again so that it'll get hashed with itself. + h := s.roots[len(s.roots)-1] + s.add(h) + } + + return s.roots[0] +} diff --git a/blockchain/rolling_merkle_test.go b/blockchain/rolling_merkle_test.go new file mode 100644 index 0000000000..e425278bdd --- /dev/null +++ b/blockchain/rolling_merkle_test.go @@ -0,0 +1,174 @@ +package blockchain + +import ( + "testing" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/stretchr/testify/require" +) + +func TestRollingMerkleAdd(t *testing.T) { + tests := []struct { + leaves []chainhash.Hash + expectedRoots []chainhash.Hash + expectedNumLeaves uint64 + }{ + // 00 (00 is also a root) + { + leaves: []chainhash.Hash{ + {0x00}, + }, + expectedRoots: []chainhash.Hash{ + {0x00}, + }, + expectedNumLeaves: 1, + }, + + // root + // |---\ + // 00 01 + { + leaves: []chainhash.Hash{ + {0x00}, + {0x01}, + }, + expectedRoots: []chainhash.Hash{ + func() chainhash.Hash { + hash, err := chainhash.NewHashFromStr( + "c2bf026e62af95cd" + + "7b785e2cd5a5f1ec" + + "01fafda85886a8eb" + + "d34482c0b05dc2c2") + require.NoError(t, err) + return *hash + }(), + }, + expectedNumLeaves: 2, + }, + + // root + // |---\ + // 00 01 02 + { + leaves: []chainhash.Hash{ + {0x00}, + {0x01}, + {0x02}, + }, + expectedRoots: []chainhash.Hash{ + func() chainhash.Hash { + hash, err := chainhash.NewHashFromStr( + "c2bf026e62af95cd" + + "7b785e2cd5a5f1ec" + + "01fafda85886a8eb" + + "d34482c0b05dc2c2") + require.NoError(t, err) + return *hash + }(), + {0x02}, + }, + expectedNumLeaves: 3, + }, + + // root + // |-------\ + // br br + 
// |---\ |---\ + // 00 01 02 03 + { + leaves: []chainhash.Hash{ + {0x00}, + {0x01}, + {0x02}, + {0x03}, + }, + expectedRoots: []chainhash.Hash{ + func() chainhash.Hash { + hash, err := chainhash.NewHashFromStr( + "270714425ea73eb8" + + "5942f0f705788f25" + + "1fefa3f533410a3f" + + "338de46e641082c4") + require.NoError(t, err) + return *hash + }(), + }, + expectedNumLeaves: 4, + }, + + // root + // |-------\ + // br br + // |---\ |---\ + // 00 01 02 03 04 + { + leaves: []chainhash.Hash{ + {0x00}, + {0x01}, + {0x02}, + {0x03}, + {0x04}, + }, + expectedRoots: []chainhash.Hash{ + func() chainhash.Hash { + hash, err := chainhash.NewHashFromStr( + "270714425ea73eb8" + + "5942f0f705788f25" + + "1fefa3f533410a3f" + + "338de46e641082c4") + require.NoError(t, err) + return *hash + }(), + {0x04}, + }, + expectedNumLeaves: 5, + }, + + // root + // |-------\ + // br br root + // |---\ |---\ |---\ + // 00 01 02 03 04 05 + { + leaves: []chainhash.Hash{ + {0x00}, + {0x01}, + {0x02}, + {0x03}, + {0x04}, + {0x05}, + }, + expectedRoots: []chainhash.Hash{ + func() chainhash.Hash { + hash, err := chainhash.NewHashFromStr( + "270714425ea73eb8" + + "5942f0f705788f25" + + "1fefa3f533410a3f" + + "338de46e641082c4") + require.NoError(t, err) + return *hash + }(), + func() chainhash.Hash { + hash, err := chainhash.NewHashFromStr( + "e5c2407ba454ffeb" + + "28cf0c50c5c293a8" + + "4c9a75788f8a8f35" + + "ccb974e606280377") + require.NoError(t, err) + return *hash + }(), + }, + expectedNumLeaves: 6, + }, + } + + for _, test := range tests { + s := newRollingMerkleTreeStore(uint64(len(test.leaves))) + for _, leaf := range test.leaves { + s.add(leaf) + } + + require.Equal(t, s.roots, test.expectedRoots) + require.Equal(t, s.numLeaves, test.expectedNumLeaves) + } +} diff --git a/blockchain/sizehelper.go b/blockchain/sizehelper.go new file mode 100644 index 0000000000..4904a8e4c3 --- /dev/null +++ b/blockchain/sizehelper.go @@ -0,0 +1,239 @@ +// Copyright (c) 2023 The btcsuite developers +// Use of 
this source code is governed by an ISC +// license that can be found in the LICENSE file. +package blockchain + +import ( + "math" +) + +// These constants are related to bitcoin. +const ( + // outpointSize is the size of an outpoint. + // + // This value is calculated by running the following: + // unsafe.Sizeof(wire.OutPoint{}) + outpointSize = 36 + + // uint64Size is the size of an uint64 allocated in memory. + uint64Size = 8 + + // bucketSize is the size of the bucket in the cache map. Exact + // calculation is (16 + keysize*8 + valuesize*8) where for the map of: + // map[wire.OutPoint]*UtxoEntry would have a keysize=36 and valuesize=8. + // + // https://github.com/golang/go/issues/34561#issuecomment-536115805 + bucketSize = 16 + uint64Size*outpointSize + uint64Size*uint64Size + + // This value is calculated by running the following on a 64-bit system: + // unsafe.Sizeof(UtxoEntry{}) + baseEntrySize = 40 + + // pubKeyHashLen is the length of a P2PKH script. + pubKeyHashLen = 25 + + // avgEntrySize is how much each entry we expect it to be. Since most + // txs are p2pkh, we can assume the entry to be more or less the size + // of a p2pkh tx. We add on 7 to make it 32 since 64 bit systems will + // align by 8 bytes. + avgEntrySize = baseEntrySize + (pubKeyHashLen + 7) +) + +// The code here is shamelessely taken from the go runtime package. All the relevant +// code and variables are copied to here. These values are only correct for a 64 bit +// system. + +const ( + _MaxSmallSize = 32768 + smallSizeDiv = 8 + smallSizeMax = 1024 + largeSizeDiv = 128 + _NumSizeClasses = 68 + _PageShift = 13 + _PageSize = 1 << _PageShift + + MaxUintptr = ^uintptr(0) + + // Maximum number of key/elem pairs a bucket can hold. + bucketCntBits = 3 + bucketCnt = 1 << bucketCntBits + + // Maximum average load of a bucket that triggers growth is 6.5. + // Represent as loadFactorNum/loadFactorDen, to allow integer math. 
+ loadFactorNum = 13 + loadFactorDen = 2 + + // _64bit = 1 on 64-bit systems, 0 on 32-bit systems + _64bit = 1 << (^uintptr(0) >> 63) / 2 + + // PtrSize is the size of a pointer in bytes - unsafe.Sizeof(uintptr(0)) + // but as an ideal constant. It is also the size of the machine's native + // word size (that is, 4 on 32-bit systems, 8 on 64-bit). + PtrSize = 4 << (^uintptr(0) >> 63) + + // heapAddrBits is the number of bits in a heap address that's actually + // available for memory allocation. + // + // NOTE (guggero): For 64-bit systems, we just assume 40 bits of address + // space available, as that seems to be the lowest common denominator. + // See heapAddrBits in runtime/malloc.go of the standard library for + // more details + heapAddrBits = 32 + (_64bit * 8) + + // maxAlloc is the maximum size of an allocation on the heap. + // + // NOTE(guggero): With the somewhat simplified heapAddrBits calculation + // above, this will currently limit the maximum allocation size of the + // UTXO cache to around 300GiB on 64-bit systems. This should be more + // than enough for the foreseeable future, but if we ever need to + // increase it, we should probably use the same calculation as the + // standard library. 
+ maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1 +) + +var class_to_size = [_NumSizeClasses]uint16{0, 8, 16, 24, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256, 288, 320, 352, 384, 416, 448, 480, 512, 576, 640, 704, 768, 896, 1024, 1152, 1280, 1408, 1536, 1792, 2048, 2304, 2688, 3072, 3200, 3456, 4096, 4864, 5376, 6144, 6528, 6784, 6912, 8192, 9472, 9728, 10240, 10880, 12288, 13568, 14336, 16384, 18432, 19072, 20480, 21760, 24576, 27264, 28672, 32768} +var size_to_class8 = [smallSizeMax/smallSizeDiv + 1]uint8{0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 30, 30, 30, 30, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32} +var size_to_class128 = [(_MaxSmallSize-smallSizeMax)/largeSizeDiv + 1]uint8{32, 33, 34, 35, 36, 37, 37, 38, 38, 39, 39, 40, 40, 40, 41, 41, 41, 42, 43, 43, 44, 44, 44, 44, 44, 45, 45, 45, 45, 45, 45, 46, 46, 46, 46, 47, 47, 47, 47, 47, 47, 48, 48, 48, 49, 49, 50, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 53, 53, 54, 54, 54, 54, 55, 55, 55, 55, 55, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 58, 58, 58, 58, 58, 58, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 61, 61, 61, 61, 61, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 67, 67, 67, 67, 
67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67} + +// calculateRoughMapSize returns a close enough estimate of the +// total memory allocated by a map. +// hint should be the same value as the number you give when +// making a map with the following syntax: make(map[k]v, hint) +// +// bucketsize is (16 + keysize*8 + valuesize*8). For a map of: +// map[int64]int64, keysize=8 and valuesize=8. There are edge cases +// where the bucket size is different that I can't find the source code +// for. https://github.com/golang/go/issues/34561#issuecomment-536115805 +// +// I suspect it's because of alignment and how the compiler handles it but +// when compared with how much the compiler allocates, it's a couple hundred +// bytes off. +func calculateRoughMapSize(hint int, bucketSize uintptr) int { + // This code is copied from makemap() in runtime/map.go. + // + // TODO check once in a while to see if this algorithm gets + // changed. + mem, overflow := mulUintptr(uintptr(hint), uintptr(bucketSize)) + if overflow || mem > maxAlloc { + hint = 0 + } + + // Find the size parameter B which will hold the requested # of elements. + // For hint < 0 overLoadFactor returns false since hint < bucketCnt. + B := uint8(0) + for overLoadFactor(hint, B) { + B++ + } + + // This code is copied from makeBucketArray() in runtime/map.go. + // + // TODO check once in a while to see if this algorithm gets + // changed. + // + // For small b, overflow buckets are unlikely. + // Avoid the overhead of the calculation. + base := bucketShift(B) + numBuckets := base + if B >= 4 { + // Add on the estimated number of overflow buckets + // required to insert the median number of elements + // used with this value of b. 
+ numBuckets += bucketShift(B - 4) + sz := bucketSize * numBuckets + up := roundupsize(sz) + if up != sz { + numBuckets = up / bucketSize + } + } + total, _ := mulUintptr(bucketSize, numBuckets) + + if base != numBuckets { + // Add 24 for mapextra struct overhead. Refer to + // runtime/map.go in the std library for the struct. + total += 24 + } + + // 48 is the number of bytes needed for the map header in a + // 64 bit system. Refer to hmap in runtime/map.go in the go + // standard library. + total += 48 + return int(total) +} + +// calculateMinEntries returns the minimum number of entries that will make the +// map allocate the given total bytes. -1 on the returned entry count will +// make the map allocate half as much total bytes (for returned entry count that's +// greater than 0). +func calculateMinEntries(totalBytes int, bucketSize int) int { + // 48 is the number of bytes needed for the map header in a + // 64 bit system. Refer to hmap in runtime/map.go in the go + // standard library. + totalBytes -= 48 + + numBuckets := totalBytes / bucketSize + B := uint8(math.Log2(float64(numBuckets))) + if B < 4 { + switch B { + case 0: + return 0 + case 1: + return 9 + case 2: + return 14 + default: + return 27 + } + } + + B -= 1 + + return (int(loadFactorNum * (bucketShift(B) / loadFactorDen))) + 1 +} + +// mulUintptr returns a * b and whether the multiplication overflowed. +// On supported platforms this is an intrinsic lowered by the compiler. +func mulUintptr(a, b uintptr) (uintptr, bool) { + if a|b < 1<<(4*PtrSize) || a == 0 { + return a * b, false + } + overflow := b > MaxUintptr/a + return a * b, overflow +} + +// divRoundUp returns ceil(n / a). +func divRoundUp(n, a uintptr) uintptr { + // a is generally a power of two. This will get inlined and + // the compiler will optimize the division. + return (n + a - 1) / a +} + +// alignUp rounds n up to a multiple of a. a must be a power of 2. 
+func alignUp(n, a uintptr) uintptr { + return (n + a - 1) &^ (a - 1) +} + +// Returns size of the memory block that mallocgc will allocate if you ask for the size. +func roundupsize(size uintptr) uintptr { + if size < _MaxSmallSize { + if size <= smallSizeMax-8 { + return uintptr(class_to_size[size_to_class8[divRoundUp(size, smallSizeDiv)]]) + } else { + return uintptr(class_to_size[size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]]) + } + } + if size+_PageSize < size { + return size + } + return alignUp(size, _PageSize) +} + +// overLoadFactor reports whether count items placed in 1< bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen) +} + +// bucketShift returns 1< 0 { + gotMinRoughMapSizeWrong := calculateRoughMapSize(minEntries-1, bucketSize) + if gotMinRoughMapSize == gotMinRoughMapSizeWrong { + t.Errorf("For hint %d decremented by 1, expected %v, got %v\n", + i, gotRoughMapSizeWrong/2, gotRoughMapSizeWrong) + } + } + } +} diff --git a/blockchain/testdata/blk_0_to_14131.dat b/blockchain/testdata/blk_0_to_14131.dat new file mode 100644 index 0000000000..cc1a789e40 Binary files /dev/null and b/blockchain/testdata/blk_0_to_14131.dat differ diff --git a/blockchain/thresholdstate.go b/blockchain/thresholdstate.go index 35653bf8fc..d62c2de3c2 100644 --- a/blockchain/thresholdstate.go +++ b/blockchain/thresholdstate.go @@ -153,7 +153,7 @@ func (b *BlockChain) PastMedianTime(blockHeader *wire.BlockHeader) (time.Time, e blockNode := newBlockNode(blockHeader, prevNode) - return blockNode.CalcPastMedianTime(), nil + return CalcPastMedianTime(blockNode), nil } // thresholdStateTransition given a state, a previous node, and a toeholds diff --git a/blockchain/upgrade.go b/blockchain/upgrade.go index 253ca62e1e..34149e44a8 100644 --- a/blockchain/upgrade.go +++ b/blockchain/upgrade.go @@ -232,24 +232,25 @@ func determineMainChainBlocks(blocksMap map[chainhash.Hash]*blockChainContext, t // // The legacy format is as follows: // -//
[,...] +//
[,...] // -// Field Type Size -// version VLQ variable -// block height VLQ variable -// header code VLQ variable -// unspentness bitmap []byte variable -// compressed txouts -// compressed amount VLQ variable -// compressed script []byte variable +// Field Type Size +// version VLQ variable +// block height VLQ variable +// header code VLQ variable +// unspentness bitmap []byte variable +// compressed txouts +// compressed amount VLQ variable +// compressed script []byte variable // // The serialized header code format is: -// bit 0 - containing transaction is a coinbase -// bit 1 - output zero is unspent -// bit 2 - output one is unspent -// bits 3-x - number of bytes in unspentness bitmap. When both bits 1 and 2 -// are unset, it encodes N-1 since there must be at least one unspent -// output. +// +// bit 0 - containing transaction is a coinbase +// bit 1 - output zero is unspent +// bit 2 - output one is unspent +// bits 3-x - number of bytes in unspentness bitmap. When both bits 1 and 2 +// are unset, it encodes N-1 since there must be at least one unspent +// output. 
// // The rationale for the header code scheme is as follows: // - Transactions which only pay to a single output and a change output are @@ -269,65 +270,65 @@ func determineMainChainBlocks(blocksMap map[chainhash.Hash]*blockChainContext, t // From tx in main blockchain: // Blk 1, 0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098 // -// 010103320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52 -// <><><><------------------------------------------------------------------> -// | | \--------\ | -// | height | compressed txout 0 -// version header code +// 010103320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52 +// <><><><------------------------------------------------------------------> +// | | \--------\ | +// | height | compressed txout 0 +// version header code // -// - version: 1 -// - height: 1 -// - header code: 0x03 (coinbase, output zero unspent, 0 bytes of unspentness) -// - unspentness: Nothing since it is zero bytes -// - compressed txout 0: -// - 0x32: VLQ-encoded compressed amount for 5000000000 (50 BTC) -// - 0x04: special script type pay-to-pubkey -// - 0x96...52: x-coordinate of the pubkey +// - version: 1 +// - height: 1 +// - header code: 0x03 (coinbase, output zero unspent, 0 bytes of unspentness) +// - unspentness: Nothing since it is zero bytes +// - compressed txout 0: +// - 0x32: VLQ-encoded compressed amount for 5000000000 (50 BTC) +// - 0x04: special script type pay-to-pubkey +// - 0x96...52: x-coordinate of the pubkey // // Example 2: // From tx in main blockchain: // Blk 113931, 4a16969aa4764dd7507fc1de7f0baa4850a246de90c45e59a3207f9a26b5036f // -// 0185f90b0a011200e2ccd6ec7c6e2e581349c77e067385fa8236bf8a800900b8025be1b3efc63b0ad48e7f9f10e87544528d58 -// <><----><><><------------------------------------------><--------------------------------------------> -// | | | \-------------------\ | | -// version | \--------\ unspentness | compressed txout 2 -// height header code compressed txout 0 
+// 0185f90b0a011200e2ccd6ec7c6e2e581349c77e067385fa8236bf8a800900b8025be1b3efc63b0ad48e7f9f10e87544528d58 +// <><----><><><------------------------------------------><--------------------------------------------> +// | | | \-------------------\ | | +// version | \--------\ unspentness | compressed txout 2 +// height header code compressed txout 0 // -// - version: 1 -// - height: 113931 -// - header code: 0x0a (output zero unspent, 1 byte in unspentness bitmap) -// - unspentness: [0x01] (bit 0 is set, so output 0+2 = 2 is unspent) -// NOTE: It's +2 since the first two outputs are encoded in the header code -// - compressed txout 0: -// - 0x12: VLQ-encoded compressed amount for 20000000 (0.2 BTC) -// - 0x00: special script type pay-to-pubkey-hash -// - 0xe2...8a: pubkey hash -// - compressed txout 2: -// - 0x8009: VLQ-encoded compressed amount for 15000000 (0.15 BTC) -// - 0x00: special script type pay-to-pubkey-hash -// - 0xb8...58: pubkey hash +// - version: 1 +// - height: 113931 +// - header code: 0x0a (output zero unspent, 1 byte in unspentness bitmap) +// - unspentness: [0x01] (bit 0 is set, so output 0+2 = 2 is unspent) +// NOTE: It's +2 since the first two outputs are encoded in the header code +// - compressed txout 0: +// - 0x12: VLQ-encoded compressed amount for 20000000 (0.2 BTC) +// - 0x00: special script type pay-to-pubkey-hash +// - 0xe2...8a: pubkey hash +// - compressed txout 2: +// - 0x8009: VLQ-encoded compressed amount for 15000000 (0.15 BTC) +// - 0x00: special script type pay-to-pubkey-hash +// - 0xb8...58: pubkey hash // // Example 3: // From tx in main blockchain: // Blk 338156, 1b02d1c8cfef60a189017b9a420c682cf4a0028175f2f563209e4ff61c8c3620 // -// 0193d06c100000108ba5b9e763011dd46a006572d820e448e12d2bbb38640bc718e6 -// <><----><><----><--------------------------------------------------> -// | | | \-----------------\ | -// version | \--------\ unspentness | -// height header code compressed txout 22 +// 
0193d06c100000108ba5b9e763011dd46a006572d820e448e12d2bbb38640bc718e6 +// <><----><><----><--------------------------------------------------> +// | | | \-----------------\ | +// version | \--------\ unspentness | +// height header code compressed txout 22 // -// - version: 1 -// - height: 338156 -// - header code: 0x10 (2+1 = 3 bytes in unspentness bitmap) -// NOTE: It's +1 since neither bit 1 nor 2 are set, so N-1 is encoded. -// - unspentness: [0x00 0x00 0x10] (bit 20 is set, so output 20+2 = 22 is unspent) -// NOTE: It's +2 since the first two outputs are encoded in the header code -// - compressed txout 22: -// - 0x8ba5b9e763: VLQ-encoded compressed amount for 366875659 (3.66875659 BTC) -// - 0x01: special script type pay-to-script-hash -// - 0x1d...e6: script hash +// - version: 1 +// - height: 338156 +// - header code: 0x10 (2+1 = 3 bytes in unspentness bitmap) +// NOTE: It's +1 since neither bit 1 nor 2 are set, so N-1 is encoded. +// - unspentness: [0x00 0x00 0x10] (bit 20 is set, so output 20+2 = 22 is unspent) +// NOTE: It's +2 since the first two outputs are encoded in the header code +// - compressed txout 22: +// - 0x8ba5b9e763: VLQ-encoded compressed amount for 366875659 (3.66875659 BTC) +// - 0x01: special script type pay-to-script-hash +// - 0x1d...e6: script hash func deserializeUtxoEntryV0(serialized []byte) (map[uint32]*UtxoEntry, error) { // Deserialize the version. // diff --git a/blockchain/utxocache.go b/blockchain/utxocache.go new file mode 100644 index 0000000000..af7a3b7b6f --- /dev/null +++ b/blockchain/utxocache.go @@ -0,0 +1,744 @@ +// Copyright (c) 2023 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package blockchain + +import ( + "container/list" + "fmt" + "sync" + "time" + + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/database" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" +) + +// mapSlice is a slice of maps for utxo entries. The slice of maps are needed to +// guarantee that the map will only take up N amount of bytes. As of v1.20, the +// go runtime will allocate 2^N + few extra buckets, meaning that for large N, we'll +// allocate a lot of extra memory if the amount of entries goes over the previously +// allocated buckets. A slice of maps allows us to have a better control of how much +// total memory gets allocated by all the maps. +type mapSlice struct { + // mtx protects against concurrent access for the map slice. + mtx sync.Mutex + + // maps are the underlying maps in the slice of maps. + maps []map[wire.OutPoint]*UtxoEntry + + // maxEntries is the maximum amount of elements that the map is allocated for. + maxEntries []int + + // maxTotalMemoryUsage is the maximum memory usage in bytes that the state + // should contain in normal circumstances. + maxTotalMemoryUsage uint64 +} + +// length returns the length of all the maps in the map slice added together. +// +// This function is safe for concurrent access. +func (ms *mapSlice) length() int { + ms.mtx.Lock() + defer ms.mtx.Unlock() + + var l int + for _, m := range ms.maps { + l += len(m) + } + + return l +} + +// size returns the size of all the maps in the map slice added together. +// +// This function is safe for concurrent access. +func (ms *mapSlice) size() int { + ms.mtx.Lock() + defer ms.mtx.Unlock() + + var size int + for _, num := range ms.maxEntries { + size += calculateRoughMapSize(num, bucketSize) + } + + return size +} + +// get looks for the outpoint in all the maps in the map slice and returns +// the entry. nil and false is returned if the outpoint is not found. 
+// +// This function is safe for concurrent access. +func (ms *mapSlice) get(op wire.OutPoint) (*UtxoEntry, bool) { + ms.mtx.Lock() + defer ms.mtx.Unlock() + + var entry *UtxoEntry + var found bool + + for _, m := range ms.maps { + entry, found = m[op] + if found { + return entry, found + } + } + + return nil, false +} + +// put puts the outpoint and the entry into one of the maps in the map slice. If the +// existing maps are all full, it will allocate a new map based on how much memory we +// have left over. Leftover memory is calculated as: +// maxTotalMemoryUsage - (totalEntryMemory + mapSlice.size()) +// +// This function is safe for concurrent access. +func (ms *mapSlice) put(op wire.OutPoint, entry *UtxoEntry, totalEntryMemory uint64) { + ms.mtx.Lock() + defer ms.mtx.Unlock() + + for i, maxNum := range ms.maxEntries { + m := ms.maps[i] + _, found := m[op] + if found { + // If the key is found, overwrite it. + m[op] = entry + return // Return as we were successful in adding the entry. + } + if len(m) >= maxNum { + // Don't try to insert if the map already at max since + // that'll force the map to allocate double the memory it's + // currently taking up. + continue + } + + m[op] = entry + return // Return as we were successful in adding the entry. + } + + // We only reach this code if we've failed to insert into the map above as + // all the current maps were full. We thus make a new map and insert into + // it. + m := ms.makeNewMap(totalEntryMemory) + m[op] = entry +} + +// delete attempts to delete the given outpoint in all of the maps. No-op if the +// outpoint doesn't exist. +// +// This function is safe for concurrent access. +func (ms *mapSlice) delete(op wire.OutPoint) { + ms.mtx.Lock() + defer ms.mtx.Unlock() + + for i := 0; i < len(ms.maps); i++ { + delete(ms.maps[i], op) + } +} + +// makeNewMap makes and appends the new map into the map slice. +// +// This function is NOT safe for concurrent access and must be called with the +// lock held. 
+func (ms *mapSlice) makeNewMap(totalEntryMemory uint64) map[wire.OutPoint]*UtxoEntry { + // Get the size of the leftover memory. + memSize := ms.maxTotalMemoryUsage - totalEntryMemory + for _, maxNum := range ms.maxEntries { + memSize -= uint64(calculateRoughMapSize(maxNum, bucketSize)) + } + + // Get a new map that's sized to house inside the leftover memory. + // -1 on the returned value will make the map allocate half as much total + // bytes. This is done to make sure there's still room left for utxo + // entries to take up. + numMaxElements := calculateMinEntries(int(memSize), bucketSize+avgEntrySize) + numMaxElements -= 1 + ms.maxEntries = append(ms.maxEntries, numMaxElements) + ms.maps = append(ms.maps, make(map[wire.OutPoint]*UtxoEntry, numMaxElements)) + + return ms.maps[len(ms.maps)-1] +} + +// deleteMaps deletes all maps except for the first one which should be the biggest. +// +// This function is safe for concurrent access. +func (ms *mapSlice) deleteMaps() { + ms.mtx.Lock() + defer ms.mtx.Unlock() + + size := ms.maxEntries[0] + ms.maxEntries = []int{size} + ms.maps = ms.maps[:1] +} + +const ( + // utxoFlushPeriodicInterval is the interval at which a flush is performed + // when the flush mode FlushPeriodic is used. This is used when the initial + // block download is complete and it's useful to flush periodically in case + // of unforseen shutdowns. + utxoFlushPeriodicInterval = time.Minute * 5 +) + +// FlushMode is used to indicate the different urgency types for a flush. +type FlushMode uint8 + +const ( + // FlushRequired is the flush mode that means a flush must be performed + // regardless of the cache state. For example right before shutting down. + FlushRequired FlushMode = iota + + // FlushPeriodic is the flush mode that means a flush can be performed + // when it would be almost needed. This is used to periodically signal when + // no I/O heavy operations are expected soon, so there is time to flush. 
+ FlushPeriodic + + // FlushIfNeeded is the flush mode that means a flush must be performed only + // if the cache is exceeding a safety threshold very close to its maximum + // size. This is used mostly internally in between operations that can + // increase the cache size. + FlushIfNeeded +) + +// utxoCache is a cached utxo view in the chainstate of a BlockChain. +type utxoCache struct { + db database.DB + + // maxTotalMemoryUsage is the maximum memory usage in bytes that the state + // should contain in normal circumstances. + maxTotalMemoryUsage uint64 + + // cachedEntries keeps the internal cache of the utxo state. The tfModified + // flag indicates that the state of the entry (potentially) deviates from the + // state in the database. Explicit nil values in the map are used to + // indicate that the database does not contain the entry. + cachedEntries mapSlice + totalEntryMemory uint64 // Total memory usage in bytes. + + // Below fields are used to indicate when the last flush happened. + lastFlushHash chainhash.Hash + lastFlushTime time.Time +} + +// newUtxoCache initiates a new utxo cache instance with its memory usage limited +// to the given maximum. +func newUtxoCache(db database.DB, maxTotalMemoryUsage uint64) *utxoCache { + // While the entry isn't included in the map size, add the average size to the + // bucket size so we get some leftover space for entries to take up. + numMaxElements := calculateMinEntries(int(maxTotalMemoryUsage), bucketSize+avgEntrySize) + numMaxElements -= 1 + + log.Infof("Pre-alloacting for %d MiB: ", maxTotalMemoryUsage/(1024*1024)+1) + + m := make(map[wire.OutPoint]*UtxoEntry, numMaxElements) + + return &utxoCache{ + db: db, + maxTotalMemoryUsage: maxTotalMemoryUsage, + cachedEntries: mapSlice{ + maps: []map[wire.OutPoint]*UtxoEntry{m}, + maxEntries: []int{numMaxElements}, + maxTotalMemoryUsage: maxTotalMemoryUsage, + }, + } +} + +// totalMemoryUsage returns the total memory usage in bytes of the UTXO cache. 
+func (s *utxoCache) totalMemoryUsage() uint64 { + // Total memory is the map size + the size that the utxo entries are + // taking up. + size := uint64(s.cachedEntries.size()) + size += s.totalEntryMemory + + return size +} + +// fetchEntries returns the UTXO entries for the given outpoints. The function always +// returns as many entries as there are outpoints and the returns entries are in the +// same order as the outpoints. It returns nil if there is no entry for the outpoint +// in the UTXO set. +// +// The returned entries are NOT safe for concurrent access. +func (s *utxoCache) fetchEntries(outpoints []wire.OutPoint) ([]*UtxoEntry, error) { + entries := make([]*UtxoEntry, len(outpoints)) + var ( + missingOps []wire.OutPoint + missingOpsIdx []int + ) + for i := range outpoints { + if entry, ok := s.cachedEntries.get(outpoints[i]); ok { + entries[i] = entry + continue + } + + // At this point, we have missing outpoints. Allocate them now + // so that we never allocate if the cache never misses. + if len(missingOps) == 0 { + missingOps = make([]wire.OutPoint, 0, len(outpoints)) + missingOpsIdx = make([]int, 0, len(outpoints)) + } + + missingOpsIdx = append(missingOpsIdx, i) + missingOps = append(missingOps, outpoints[i]) + } + + // Return early and don't attempt access the database if we don't have any + // missing outpoints. + if len(missingOps) == 0 { + return entries, nil + } + + // Fetch the missing outpoints in the cache from the database. + dbEntries := make([]*UtxoEntry, len(missingOps)) + err := s.db.View(func(dbTx database.Tx) error { + utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName) + + for i := range missingOps { + entry, err := dbFetchUtxoEntry(dbTx, utxoBucket, missingOps[i]) + if err != nil { + return err + } + + dbEntries[i] = entry + } + + return nil + }) + if err != nil { + return nil, err + } + + // Add each of the entries to the UTXO cache and update their memory + // usage. 
+ // + // NOTE: When the fetched entry is nil, it is still added to the cache + // as a miss; this prevents future lookups to perform the same database + // fetch. + for i := range dbEntries { + s.cachedEntries.put(missingOps[i], dbEntries[i], s.totalEntryMemory) + s.totalEntryMemory += dbEntries[i].memoryUsage() + } + + // Fill in the entries with the ones fetched from the database. + for i := range missingOpsIdx { + entries[missingOpsIdx[i]] = dbEntries[i] + } + + return entries, nil +} + +// addTxOut adds the specified output to the cache if it is not provably +// unspendable. When the cache already has an entry for the output, it will be +// overwritten with the given output. All fields will be updated for existing +// entries since it's possible it has changed during a reorg. +func (s *utxoCache) addTxOut(outpoint wire.OutPoint, txOut *wire.TxOut, isCoinBase bool, + blockHeight int32) error { + + // Don't add provably unspendable outputs. + if txscript.IsUnspendable(txOut.PkScript) { + return nil + } + + entry := new(UtxoEntry) + entry.amount = txOut.Value + + // Deep copy the script when the script in the entry differs from the one in + // the txout. This is required since the txout script is a subslice of the + // overall contiguous buffer that the msg tx houses for all scripts within + // the tx. It is deep copied here since this entry may be added to the utxo + // cache, and we don't want the utxo cache holding the entry to prevent all + // of the other tx scripts from getting garbage collected. + entry.pkScript = make([]byte, len(txOut.PkScript)) + copy(entry.pkScript, txOut.PkScript) + + entry.blockHeight = blockHeight + entry.packedFlags = tfFresh | tfModified + if isCoinBase { + entry.packedFlags |= tfCoinBase + } + + s.cachedEntries.put(outpoint, entry, s.totalEntryMemory) + s.totalEntryMemory += entry.memoryUsage() + + return nil +} + +// addTxOuts adds all outputs in the passed transaction which are not provably +// unspendable to the view. 
When the view already has entries for any of the +// outputs, they are simply marked unspent. All fields will be updated for +// existing entries since it's possible it has changed during a reorg. +func (s *utxoCache) addTxOuts(tx *btcutil.Tx, blockHeight int32) error { + // Loop all of the transaction outputs and add those which are not + // provably unspendable. + isCoinBase := IsCoinBase(tx) + prevOut := wire.OutPoint{Hash: *tx.Hash()} + for txOutIdx, txOut := range tx.MsgTx().TxOut { + // Update existing entries. All fields are updated because it's + // possible (although extremely unlikely) that the existing + // entry is being replaced by a different transaction with the + // same hash. This is allowed so long as the previous + // transaction is fully spent. + prevOut.Index = uint32(txOutIdx) + err := s.addTxOut(prevOut, txOut, isCoinBase, blockHeight) + if err != nil { + return err + } + } + + return nil +} + +// addTxIn will add the given input to the cache if the previous outpoint the txin +// is pointing to exists in the utxo set. The utxo that is being spent by the input +// will be marked as spent and if the utxo is fresh (meaning that the database on disk +// never saw it), it will be removed from the cache. +func (s *utxoCache) addTxIn(txIn *wire.TxIn, stxos *[]SpentTxOut) error { + // Ensure the referenced utxo exists in the view. This should + // never happen unless there is a bug is introduced in the code. + entries, err := s.fetchEntries([]wire.OutPoint{txIn.PreviousOutPoint}) + if err != nil { + return err + } + if len(entries) != 1 || entries[0] == nil { + return AssertError(fmt.Sprintf("missing input %v", + txIn.PreviousOutPoint)) + } + + // Only create the stxo details if requested. + entry := entries[0] + if stxos != nil { + // Populate the stxo details using the utxo entry. 
+ stxo := SpentTxOut{ + Amount: entry.Amount(), + PkScript: entry.PkScript(), + Height: entry.BlockHeight(), + IsCoinBase: entry.IsCoinBase(), + } + + *stxos = append(*stxos, stxo) + } + + // Mark the entry as spent. + entry.Spend() + + // If an entry is fresh it indicates that this entry was spent before it could be + // flushed to the database. Because of this, we can just delete it from the map of + // cached entries. + if entry.isFresh() { + // If the entry is fresh, we will always have it in the cache. + s.cachedEntries.delete(txIn.PreviousOutPoint) + s.totalEntryMemory -= entry.memoryUsage() + } else { + // Can leave the entry to be garbage collected as the only purpose + // of this entry now is so that the entry on disk can be deleted. + entry = nil + s.totalEntryMemory -= entry.memoryUsage() + } + + return nil +} + +// addTxIns will add the given inputs of the tx if it's not a coinbase tx and if +// the previous output that the input is pointing to exists in the utxo set. The +// utxo that is being spent by the input will be marked as spent and if the utxo +// is fresh (meaning that the database on disk never saw it), it will be removed +// from the cache. +func (s *utxoCache) addTxIns(tx *btcutil.Tx, stxos *[]SpentTxOut) error { + // Coinbase transactions don't have any inputs to spend. + if IsCoinBase(tx) { + return nil + } + + for _, txIn := range tx.MsgTx().TxIn { + err := s.addTxIn(txIn, stxos) + if err != nil { + return err + } + } + + return nil +} + +// connectTransaction updates the cache by adding all new utxos created by the +// passed transaction and marking and/or removing all utxos that the transactions +// spend as spent. In addition, when the 'stxos' argument is not nil, it will +// be updated to append an entry for each spent txout. An error will be returned +// if the cache and the database does not contain the required utxos. 
+func (s *utxoCache) connectTransaction( + tx *btcutil.Tx, blockHeight int32, stxos *[]SpentTxOut) error { + + err := s.addTxIns(tx, stxos) + if err != nil { + return err + } + + // Add the transaction's outputs as available utxos. + return s.addTxOuts(tx, blockHeight) +} + +// connectTransactions updates the cache by adding all new utxos created by all +// of the transactions in the passed block, marking and/or removing all utxos +// the transactions spend as spent, and setting the best hash for the view to +// the passed block. In addition, when the 'stxos' argument is not nil, it will +// be updated to append an entry for each spent txout. +func (s *utxoCache) connectTransactions(block *btcutil.Block, stxos *[]SpentTxOut) error { + for _, tx := range block.Transactions() { + err := s.connectTransaction(tx, block.Height(), stxos) + if err != nil { + return err + } + } + + return nil +} + +// writeCache writes all the entries that are cached in memory to the database atomically. +func (s *utxoCache) writeCache(dbTx database.Tx, bestState *BestState) error { + // Update commits and flushes the cache to the database. + // NOTE: The database has its own cache which gets atomically written + // to leveldb. + utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName) + for i := range s.cachedEntries.maps { + for outpoint, entry := range s.cachedEntries.maps[i] { + switch { + // If the entry is nil or spent, remove the entry from the database + // and the cache. + case entry == nil || entry.IsSpent(): + err := dbDeleteUtxoEntry(utxoBucket, outpoint) + if err != nil { + return err + } + + // No need to update the cache if the entry was not modified. + case !entry.isModified(): + default: + // Entry is fresh and needs to be put into the database. 
+ err := dbPutUtxoEntry(utxoBucket, outpoint, entry) + if err != nil { + return err + } + } + + delete(s.cachedEntries.maps[i], outpoint) + } + } + s.cachedEntries.deleteMaps() + s.totalEntryMemory = 0 + + // When done, store the best state hash in the database to indicate the state + // is consistent until that hash. + err := dbPutUtxoStateConsistency(dbTx, &bestState.Hash) + if err != nil { + return err + } + + // The best state is the new last flush hash. + s.lastFlushHash = bestState.Hash + s.lastFlushTime = time.Now() + + return nil +} + +// flush flushes the UTXO state to the database if a flush is needed with the given flush mode. +// +// This function MUST be called with the chain state lock held (for writes). +func (s *utxoCache) flush(dbTx database.Tx, mode FlushMode, bestState *BestState) error { + var threshold uint64 + switch mode { + case FlushRequired: + threshold = 0 + + case FlushIfNeeded: + // If we performed a flush in the current best state, we have nothing to do. + if bestState.Hash == s.lastFlushHash { + return nil + } + + threshold = s.maxTotalMemoryUsage + + case FlushPeriodic: + // If the time since the last flush is over the periodic interval, + // force a flush. Otherwise just flush when the cache is full. + if time.Since(s.lastFlushTime) > utxoFlushPeriodicInterval { + threshold = 0 + } else { + threshold = s.maxTotalMemoryUsage + } + } + + if s.totalMemoryUsage() >= threshold { + // Add one to round up the integer division. + totalMiB := s.totalMemoryUsage() / ((1024 * 1024) + 1) + log.Infof("Flushing UTXO cache of %d MiB with %d entries to disk. For large sizes, "+ + "this can take up to several minutes...", totalMiB, s.cachedEntries.length()) + + return s.writeCache(dbTx, bestState) + } + + return nil +} + +// FlushUtxoCache flushes the UTXO state to the database if a flush is needed with the +// given flush mode. +// +// This function is safe for concurrent access. 
+func (b *BlockChain) FlushUtxoCache(mode FlushMode) error { + b.chainLock.Lock() + defer b.chainLock.Unlock() + + return b.db.Update(func(dbTx database.Tx) error { + return b.utxoCache.flush(dbTx, mode, b.BestSnapshot()) + }) +} + +// InitConsistentState checks the consistency status of the utxo state and +// replays blocks if it lags behind the best state of the blockchain. +// +// It needs to be ensured that the chainView passed to this method does not +// get changed during the execution of this method. +func (b *BlockChain) InitConsistentState(tip *blockNode, interrupt <-chan struct{}) error { + s := b.utxoCache + + // Load the consistency status from the database. + var statusBytes []byte + s.db.View(func(dbTx database.Tx) error { + statusBytes = dbFetchUtxoStateConsistency(dbTx) + return nil + }) + + // If no status was found, the database is old and didn't have a cached utxo + // state yet. In that case, we set the status to the best state and write + // this to the database. + if statusBytes == nil { + err := s.db.Update(func(dbTx database.Tx) error { + return dbPutUtxoStateConsistency(dbTx, &tip.hash) + }) + + // Set the last flush hash as it's the default value of 0s. + s.lastFlushHash = tip.hash + + return err + } + + statusHash, err := chainhash.NewHash(statusBytes) + if err != nil { + return err + } + + // If state is consistent, we are done. + if statusHash.IsEqual(&tip.hash) { + log.Debugf("UTXO state consistent at (%d:%v)", tip.height, tip.hash) + + // The last flush hash is set to the default value of all 0s. Set + // it to the tip since we checked it's consistent. + s.lastFlushHash = tip.hash + + return nil + } + + lastFlushNode := b.index.LookupNode(statusHash) + log.Infof("Reconstructing UTXO state after an unclean shutdown. 
The UTXO state is "+ + "consistent at block %s (%d) but the chainstate is at block %s (%d), This may "+ + "take a long time...", statusHash.String(), lastFlushNode.height, + tip.hash.String(), tip.height) + + // Even though this should always be true, make sure the fetched hash is in + // the best chain. + fork := b.bestChain.FindFork(lastFlushNode) + if fork == nil { + return AssertError(fmt.Sprintf("last utxo consistency status contains "+ + "hash that is not in best chain: %v", statusHash)) + } + + // We never disconnect blocks as they cannot be inconsistent during a reorganization. + // This is because The cache is flushed before the reorganization begins and the utxo + // set at each block disconnect is written atomically to the database. + node := lastFlushNode + + // We replay the blocks from the last consistent state up to the best + // state. Iterate forward from the consistent node to the tip of the best + // chain. + attachNodes := list.New() + for n := tip; n.height >= 0; n = n.parent { + if n == fork { + break + } + attachNodes.PushFront(n) + } + + for e := attachNodes.Front(); e != nil; e = e.Next() { + node = e.Value.(*blockNode) + + var block *btcutil.Block + err := s.db.View(func(dbTx database.Tx) error { + block, err = dbFetchBlockByNode(dbTx, node) + if err != nil { + return err + } + + return err + }) + if err != nil { + return err + } + + err = b.utxoCache.connectTransactions(block, nil) + if err != nil { + return err + } + + // Flush the utxo cache if needed. This will in turn update the + // consistent state to this block. + err = s.db.Update(func(dbTx database.Tx) error { + return s.flush(dbTx, FlushIfNeeded, &BestState{Hash: node.hash, Height: node.height}) + }) + if err != nil { + return err + } + + if interruptRequested(interrupt) { + log.Warn("UTXO state reconstruction interrupted") + + return errInterruptRequested + } + } + log.Debug("UTXO state reconstruction done") + + // Set the last flush hash as it's the default value of 0s. 
+ s.lastFlushHash = tip.hash + s.lastFlushTime = time.Now() + + return nil +} + +// flushNeededAfterPrune returns true if the utxo cache needs to be flushed after a prune +// of the block storage. In the case of an unexpected shutdown, the utxo cache needs +// to be reconstructed from where the utxo cache was last flushed. In order for the +// utxo cache to be reconstructed, we always need to have the blocks since the utxo cache +// flush last happened. +// +// Example: if the last flush hash was at height 100 and one of the deleted blocks was at +// height 98, this function will return true. +func (b *BlockChain) flushNeededAfterPrune(deletedBlockHashes []chainhash.Hash) (bool, error) { + lastFlushHeight, err := b.BlockHeightByHash(&b.utxoCache.lastFlushHash) + if err != nil { + return false, err + } + + // Loop through all the block hashes and find out what the highest block height + // among the deleted hashes is. + highestDeletedHeight := int32(-1) + for _, deletedBlockHash := range deletedBlockHashes { + height, err := b.BlockHeightByHash(&deletedBlockHash) + if err != nil { + return false, err + } + + if height > highestDeletedHeight { + highestDeletedHeight = height + } + } + + return highestDeletedHeight >= lastFlushHeight, nil +} diff --git a/blockchain/utxocache_test.go b/blockchain/utxocache_test.go new file mode 100644 index 0000000000..20b2a5b34a --- /dev/null +++ b/blockchain/utxocache_test.go @@ -0,0 +1,915 @@ +// Copyright (c) 2023 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+package blockchain + +import ( + "crypto/sha256" + "encoding/binary" + "fmt" + "path/filepath" + "reflect" + "sync" + "testing" + "time" + + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/database" + "github.com/btcsuite/btcd/database/ffldb" + "github.com/btcsuite/btcd/wire" +) + +func TestMapSlice(t *testing.T) { + tests := []struct { + keys []wire.OutPoint + }{ + { + keys: func() []wire.OutPoint { + outPoints := make([]wire.OutPoint, 1000) + for i := uint32(0); i < uint32(len(outPoints)); i++ { + var buf [4]byte + binary.BigEndian.PutUint32(buf[:], i) + hash := sha256.Sum256(buf[:]) + + op := wire.OutPoint{Hash: hash, Index: i} + outPoints[i] = op + } + return outPoints + }(), + }, + } + + for _, test := range tests { + m := make(map[wire.OutPoint]*UtxoEntry) + + maxSize := calculateRoughMapSize(1000, bucketSize) + + maxEntriesFirstMap := 500 + ms1 := make(map[wire.OutPoint]*UtxoEntry, maxEntriesFirstMap) + ms := mapSlice{ + maps: []map[wire.OutPoint]*UtxoEntry{ms1}, + maxEntries: []int{maxEntriesFirstMap}, + maxTotalMemoryUsage: uint64(maxSize), + } + + for _, key := range test.keys { + m[key] = nil + ms.put(key, nil, 0) + } + + // Put in the same elements twice to test that the map slice won't hold duplicates. 
+ for _, key := range test.keys { + m[key] = nil + ms.put(key, nil, 0) + } + + if len(m) != ms.length() { + t.Fatalf("expected len of %d, got %d", len(m), ms.length()) + } + + for _, key := range test.keys { + expected, found := m[key] + if !found { + t.Fatalf("expected key %s to exist in the go map", key.String()) + } + + got, found := ms.get(key) + if !found { + t.Fatalf("expected key %s to exist in the map slice", key.String()) + } + + if !reflect.DeepEqual(got, expected) { + t.Fatalf("expected value of %v, got %v", expected, got) + } + } + } +} + +// TestMapsliceConcurrency just tests that the mapslice won't result in a panic +// on concurrent access. +func TestMapsliceConcurrency(t *testing.T) { + tests := []struct { + keys []wire.OutPoint + }{ + { + keys: func() []wire.OutPoint { + outPoints := make([]wire.OutPoint, 10000) + for i := uint32(0); i < uint32(len(outPoints)); i++ { + var buf [4]byte + binary.BigEndian.PutUint32(buf[:], i) + hash := sha256.Sum256(buf[:]) + + op := wire.OutPoint{Hash: hash, Index: i} + outPoints[i] = op + } + return outPoints + }(), + }, + } + + for _, test := range tests { + maxSize := calculateRoughMapSize(1000, bucketSize) + + maxEntriesFirstMap := 500 + ms1 := make(map[wire.OutPoint]*UtxoEntry, maxEntriesFirstMap) + ms := mapSlice{ + maps: []map[wire.OutPoint]*UtxoEntry{ms1}, + maxEntries: []int{maxEntriesFirstMap}, + maxTotalMemoryUsage: uint64(maxSize), + } + + var wg sync.WaitGroup + + wg.Add(1) + go func(m *mapSlice, keys []wire.OutPoint) { + defer wg.Done() + for i := 0; i < 5000; i++ { + m.put(keys[i], nil, 0) + } + }(&ms, test.keys) + + wg.Add(1) + go func(m *mapSlice, keys []wire.OutPoint) { + defer wg.Done() + for i := 5000; i < 10000; i++ { + m.put(keys[i], nil, 0) + } + }(&ms, test.keys) + + wg.Add(1) + go func(m *mapSlice) { + defer wg.Done() + for i := 0; i < 10000; i++ { + m.size() + } + }(&ms) + + wg.Add(1) + go func(m *mapSlice) { + defer wg.Done() + for i := 0; i < 10000; i++ { + m.length() + } + }(&ms) + + 
wg.Add(1) + go func(m *mapSlice, keys []wire.OutPoint) { + defer wg.Done() + for i := 0; i < 10000; i++ { + m.get(keys[i]) + } + }(&ms, test.keys) + + wg.Add(1) + go func(m *mapSlice, keys []wire.OutPoint) { + defer wg.Done() + for i := 0; i < 5000; i++ { + m.delete(keys[i]) + } + }(&ms, test.keys) + + wg.Wait() + } +} + +// getValidP2PKHScript returns a valid P2PKH script. Useful as unspendables cannot be +// added to the cache. +func getValidP2PKHScript() []byte { + validP2PKHScript := []byte{ + // OP_DUP + 0x76, + // OP_HASH160 + 0xa9, + // OP_DATA_20 + 0x14, + // <20-byte pubkey hash> + 0xf0, 0x7a, 0xb8, 0xce, 0x72, 0xda, 0x4e, 0x76, + 0x0b, 0x74, 0x7d, 0x48, 0xd6, 0x65, 0xec, 0x96, + 0xad, 0xf0, 0x24, 0xf5, + // OP_EQUALVERIFY + 0x88, + // OP_CHECKSIG + 0xac, + } + return validP2PKHScript +} + +// outpointFromInt generates an outpoint from an int by hashing the int and making +// the given int the index. +func outpointFromInt(i int) wire.OutPoint { + // Boilerplate to create an outpoint. + var buf [4]byte + binary.BigEndian.PutUint32(buf[:], uint32(i)) + hash := sha256.Sum256(buf[:]) + return wire.OutPoint{Hash: hash, Index: uint32(i)} +} + +func TestUtxoCacheEntrySize(t *testing.T) { + type block struct { + txOuts []*wire.TxOut + outOps []wire.OutPoint + txIns []*wire.TxIn + } + tests := []struct { + name string + blocks []block + expectedSize uint64 + }{ + { + name: "one entry", + blocks: func() []block { + return []block{ + { + txOuts: []*wire.TxOut{ + {Value: 10000, PkScript: getValidP2PKHScript()}, + }, + outOps: []wire.OutPoint{ + outpointFromInt(0), + }, + }, + } + }(), + expectedSize: pubKeyHashLen + baseEntrySize, + }, + { + name: "10 entries, 4 spend", + blocks: func() []block { + blocks := make([]block, 0, 10) + for i := 0; i < 10; i++ { + op := outpointFromInt(i) + + block := block{ + txOuts: []*wire.TxOut{ + {Value: 10000, PkScript: getValidP2PKHScript()}, + }, + outOps: []wire.OutPoint{ + op, + }, + } + + // Spend all outs in blocks less than 4. 
+ if i < 4 { + block.txIns = []*wire.TxIn{ + {PreviousOutPoint: op}, + } + } + + blocks = append(blocks, block) + } + return blocks + }(), + // Multipled by 6 since we'll have 6 entries left. + expectedSize: (pubKeyHashLen + baseEntrySize) * 6, + }, + { + name: "spend everything", + blocks: func() []block { + blocks := make([]block, 0, 500) + for i := 0; i < 500; i++ { + op := outpointFromInt(i) + + block := block{ + txOuts: []*wire.TxOut{ + {Value: 1000, PkScript: getValidP2PKHScript()}, + }, + outOps: []wire.OutPoint{ + op, + }, + } + + // Spend all outs in blocks less than 4. + block.txIns = []*wire.TxIn{ + {PreviousOutPoint: op}, + } + + blocks = append(blocks, block) + } + return blocks + }(), + expectedSize: 0, + }, + } + + for _, test := range tests { + // Size is just something big enough so that the mapslice doesn't + // run out of memory. + s := newUtxoCache(nil, 1*1024*1024) + + for height, block := range test.blocks { + for i, out := range block.txOuts { + s.addTxOut(block.outOps[i], out, true, int32(height)) + } + + for _, in := range block.txIns { + s.addTxIn(in, nil) + } + } + + if s.totalEntryMemory != test.expectedSize { + t.Errorf("Failed test %s. Expected size of %d, got %d", + test.name, test.expectedSize, s.totalEntryMemory) + } + } +} + +// assertConsistencyState asserts the utxo consistency states of the blockchain. 
+func assertConsistencyState(chain *BlockChain, hash *chainhash.Hash) error { + var bytes []byte + err := chain.db.View(func(dbTx database.Tx) (err error) { + bytes = dbFetchUtxoStateConsistency(dbTx) + return + }) + if err != nil { + return fmt.Errorf("Error fetching utxo state consistency: %v", err) + } + actualHash, err := chainhash.NewHash(bytes) + if err != nil { + return err + } + if !actualHash.IsEqual(hash) { + return fmt.Errorf("Unexpected consistency hash: %v instead of %v", + actualHash, hash) + } + + return nil +} + +// assertNbEntriesOnDisk asserts that the total number of utxo entries on the +// disk is equal to the given expected number. +func assertNbEntriesOnDisk(chain *BlockChain, expectedNumber int) error { + var nb int + err := chain.db.View(func(dbTx database.Tx) error { + cursor := dbTx.Metadata().Bucket(utxoSetBucketName).Cursor() + nb = 0 + for b := cursor.First(); b; b = cursor.Next() { + nb++ + _, err := deserializeUtxoEntry(cursor.Value()) + if err != nil { + return fmt.Errorf("Failed to deserialize entry: %v", err) + } + } + return nil + }) + if err != nil { + return fmt.Errorf("Error fetching utxo entries: %v", err) + } + if nb != expectedNumber { + return fmt.Errorf("Expected %d elements in the UTXO set, but found %d", + expectedNumber, nb) + } + + return nil +} + +// utxoCacheTestChain creates a test BlockChain to be used for utxo cache tests. +// It uses the regression test parameters, a coin matutiry of 1 block and sets +// the cache size limit to 10 MiB. 
+func utxoCacheTestChain(testName string) (*BlockChain, *chaincfg.Params, func()) { + params := chaincfg.RegressionNetParams + chain, tearDown, err := chainSetup(testName, ¶ms) + if err != nil { + panic(fmt.Sprintf("error loading blockchain with database: %v", err)) + } + + chain.TstSetCoinbaseMaturity(1) + chain.utxoCache.maxTotalMemoryUsage = 10 * 1024 * 1024 + chain.utxoCache.cachedEntries.maxTotalMemoryUsage = chain.utxoCache.maxTotalMemoryUsage + + return chain, ¶ms, tearDown +} + +func TestUtxoCacheFlush(t *testing.T) { + chain, params, tearDown := utxoCacheTestChain("TestUtxoCacheFlush") + defer tearDown() + cache := chain.utxoCache + tip := btcutil.NewBlock(params.GenesisBlock) + + // The chainSetup init triggers the consistency status write. + err := assertConsistencyState(chain, params.GenesisHash) + if err != nil { + t.Fatal(err) + } + + err = assertNbEntriesOnDisk(chain, 0) + if err != nil { + t.Fatal(err) + } + + // LastFlushHash starts with genesis. + if cache.lastFlushHash != *params.GenesisHash { + t.Fatalf("lastFlushHash before first flush expected to be "+ + "genesis block hash, instead was %v", cache.lastFlushHash) + } + + // First, add 10 utxos without flushing. + outPoints := make([]wire.OutPoint, 10) + for i := range outPoints { + op := outpointFromInt(i) + outPoints[i] = op + + // Add the txout. + txOut := wire.TxOut{Value: 10000, PkScript: getValidP2PKHScript()} + cache.addTxOut(op, &txOut, true, int32(i)) + } + + if cache.cachedEntries.length() != len(outPoints) { + t.Fatalf("Expected 10 entries, has %d instead", cache.cachedEntries.length()) + } + + // All entries should be fresh and modified. 
+ for _, m := range cache.cachedEntries.maps { + for outpoint, entry := range m { + if entry == nil { + t.Fatalf("Unexpected nil entry found for %v", outpoint) + } + if !entry.isModified() { + t.Fatal("Entry should be marked mofified") + } + if !entry.isFresh() { + t.Fatal("Entry should be marked fresh") + } + } + } + + // Spend the last outpoint and pop it off from the outpoints slice. + var spendOp wire.OutPoint + spendOp, outPoints = outPoints[len(outPoints)-1], outPoints[:len(outPoints)-1] + cache.addTxIn(&wire.TxIn{PreviousOutPoint: spendOp}, nil) + + if cache.cachedEntries.length() != len(outPoints) { + t.Fatalf("Expected %d entries, has %d instead", + len(outPoints), cache.cachedEntries.length()) + } + + // Not flushed yet. + err = assertConsistencyState(chain, params.GenesisHash) + if err != nil { + t.Fatal(err) + } + + err = assertNbEntriesOnDisk(chain, 0) + if err != nil { + t.Fatal(err) + } + + // Flush. + err = chain.db.Update(func(dbTx database.Tx) error { + return cache.flush(dbTx, FlushRequired, chain.stateSnapshot) + }) + if err != nil { + t.Fatalf("unexpected error while flushing cache: %v", err) + } + if cache.cachedEntries.length() != 0 { + t.Fatalf("Expected 0 entries, has %d instead", cache.cachedEntries.length()) + } + + err = assertConsistencyState(chain, tip.Hash()) + if err != nil { + t.Fatal(err) + } + err = assertNbEntriesOnDisk(chain, len(outPoints)) + if err != nil { + t.Fatal(err) + } + + // Fetch the flushed utxos. + entries, err := cache.fetchEntries(outPoints) + if err != nil { + t.Fatal(err) + } + + // Check that the returned entries are not marked fresh and modified. + for _, entry := range entries { + if entry.isFresh() { + t.Fatal("Entry should not be marked fresh") + } + if entry.isModified() { + t.Fatal("Entry should not be marked modified") + } + } + + // Check that the fetched entries in the cache are not marked fresh and modified. 
+ for _, m := range cache.cachedEntries.maps { + for outpoint, elem := range m { + if elem == nil { + t.Fatalf("Unexpected nil entry found for %v", outpoint) + } + if elem.isFresh() { + t.Fatal("Entry should not be marked fresh") + } + if elem.isModified() { + t.Fatal("Entry should not be marked modified") + } + } + } + + // Spend 5 utxos. + prevLen := len(outPoints) + for i := 0; i < 5; i++ { + spendOp, outPoints = outPoints[len(outPoints)-1], outPoints[:len(outPoints)-1] + cache.addTxIn(&wire.TxIn{PreviousOutPoint: spendOp}, nil) + } + + // Should still have the entries in cache so they can be flushed to disk. + if cache.cachedEntries.length() != prevLen { + t.Fatalf("Expected 10 entries, has %d instead", cache.cachedEntries.length()) + } + + // Flush. + err = chain.db.Update(func(dbTx database.Tx) error { + return cache.flush(dbTx, FlushRequired, chain.stateSnapshot) + }) + if err != nil { + t.Fatalf("unexpected error while flushing cache: %v", err) + } + if cache.cachedEntries.length() != 0 { + t.Fatalf("Expected 0 entries, has %d instead", cache.cachedEntries.length()) + } + + err = assertConsistencyState(chain, tip.Hash()) + if err != nil { + t.Fatal(err) + } + err = assertNbEntriesOnDisk(chain, len(outPoints)) + if err != nil { + t.Fatal(err) + } + + // Add 5 utxos without flushing and test for periodic flushes. + outPoints1 := make([]wire.OutPoint, 5) + for i := range outPoints1 { + // i + prevLen here to avoid collision since we're just hashing + // the int. + op := outpointFromInt(i + prevLen) + outPoints1[i] = op + + // Add the txout. + txOut := wire.TxOut{Value: 10000, PkScript: getValidP2PKHScript()} + cache.addTxOut(op, &txOut, true, int32(i+prevLen)) + } + if cache.cachedEntries.length() != len(outPoints1) { + t.Fatalf("Expected %d entries, has %d instead", + len(outPoints1), cache.cachedEntries.length()) + } + + // Attempt to flush with flush periodic. Shouldn't flush. 
+ err = chain.db.Update(func(dbTx database.Tx) error { + return cache.flush(dbTx, FlushPeriodic, chain.stateSnapshot) + }) + if err != nil { + t.Fatalf("unexpected error while flushing cache: %v", err) + } + if cache.cachedEntries.length() == 0 { + t.Fatalf("Expected %d entries, has %d instead", + len(outPoints1), cache.cachedEntries.length()) + } + + // Arbitrarily set the last flush time to 6 minutes ago. + cache.lastFlushTime = time.Now().Add(-time.Minute * 6) + + // Attempt to flush with flush periodic. Should flush now. + err = chain.db.Update(func(dbTx database.Tx) error { + return cache.flush(dbTx, FlushPeriodic, chain.stateSnapshot) + }) + if err != nil { + t.Fatalf("unexpected error while flushing cache: %v", err) + } + if cache.cachedEntries.length() != 0 { + t.Fatalf("Expected 0 entries, has %d instead", cache.cachedEntries.length()) + } + + err = assertConsistencyState(chain, tip.Hash()) + if err != nil { + t.Fatal(err) + } + err = assertNbEntriesOnDisk(chain, len(outPoints)+len(outPoints1)) + if err != nil { + t.Fatal(err) + } +} + +func TestFlushNeededAfterPrune(t *testing.T) { + // Construct a synthetic block chain with a block index consisting of + // the following structure. + // genesis -> 1 -> 2 -> ... 
-> 15 -> 16 -> 17 -> 18 + tip := tstTip + chain := newFakeChain(&chaincfg.MainNetParams) + chain.utxoCache = newUtxoCache(nil, 0) + branchNodes := chainedNodes(chain.bestChain.Genesis(), 18) + for _, node := range branchNodes { + chain.index.SetStatusFlags(node, statusValid) + chain.index.AddNode(node) + } + chain.bestChain.SetTip(tip(branchNodes)) + + tests := []struct { + name string + lastFlushHash chainhash.Hash + delHashes []chainhash.Hash + expected bool + }{ + { + name: "deleted block up to height 9, last flush hash at block 10", + delHashes: func() []chainhash.Hash { + delBlockHashes := make([]chainhash.Hash, 0, 9) + for i := range branchNodes { + if branchNodes[i].height < 10 { + delBlockHashes = append(delBlockHashes, branchNodes[i].hash) + } + } + + return delBlockHashes + }(), + lastFlushHash: func() chainhash.Hash { + // Just some sanity checking to make sure the height is 10. + if branchNodes[9].height != 10 { + panic("was looking for height 10") + } + return branchNodes[9].hash + }(), + expected: false, + }, + { + name: "deleted blocks up to height 10, last flush hash at block 10", + delHashes: func() []chainhash.Hash { + delBlockHashes := make([]chainhash.Hash, 0, 10) + for i := range branchNodes { + if branchNodes[i].height < 11 { + delBlockHashes = append(delBlockHashes, branchNodes[i].hash) + } + } + return delBlockHashes + }(), + lastFlushHash: func() chainhash.Hash { + // Just some sanity checking to make sure the height is 10. + if branchNodes[9].height != 10 { + panic("was looking for height 10") + } + return branchNodes[9].hash + }(), + expected: true, + }, + { + name: "deleted block height 17, last flush hash at block 5", + delHashes: func() []chainhash.Hash { + delBlockHashes := make([]chainhash.Hash, 1) + delBlockHashes[0] = branchNodes[16].hash + // Just some sanity checking to make sure the height is 10. 
+ if branchNodes[16].height != 17 { + panic("was looking for height 17") + } + return delBlockHashes + }(), + lastFlushHash: func() chainhash.Hash { + // Just some sanity checking to make sure the height is 10. + if branchNodes[4].height != 5 { + panic("was looking for height 5") + } + return branchNodes[4].hash + }(), + expected: true, + }, + { + name: "deleted block height 3, last flush hash at block 4", + delHashes: func() []chainhash.Hash { + delBlockHashes := make([]chainhash.Hash, 1) + delBlockHashes[0] = branchNodes[2].hash + // Just some sanity checking to make sure the height is 10. + if branchNodes[2].height != 3 { + panic("was looking for height 3") + } + return delBlockHashes + }(), + lastFlushHash: func() chainhash.Hash { + // Just some sanity checking to make sure the height is 10. + if branchNodes[3].height != 4 { + panic("was looking for height 4") + } + return branchNodes[3].hash + }(), + expected: false, + }, + } + + for _, test := range tests { + chain.utxoCache.lastFlushHash = test.lastFlushHash + got, err := chain.flushNeededAfterPrune(test.delHashes) + if err != nil { + t.Fatal(err) + } + + if got != test.expected { + t.Fatalf("for test %s, expected need flush to return %v but got %v", + test.name, test.expected, got) + } + } +} + +func TestFlushOnPrune(t *testing.T) { + chain, tearDown, err := chainSetup("TestFlushOnPrune", &chaincfg.MainNetParams) + if err != nil { + panic(fmt.Sprintf("error loading blockchain with database: %v", err)) + } + defer tearDown() + + chain.utxoCache.maxTotalMemoryUsage = 10 * 1024 * 1024 + chain.utxoCache.cachedEntries.maxTotalMemoryUsage = chain.utxoCache.maxTotalMemoryUsage + + // Set the maxBlockFileSize and the prune target small so that we can trigger a + // prune to happen. + maxBlockFileSize := uint32(8192) + chain.pruneTarget = uint64(maxBlockFileSize) * 2 + + // Read blocks from the file. + blocks, err := loadBlocks("blk_0_to_14131.dat") + if err != nil { + t.Fatalf("failed to read block from file. 
%v", err) + } + + syncBlocks := func() { + for i, block := range blocks { + if i == 0 { + // Skip the genesis block. + continue + } + isMainChain, _, err := chain.ProcessBlock(block, BFNone) + if err != nil { + t.Fatal(err) + } + + if !isMainChain { + t.Fatalf("expected block %s to be on the main chain", block.Hash()) + } + } + } + + // Sync the chain. + ffldb.TstRunWithMaxBlockFileSize(chain.db, maxBlockFileSize, syncBlocks) + + // Function that errors out if the block that should exist doesn't exist. + shouldExist := func(dbTx database.Tx, blockHash *chainhash.Hash) { + bytes, err := dbTx.FetchBlock(blockHash) + if err != nil { + t.Fatal(err) + } + block, err := btcutil.NewBlockFromBytes(bytes) + if err != nil { + t.Fatalf("didn't find block %v. %v", blockHash, err) + } + + if !block.Hash().IsEqual(blockHash) { + t.Fatalf("expected to find block %v but got %v", + blockHash, block.Hash()) + } + } + + // Function that errors out if the block that shouldn't exist exists. + shouldNotExist := func(dbTx database.Tx, blockHash *chainhash.Hash) { + bytes, err := dbTx.FetchBlock(chaincfg.MainNetParams.GenesisHash) + if err == nil { + t.Fatalf("expected block %s to be pruned", blockHash) + } + if len(bytes) != 0 { + t.Fatalf("expected block %s to be pruned but got %v", + blockHash, bytes) + } + } + + // The below code checks that the correct blocks were pruned. + chain.db.View(func(dbTx database.Tx) error { + exist := false + for _, block := range blocks { + // Blocks up to the last flush hash should not exist. + // The utxocache is big enough so that it shouldn't flush + // on it being full. It should only flush on prunes. + if block.Hash().IsEqual(&chain.utxoCache.lastFlushHash) { + exist = true + } + + if exist { + shouldExist(dbTx, block.Hash()) + } else { + shouldNotExist(dbTx, block.Hash()) + } + + } + + return nil + }) +} + +func TestInitConsistentState(t *testing.T) { + // Boilerplate for creating a chain. 
+ dbName := "TestFlushOnPrune" + chain, tearDown, err := chainSetup(dbName, &chaincfg.MainNetParams) + if err != nil { + panic(fmt.Sprintf("error loading blockchain with database: %v", err)) + } + defer tearDown() + chain.utxoCache.maxTotalMemoryUsage = 10 * 1024 * 1024 + chain.utxoCache.cachedEntries.maxTotalMemoryUsage = chain.utxoCache.maxTotalMemoryUsage + + // Read blocks from the file. + blocks, err := loadBlocks("blk_0_to_14131.dat") + if err != nil { + t.Fatalf("failed to read block from file. %v", err) + } + + // Sync up to height 13,000. Flush the utxocache at height 11_000. + cacheFlushHeight := 9000 + initialSyncHeight := 12_000 + for i, block := range blocks { + if i == 0 { + // Skip the genesis block. + continue + } + + isMainChain, _, err := chain.ProcessBlock(block, BFNone) + if err != nil { + t.Fatal(err) + } + + if !isMainChain { + t.Fatalf("expected block %s to be on the main chain", block.Hash()) + } + + if i == cacheFlushHeight { + err = chain.FlushUtxoCache(FlushRequired) + if err != nil { + t.Fatal(err) + } + } + if i == initialSyncHeight { + break + } + } + + // Sanity check. + if chain.BestSnapshot().Height != int32(initialSyncHeight) { + t.Fatalf("expected the chain to sync up to height %d", initialSyncHeight) + } + + // Close the database without flushing the utxocache. This leaves the + // chaintip at height 13,000 but the utxocache consistent state at 11,000. + err = chain.db.Close() + if err != nil { + t.Fatal(err) + } + chain.db = nil + + // Re-open the database and pass the re-opened db to internal structs. + dbPath := filepath.Join(testDbRoot, dbName) + ndb, err := database.Open(testDbType, dbPath, blockDataNet) + if err != nil { + t.Fatal(err) + } + chain.db = ndb + chain.utxoCache.db = ndb + chain.index.db = ndb + + // Sanity check to see that the utxo cache was flushed before the + // current chain tip. 
+ var statusBytes []byte + ndb.View(func(dbTx database.Tx) error { + statusBytes = dbFetchUtxoStateConsistency(dbTx) + return nil + }) + statusHash, err := chainhash.NewHash(statusBytes) + if err != nil { + t.Fatal(err) + } + if !statusHash.IsEqual(blocks[cacheFlushHeight].Hash()) { + t.Fatalf("expected the utxocache to be flushed at "+ + "block hash %s but got %s", + blocks[cacheFlushHeight].Hash(), statusHash) + } + + // Call InitConsistentState. This will make the utxocache catch back + // up to the tip. + err = chain.InitConsistentState(chain.bestChain.tip(), nil) + if err != nil { + t.Fatal(err) + } + + // Sync the rest of the blocks. + for i, block := range blocks { + if i <= initialSyncHeight { + continue + } + isMainChain, _, err := chain.ProcessBlock(block, BFNone) + if err != nil { + t.Fatal(err) + } + + if !isMainChain { + t.Fatalf("expected block %s to be on the main chain", block.Hash()) + } + } + + if chain.BestSnapshot().Height != blocks[len(blocks)-1].Height() { + t.Fatalf("expected the chain to sync up to height %d", + blocks[len(blocks)-1].Height()) + } +} diff --git a/blockchain/utxoviewpoint.go b/blockchain/utxoviewpoint.go index 3d1d513a3f..fdd165c095 100644 --- a/blockchain/utxoviewpoint.go +++ b/blockchain/utxoviewpoint.go @@ -28,6 +28,13 @@ const ( // tfModified indicates that a txout has been modified since it was // loaded. tfModified + + // tfFresh indicates that the entry is fresh. This means that the parent + // view never saw this entry. Note that tfFresh is a performance + // optimization with which we can erase entries that are fully spent if we + // know we do not need to commit them. It is always safe to not mark + // tfFresh if that condition is not guaranteed. 
+ tfFresh ) // UtxoEntry houses details about an individual transaction output in a utxo @@ -58,6 +65,22 @@ func (entry *UtxoEntry) isModified() bool { return entry.packedFlags&tfModified == tfModified } +// isFresh returns whether or not it's certain the output has never previously +// been stored in the database. +func (entry *UtxoEntry) isFresh() bool { + return entry.packedFlags&tfFresh == tfFresh +} + +// memoryUsage returns the memory usage in bytes of for the utxo entry. +// It returns 0 for a nil entry. +func (entry *UtxoEntry) memoryUsage() uint64 { + if entry == nil { + return 0 + } + + return baseEntrySize + uint64(cap(entry.pkScript)) +} + // IsCoinBase returns whether or not the output was contained in a coinbase // transaction. func (entry *UtxoEntry) IsCoinBase() bool { @@ -199,7 +222,7 @@ func (view *UtxoViewpoint) addTxOut(outpoint wire.OutPoint, txOut *wire.TxOut, i entry.amount = txOut.Value entry.pkScript = txOut.PkScript entry.blockHeight = blockHeight - entry.packedFlags = tfModified + entry.packedFlags = tfFresh | tfModified if isCoinBase { entry.packedFlags |= tfCoinBase } @@ -503,7 +526,7 @@ func (view *UtxoViewpoint) commit() { // Upon completion of this function, the view will contain an entry for each // requested outpoint. Spent outputs, or those which otherwise don't exist, // will result in a nil entry in the view. -func (view *UtxoViewpoint) fetchUtxosMain(db database.DB, outpoints map[wire.OutPoint]struct{}) error { +func (view *UtxoViewpoint) fetchUtxosMain(db database.DB, outpoints []wire.OutPoint) error { // Nothing to do if there are no requested outputs. if len(outpoints) == 0 { return nil @@ -517,49 +540,82 @@ func (view *UtxoViewpoint) fetchUtxosMain(db database.DB, outpoints map[wire.Out // so other code can use the presence of an entry in the store as a way // to unnecessarily avoid attempting to reload it from the database. 
return db.View(func(dbTx database.Tx) error { - for outpoint := range outpoints { - entry, err := dbFetchUtxoEntry(dbTx, outpoint) + utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName) + for i := range outpoints { + entry, err := dbFetchUtxoEntry(dbTx, utxoBucket, outpoints[i]) if err != nil { return err } - view.entries[outpoint] = entry + view.entries[outpoints[i]] = entry } return nil }) } +// fetchUtxosFromCache fetches unspent transaction output data about the provided +// set of outpoints from the point of view of the end of the main chain at the +// time of the call. It attempts to fetch them from the cache and whatever entries +// that were not in the cache will be attempted to be fetched from the database and +// it'll be cached. +// +// Upon completion of this function, the view will contain an entry for each +// requested outpoint. Spent outputs, or those which otherwise don't exist, +// will result in a nil entry in the view. +func (view *UtxoViewpoint) fetchUtxosFromCache(cache *utxoCache, outpoints []wire.OutPoint) error { + // Nothing to do if there are no requested outputs. + if len(outpoints) == 0 { + return nil + } + + // Load the requested set of unspent transaction outputs from the point + // of view of the end of the main chain. Any missing entries will be + // fetched from the database and be cached. + // + // NOTE: Missing entries are not considered an error here and instead + // will result in nil entries in the view. This is intentionally done + // so other code can use the presence of an entry in the store as a way + // to unnecessarily avoid attempting to reload it from the database. 
+ entries, err := cache.fetchEntries(outpoints) + if err != nil { + return err + } + for i, entry := range entries { + view.entries[outpoints[i]] = entry.Clone() + } + return nil +} + // fetchUtxos loads the unspent transaction outputs for the provided set of // outputs into the view from the database as needed unless they already exist // in the view in which case they are ignored. -func (view *UtxoViewpoint) fetchUtxos(db database.DB, outpoints map[wire.OutPoint]struct{}) error { +func (view *UtxoViewpoint) fetchUtxos(cache *utxoCache, outpoints []wire.OutPoint) error { // Nothing to do if there are no requested outputs. if len(outpoints) == 0 { return nil } // Filter entries that are already in the view. - neededSet := make(map[wire.OutPoint]struct{}) - for outpoint := range outpoints { + needed := make([]wire.OutPoint, 0, len(outpoints)) + for i := range outpoints { // Already loaded into the current view. - if _, ok := view.entries[outpoint]; ok { + if _, ok := view.entries[outpoints[i]]; ok { continue } - neededSet[outpoint] = struct{}{} + needed = append(needed, outpoints[i]) } // Request the input utxos from the database. - return view.fetchUtxosMain(db, neededSet) + return view.fetchUtxosFromCache(cache, needed) } -// fetchInputUtxos loads the unspent transaction outputs for the inputs -// referenced by the transactions in the given block into the view from the -// database as needed. In particular, referenced entries that are earlier in -// the block are added to the view and entries that are already in the view are -// not modified. -func (view *UtxoViewpoint) fetchInputUtxos(db database.DB, block *btcutil.Block) error { +// findInputsToFetch goes through all the blocks and returns all the outpoints of +// the entries that need to be fetched in order to validate the block. Outpoints +// for the entries that are already in the block are not included in the returned +// outpoints. 
+func (view *UtxoViewpoint) findInputsToFetch(block *btcutil.Block) []wire.OutPoint { // Build a map of in-flight transactions because some of the inputs in // this block could be referencing other transactions earlier in this // block which are not yet in the chain. @@ -572,7 +628,7 @@ func (view *UtxoViewpoint) fetchInputUtxos(db database.DB, block *btcutil.Block) // Loop through all of the transaction inputs (except for the coinbase // which has no inputs) collecting them into sets of what is needed and // what is already known (in-flight). - neededSet := make(map[wire.OutPoint]struct{}) + needed := make([]wire.OutPoint, 0, len(transactions)) for i, tx := range transactions[1:] { for _, txIn := range tx.MsgTx().TxIn { // It is acceptable for a transaction input to reference @@ -601,12 +657,24 @@ func (view *UtxoViewpoint) fetchInputUtxos(db database.DB, block *btcutil.Block) continue } - neededSet[txIn.PreviousOutPoint] = struct{}{} + needed = append(needed, txIn.PreviousOutPoint) } } - // Request the input utxos from the database. - return view.fetchUtxosMain(db, neededSet) + return needed +} + +// fetchInputUtxos loads the unspent transaction outputs for the inputs +// referenced by the transactions in the given block into the view from the +// database or the cache as needed. In particular, referenced entries that +// are earlier in the block are added to the view and entries that are already +// in the view are not modified. +func (view *UtxoViewpoint) fetchInputUtxos(db database.DB, cache *utxoCache, block *btcutil.Block) error { + if cache != nil { + return view.fetchUtxosFromCache(cache, view.findInputsToFetch(block)) + } + // No cache is available; request the input utxos from the database. + return view.fetchUtxosMain(db, view.findInputsToFetch(block)) } // NewUtxoViewpoint returns a new empty unspent transaction output view. 
@@ -626,15 +694,19 @@ func (b *BlockChain) FetchUtxoView(tx *btcutil.Tx) (*UtxoViewpoint, error) { // Create a set of needed outputs based on those referenced by the // inputs of the passed transaction and the outputs of the transaction // itself. - neededSet := make(map[wire.OutPoint]struct{}) + neededLen := len(tx.MsgTx().TxOut) + if !IsCoinBase(tx) { + neededLen += len(tx.MsgTx().TxIn) + } + needed := make([]wire.OutPoint, 0, neededLen) prevOut := wire.OutPoint{Hash: *tx.Hash()} for txOutIdx := range tx.MsgTx().TxOut { prevOut.Index = uint32(txOutIdx) - neededSet[prevOut] = struct{}{} + needed = append(needed, prevOut) } if !IsCoinBase(tx) { for _, txIn := range tx.MsgTx().TxIn { - neededSet[txIn.PreviousOutPoint] = struct{}{} + needed = append(needed, txIn.PreviousOutPoint) } } @@ -642,7 +714,7 @@ func (b *BlockChain) FetchUtxoView(tx *btcutil.Tx) (*UtxoViewpoint, error) { // chain. view := NewUtxoViewpoint() b.chainLock.RLock() - err := view.fetchUtxosMain(b.db, neededSet) + err := view.fetchUtxosFromCache(b.utxoCache, needed) b.chainLock.RUnlock() return view, err } @@ -661,15 +733,10 @@ func (b *BlockChain) FetchUtxoEntry(outpoint wire.OutPoint) (*UtxoEntry, error) b.chainLock.RLock() defer b.chainLock.RUnlock() - var entry *UtxoEntry - err := b.db.View(func(dbTx database.Tx) error { - var err error - entry, err = dbFetchUtxoEntry(dbTx, outpoint) - return err - }) + entries, err := b.utxoCache.fetchEntries([]wire.OutPoint{outpoint}) if err != nil { return nil, err } - return entry, nil + return entries[0], nil } diff --git a/blockchain/validate.go b/blockchain/validate.go index 89971e7fd6..02d36134b1 100644 --- a/blockchain/validate.go +++ b/blockchain/validate.go @@ -5,6 +5,7 @@ package blockchain import ( + "bytes" "encoding/binary" "fmt" "math" @@ -41,6 +42,10 @@ const ( // baseSubsidy is the starting subsidy amount for mined blocks. This // value is halved every SubsidyHalvingInterval blocks. 
baseSubsidy = 50 * btcutil.SatoshiPerBitcoin + + // coinbaseHeightAllocSize is the amount of bytes that the + // ScriptBuilder will allocate when validating the coinbase height. + coinbaseHeightAllocSize = 5 ) var ( @@ -302,8 +307,8 @@ func CheckTransactionSanity(tx *btcutil.Tx) error { // target difficulty as claimed. // // The flags modify the behavior of this function as follows: -// - BFNoPoWCheck: The check to ensure the block hash is less than the target -// difficulty is not performed. +// - BFNoPoWCheck: The check to ensure the block hash is less than the target +// difficulty is not performed. func checkProofOfWork(header *wire.BlockHeader, powLimit *big.Int, flags BehaviorFlags) error { // The target difficulty must be larger than zero. target := CompactToBig(header.Bits) @@ -421,13 +426,15 @@ func CountP2SHSigOps(tx *btcutil.Tx, isCoinBaseTx bool, utxoView *UtxoViewpoint) return totalSigOps, nil } -// checkBlockHeaderSanity performs some preliminary checks on a block header to +// CheckBlockHeaderSanity performs some preliminary checks on a block header to // ensure it is sane before continuing with processing. These checks are // context free. // // The flags do not modify the behavior of this function directly, however they // are needed to pass along to checkProofOfWork. -func checkBlockHeaderSanity(header *wire.BlockHeader, powLimit *big.Int, timeSource MedianTimeSource, flags BehaviorFlags) error { +func CheckBlockHeaderSanity(header *wire.BlockHeader, powLimit *big.Int, + timeSource MedianTimeSource, flags BehaviorFlags) error { + // Ensure the proof of work bits in the block header is in min/max range // and the block hash is less than the target value described by the // bits. 
@@ -467,7 +474,7 @@ func checkBlockHeaderSanity(header *wire.BlockHeader, powLimit *big.Int, timeSou func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource MedianTimeSource, flags BehaviorFlags) error { msgBlock := block.MsgBlock() header := &msgBlock.Header - err := checkBlockHeaderSanity(header, powLimit, timeSource, flags) + err := CheckBlockHeaderSanity(header, powLimit, timeSource, flags) if err != nil { return err } @@ -527,12 +534,11 @@ func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median // checks. Bitcoind builds the tree here and checks the merkle root // after the following checks, but there is no reason not to check the // merkle root matches here. - merkles := BuildMerkleTreeStore(block.Transactions(), false) - calculatedMerkleRoot := merkles[len(merkles)-1] - if !header.MerkleRoot.IsEqual(calculatedMerkleRoot) { + calcMerkleRoot := CalcMerkleRoot(block.Transactions(), false) + if !header.MerkleRoot.IsEqual(&calcMerkleRoot) { str := fmt.Sprintf("block merkle root is invalid - block "+ "header indicates %v, but calculated value is %v", - header.MerkleRoot, calculatedMerkleRoot) + header.MerkleRoot, calcMerkleRoot) return ruleError(ErrBadMerkleRoot, str) } @@ -609,16 +615,25 @@ func ExtractCoinbaseHeight(coinbaseTx *btcutil.Tx) (int32, error) { return 0, ruleError(ErrMissingCoinbaseHeight, str) } - serializedHeightBytes := make([]byte, 8) - copy(serializedHeightBytes, sigScript[1:serializedLen+1]) - serializedHeight := binary.LittleEndian.Uint64(serializedHeightBytes) + // We use 4 bytes here since it saves us allocations. We use a stack + // allocation rather than a heap allocation here. 
+ var serializedHeightBytes [4]byte + copy(serializedHeightBytes[:], sigScript[1:serializedLen+1]) - return int32(serializedHeight), nil + serializedHeight := int32( + binary.LittleEndian.Uint32(serializedHeightBytes[:]), + ) + + if err := compareScript(serializedHeight, sigScript); err != nil { + return 0, err + } + + return serializedHeight, nil } -// checkSerializedHeight checks if the signature script in the passed +// CheckSerializedHeight checks if the signature script in the passed // transaction starts with the serialized block height of wantHeight. -func checkSerializedHeight(coinbaseTx *btcutil.Tx, wantHeight int32) error { +func CheckSerializedHeight(coinbaseTx *btcutil.Tx, wantHeight int32) error { serializedHeight, err := ExtractCoinbaseHeight(coinbaseTx) if err != nil { return err @@ -633,22 +648,50 @@ func checkSerializedHeight(coinbaseTx *btcutil.Tx, wantHeight int32) error { return nil } -// checkBlockHeaderContext performs several validation checks on the block header +func compareScript(height int32, script []byte) error { + scriptBuilder := txscript.NewScriptBuilder( + txscript.WithScriptAllocSize(coinbaseHeightAllocSize), + ) + scriptHeight, err := scriptBuilder.AddInt64( + int64(height), + ).Script() + if err != nil { + return err + } + + if !bytes.HasPrefix(script, scriptHeight) { + str := fmt.Sprintf("the coinbase signature script does not "+ + "minimally encode the height %d", height) + return ruleError(ErrBadCoinbaseHeight, str) + } + + return nil +} + +// CheckBlockHeaderContext performs several validation checks on the block header // which depend on its position within the block chain. // // The flags modify the behavior of this function as follows: -// - BFFastAdd: All checks except those involving comparing the header against -// the checkpoints are not performed. +// - BFFastAdd: All checks except those involving comparing the header against +// the checkpoints are not performed. 
+// +// The skipCheckpoint boolean is used so that libraries can skip the checkpoint +// sanity checks. // // This function MUST be called with the chain state lock held (for writes). -func (b *BlockChain) checkBlockHeaderContext(header *wire.BlockHeader, prevNode *blockNode, flags BehaviorFlags) error { +// NOTE: Ignore the above lock requirement if this function is not passed a +// *Blockchain instance as the ChainCtx argument. +func CheckBlockHeaderContext(header *wire.BlockHeader, prevNode HeaderCtx, + flags BehaviorFlags, c ChainCtx, skipCheckpoint bool) error { + fastAdd := flags&BFFastAdd == BFFastAdd if !fastAdd { // Ensure the difficulty specified in the block header matches // the calculated difficulty based on the previous block and // difficulty retarget rules. - expectedDifficulty, err := b.calcNextRequiredDifficulty(prevNode, - header.Timestamp) + expectedDifficulty, err := calcNextRequiredDifficulty( + prevNode, header.Timestamp, c, + ) if err != nil { return err } @@ -661,7 +704,7 @@ func (b *BlockChain) checkBlockHeaderContext(header *wire.BlockHeader, prevNode // Ensure the timestamp for the block header is after the // median time of the last several blocks (medianTimeBlocks). - medianTime := prevNode.CalcPastMedianTime() + medianTime := CalcPastMedianTime(prevNode) if !header.Timestamp.After(medianTime) { str := "block timestamp of %v is not after expected %v" str = fmt.Sprintf(str, header.Timestamp, medianTime) @@ -671,11 +714,30 @@ func (b *BlockChain) checkBlockHeaderContext(header *wire.BlockHeader, prevNode // The height of this block is one more than the referenced previous // block. - blockHeight := prevNode.height + 1 + blockHeight := prevNode.Height() + 1 + + // Reject outdated block versions once a majority of the network + // has upgraded. These were originally voted on by BIP0034, + // BIP0065, and BIP0066. 
+ params := c.ChainParams() + if header.Version < 2 && blockHeight >= params.BIP0034Height || + header.Version < 3 && blockHeight >= params.BIP0066Height || + header.Version < 4 && blockHeight >= params.BIP0065Height { + + str := "new blocks with version %d are no longer valid" + str = fmt.Sprintf(str, header.Version) + return ruleError(ErrBlockVersionTooOld, str) + } + + if skipCheckpoint { + // If the caller wants us to skip the checkpoint checks, we'll + // return early. + return nil + } // Ensure chain matches up to predetermined checkpoints. blockHash := header.BlockHash() - if !b.verifyCheckpoint(blockHeight, &blockHash) { + if !c.VerifyCheckpoint(blockHeight, &blockHash) { str := fmt.Sprintf("block at height %d does not match "+ "checkpoint hash", blockHeight) return ruleError(ErrBadCheckpoint, str) @@ -685,30 +747,17 @@ func (b *BlockChain) checkBlockHeaderContext(header *wire.BlockHeader, prevNode // chain before it. This prevents storage of new, otherwise valid, // blocks which build off of old blocks that are likely at a much easier // difficulty and therefore could be used to waste cache and disk space. - checkpointNode, err := b.findPreviousCheckpoint() + checkpointNode, err := c.FindPreviousCheckpoint() if err != nil { return err } - if checkpointNode != nil && blockHeight < checkpointNode.height { + if checkpointNode != nil && blockHeight < checkpointNode.Height() { str := fmt.Sprintf("block at height %d forks the main chain "+ "before the previous checkpoint at height %d", - blockHeight, checkpointNode.height) + blockHeight, checkpointNode.Height()) return ruleError(ErrForkTooOld, str) } - // Reject outdated block versions once a majority of the network - // has upgraded. These were originally voted on by BIP0034, - // BIP0065, and BIP0066. 
- params := b.chainParams - if header.Version < 2 && blockHeight >= params.BIP0034Height || - header.Version < 3 && blockHeight >= params.BIP0066Height || - header.Version < 4 && blockHeight >= params.BIP0065Height { - - str := "new blocks with version %d are no longer valid" - str = fmt.Sprintf(str, header.Version) - return ruleError(ErrBlockVersionTooOld, str) - } - return nil } @@ -716,8 +765,8 @@ func (b *BlockChain) checkBlockHeaderContext(header *wire.BlockHeader, prevNode // on its position within the block chain. // // The flags modify the behavior of this function as follows: -// - BFFastAdd: The transaction are not checked to see if they are finalized -// and the somewhat expensive BIP0034 validation is not performed. +// - BFFastAdd: The transaction are not checked to see if they are finalized +// and the somewhat expensive BIP0034 validation is not performed. // // The flags are also passed to checkBlockHeaderContext. See its documentation // for how the flags modify its behavior. @@ -726,7 +775,7 @@ func (b *BlockChain) checkBlockHeaderContext(header *wire.BlockHeader, prevNode func (b *BlockChain) checkBlockContext(block *btcutil.Block, prevNode *blockNode, flags BehaviorFlags) error { // Perform all block header related validation checks. header := &block.MsgBlock().Header - err := b.checkBlockHeaderContext(header, prevNode, flags) + err := CheckBlockHeaderContext(header, prevNode, flags, b, false) if err != nil { return err } @@ -746,7 +795,7 @@ func (b *BlockChain) checkBlockContext(block *btcutil.Block, prevNode *blockNode // timestamps for all lock-time based checks. 
blockTime := header.Timestamp if csvState == ThresholdActive { - blockTime = prevNode.CalcPastMedianTime() + blockTime = CalcPastMedianTime(prevNode) } // The height of this block is one more than the referenced @@ -772,7 +821,7 @@ func (b *BlockChain) checkBlockContext(block *btcutil.Block, prevNode *blockNode blockHeight >= b.chainParams.BIP0034Height { coinbaseTx := block.Transactions()[0] - err := checkSerializedHeight(coinbaseTx, blockHeight) + err := CheckSerializedHeight(coinbaseTx, blockHeight) if err != nil { return err } @@ -832,22 +881,22 @@ func (b *BlockChain) checkBlockContext(block *btcutil.Block, prevNode *blockNode func (b *BlockChain) checkBIP0030(node *blockNode, block *btcutil.Block, view *UtxoViewpoint) error { // Fetch utxos for all of the transaction ouputs in this block. // Typically, there will not be any utxos for any of the outputs. - fetchSet := make(map[wire.OutPoint]struct{}) + fetch := make([]wire.OutPoint, 0, len(block.Transactions())) for _, tx := range block.Transactions() { prevOut := wire.OutPoint{Hash: *tx.Hash()} for txOutIdx := range tx.MsgTx().TxOut { prevOut.Index = uint32(txOutIdx) - fetchSet[prevOut] = struct{}{} + fetch = append(fetch, prevOut) } } - err := view.fetchUtxos(b.db, fetchSet) + err := view.fetchUtxos(b.utxoCache, fetch) if err != nil { return err } // Duplicate transactions are only allowed if the previous transaction // is fully spent. - for outpoint := range fetchSet { + for _, outpoint := range fetch { utxo := view.LookupEntry(outpoint) if utxo != nil && !utxo.IsSpent() { str := fmt.Sprintf("tried to overwrite transaction %v "+ @@ -1031,11 +1080,11 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi } // Load all of the utxos referenced by the inputs for all transactions - // in the block don't already exist in the utxo view from the database. + // in the block don't already exist in the utxo view from the cache. 
// // These utxo entries are needed for verification of things such as // transaction inputs, counting pay-to-script-hashes, and scripts. - err := view.fetchInputUtxos(b.db, block) + err := view.fetchInputUtxos(nil, b.utxoCache, block) if err != nil { return err } @@ -1186,7 +1235,7 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi // We obtain the MTP of the *previous* block in order to // determine if transactions in the current block are final. - medianTime := node.parent.CalcPastMedianTime() + medianTime := CalcPastMedianTime(node.parent) // Additionally, if the CSV soft-fork package is now active, // then we also enforce the relative sequence number based @@ -1288,3 +1337,68 @@ func (b *BlockChain) CheckConnectBlockTemplate(block *btcutil.Block) error { newNode := newBlockNode(&header, tip) return b.checkConnectBlock(newNode, block, view, nil) } + +// ChainParams returns the Blockchain's configured chaincfg.Params. +// +// NOTE: Part of the ChainCtx interface. +func (b *BlockChain) ChainParams() *chaincfg.Params { + return b.chainParams +} + +// BlocksPerRetarget returns the number of blocks before retargeting occurs. +// +// NOTE: Part of the ChainCtx interface. +func (b *BlockChain) BlocksPerRetarget() int32 { + return b.blocksPerRetarget +} + +// MinRetargetTimespan returns the minimum amount of time to use in the +// difficulty calculation. +// +// NOTE: Part of the ChainCtx interface. +func (b *BlockChain) MinRetargetTimespan() int64 { + return b.minRetargetTimespan +} + +// MaxRetargetTimespan returns the maximum amount of time to use in the +// difficulty calculation. +// +// NOTE: Part of the ChainCtx interface. +func (b *BlockChain) MaxRetargetTimespan() int64 { + return b.maxRetargetTimespan +} + +// VerifyCheckpoint checks that the height and hash match the stored +// checkpoints. +// +// NOTE: Part of the ChainCtx interface. 
+func (b *BlockChain) VerifyCheckpoint(height int32, + hash *chainhash.Hash) bool { + + return b.verifyCheckpoint(height, hash) +} + +// FindPreviousCheckpoint finds the checkpoint we've encountered during +// validation. +// +// NOTE: Part of the ChainCtx interface. +func (b *BlockChain) FindPreviousCheckpoint() (HeaderCtx, error) { + checkpoint, err := b.findPreviousCheckpoint() + if err != nil { + return nil, err + } + + if checkpoint == nil { + // This check is necessary because if we just return the nil + // blockNode as a HeaderCtx, a caller performing a nil-check + // will fail. This is a quirk of go where a nil value stored in + // an interface is different from the actual nil interface. + return nil, nil + } + + return checkpoint, err +} + +// A compile-time assertion to ensure BlockChain implements the ChainCtx +// interface. +var _ ChainCtx = (*BlockChain)(nil) diff --git a/blockchain/validate_test.go b/blockchain/validate_test.go index d5d17781d3..ddd59130c1 100644 --- a/blockchain/validate_test.go +++ b/blockchain/validate_test.go @@ -10,10 +10,10 @@ import ( "testing" "time" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) // TestSequenceLocksActive tests the SequenceLockActive function to ensure it @@ -169,7 +169,7 @@ func TestCheckBlockSanity(t *testing.T) { } } -// TestCheckSerializedHeight tests the checkSerializedHeight function with +// TestCheckSerializedHeight tests the CheckSerializedHeight function with // various serialized heights and also does negative tests to ensure errors // and handled properly. 
func TestCheckSerializedHeight(t *testing.T) { @@ -215,9 +215,9 @@ func TestCheckSerializedHeight(t *testing.T) { msgTx.TxIn[0].SignatureScript = test.sigScript tx := btcutil.NewTx(msgTx) - err := checkSerializedHeight(tx, test.wantHeight) + err := CheckSerializedHeight(tx, test.wantHeight) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { - t.Errorf("checkSerializedHeight #%d wrong error type "+ + t.Errorf("CheckSerializedHeight #%d wrong error type "+ "got: %v <%T>, want: %T", i, err, err, test.err) continue } @@ -225,7 +225,7 @@ func TestCheckSerializedHeight(t *testing.T) { if rerr, ok := err.(RuleError); ok { trerr := test.err.(RuleError) if rerr.ErrorCode != trerr.ErrorCode { - t.Errorf("checkSerializedHeight #%d wrong "+ + t.Errorf("CheckSerializedHeight #%d wrong "+ "error code got: %v, want: %v", i, rerr.ErrorCode, trerr.ErrorCode) continue diff --git a/btcd.go b/btcd.go index aec55e06eb..c7f292cbc9 100644 --- a/btcd.go +++ b/btcd.go @@ -88,6 +88,18 @@ func btcdMain(serverChan chan<- *server) error { defer pprof.StopCPUProfile() } + // Write mem profile if requested. + if cfg.MemoryProfile != "" { + f, err := os.Create(cfg.MemoryProfile) + if err != nil { + btcdLog.Errorf("Unable to create memory profile: %v", err) + return err + } + defer f.Close() + defer pprof.WriteHeapProfile(f) + defer runtime.GC() + } + // Perform upgrades to btcd as new versions require it. if err := doUpgrades(); err != nil { btcdLog.Errorf("%v", err) @@ -145,6 +157,88 @@ func btcdMain(serverChan chan<- *server) error { return nil } + // Check if the database had previously been pruned. If it had been, it's + // not possible to newly generate the tx index and addr index. + var beenPruned bool + db.View(func(dbTx database.Tx) error { + beenPruned, err = dbTx.BeenPruned() + return err + }) + if err != nil { + btcdLog.Errorf("%v", err) + return err + } + if beenPruned && cfg.Prune == 0 { + err = fmt.Errorf("--prune cannot be disabled as the node has been "+ + "previously pruned. 
You must delete the files in the datadir: \"%s\" "+ + "and sync from the beginning to disable pruning", cfg.DataDir) + btcdLog.Errorf("%v", err) + return err + } + if beenPruned && cfg.TxIndex { + err = fmt.Errorf("--txindex cannot be enabled as the node has been "+ + "previously pruned. You must delete the files in the datadir: \"%s\" "+ + "and sync from the beginning to enable the desired index", cfg.DataDir) + btcdLog.Errorf("%v", err) + return err + } + if beenPruned && cfg.AddrIndex { + err = fmt.Errorf("--addrindex cannot be enabled as the node has been "+ + "previously pruned. You must delete the files in the datadir: \"%s\" "+ + "and sync from the beginning to enable the desired index", cfg.DataDir) + btcdLog.Errorf("%v", err) + return err + } + // If we've previously been pruned and the cfindex isn't present, it means that the + // user wants to enable the cfindex after the node has already synced up and been + // pruned. + if beenPruned && !indexers.CfIndexInitialized(db) && !cfg.NoCFilters { + err = fmt.Errorf("compact filters cannot be enabled as the node has been "+ + "previously pruned. You must delete the files in the datadir: \"%s\" "+ + "and sync from the beginning to enable the desired index. You may "+ + "use the --nocfilters flag to start the node up without the compact "+ + "filters", cfg.DataDir) + btcdLog.Errorf("%v", err) + return err + } + // If the user wants to disable the cfindex and is pruned or has enabled pruning, force + // the user to either drop the cfindex manually or restart the node without the --nocfilters + // flag. + if (beenPruned || cfg.Prune != 0) && indexers.CfIndexInitialized(db) && cfg.NoCFilters { + err = fmt.Errorf("--nocfilters flag was given but the compact filters have " + + "previously been enabled on this node and the index data currently " + + "exists in the database. 
The node has also been previously pruned and " + + "the database would be left in an inconsistent state if the compact " + + "filters don't get indexed now. To disable compact filters, please drop the " + + "index completely with the --dropcfindex flag and restart the node. " + + "To keep the compact filters, restart the node without the --nocfilters " + + "flag") + btcdLog.Errorf("%v", err) + return err + } + + // Enforce removal of txindex and addrindex if user requested pruning. + // This is to require explicit action from the user before removing + // indexes that won't be useful when block files are pruned. + // + // NOTE: The order is important here because dropping the tx index also + // drops the address index since it relies on it. We explicitly make the + // user drop both indexes if --addrindex was enabled previously. + if cfg.Prune != 0 && indexers.AddrIndexInitialized(db) { + err = fmt.Errorf("--prune flag may not be given when the address index " + + "has been initialized. Please drop the address index with the " + + "--dropaddrindex flag before enabling pruning") + btcdLog.Errorf("%v", err) + return err + } + if cfg.Prune != 0 && indexers.TxIndexInitialized(db) { + err = fmt.Errorf("--prune flag may not be given when the transaction index " + + "has been initialized. Please drop the transaction index with the " + + "--droptxindex flag before enabling pruning") + btcdLog.Errorf("%v", err) + return err + } + // The config file is already created if it did not exist and the log // file has already been opened by now so we only need to allow // creating rpc cert and key files if they don't exist. 
diff --git a/btcec/btcec_test.go b/btcec/btcec_test.go index 3113a1b553..f5d9395274 100644 --- a/btcec/btcec_test.go +++ b/btcec/btcec_test.go @@ -527,7 +527,7 @@ type baseMultTest struct { x, y string } -//TODO: add more test vectors +// TODO: add more test vectors var s256BaseMultTests = []baseMultTest{ { "AA5E28D6A97A2479A65527F7290311A3624D4CC0FA1578598EE3C2613BF99522", @@ -556,7 +556,7 @@ var s256BaseMultTests = []baseMultTest{ }, } -//TODO: test different curves as well? +// TODO: test different curves as well? func TestBaseMult(t *testing.T) { s256 := S256() for i, e := range s256BaseMultTests { diff --git a/btcec/modnscalar.go b/btcec/modnscalar.go index b18b2c1d43..939b0c17a7 100644 --- a/btcec/modnscalar.go +++ b/btcec/modnscalar.go @@ -11,7 +11,7 @@ import ( // arithmetic over the secp256k1 group order. This means all arithmetic is // performed modulo: // -// 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141 +// 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141 // // It only implements the arithmetic needed for elliptic curve operations, // however, the operations that are not implemented can typically be worked diff --git a/btcec/privkey.go b/btcec/privkey.go index 4efa806c5f..d0dbd8d9f9 100644 --- a/btcec/privkey.go +++ b/btcec/privkey.go @@ -9,7 +9,7 @@ import ( ) // PrivateKey wraps an ecdsa.PrivateKey as a convenience mainly for signing -// things with the the private key without having to directly import the ecdsa +// things with the private key without having to directly import the ecdsa // package. 
type PrivateKey = secp.PrivateKey diff --git a/btcec/pubkey.go b/btcec/pubkey.go index 7968ed042a..c4b0680a7a 100644 --- a/btcec/pubkey.go +++ b/btcec/pubkey.go @@ -19,7 +19,7 @@ const ( pubkeyHybrid byte = 0x6 // y_bit + x coord + y coord ) -// IsCompressedPubKey returns true the the passed serialized public key has +// IsCompressedPubKey returns true the passed serialized public key has // been encoded in compressed format, and false otherwise. func IsCompressedPubKey(pubKey []byte) bool { // The public key is only compressed if it is the correct length and diff --git a/btcec/schnorr/musig2/bench_test.go b/btcec/schnorr/musig2/bench_test.go index 70d7e931ce..aac298aa28 100644 --- a/btcec/schnorr/musig2/bench_test.go +++ b/btcec/schnorr/musig2/bench_test.go @@ -45,14 +45,9 @@ func genSigner(t *testing.B) signer { t.Fatalf("unable to gen priv key: %v", err) } - pubKey, err := schnorr.ParsePubKey( - schnorr.SerializePubKey(privKey.PubKey()), - ) - if err != nil { - t.Fatalf("unable to gen key: %v", err) - } + pubKey := privKey.PubKey() - nonces, err := GenNonces() + nonces, err := GenNonces(WithPublicKey(pubKey)) if err != nil { t.Fatalf("unable to gen nonces: %v", err) } @@ -185,7 +180,7 @@ func BenchmarkPartialVerify(b *testing.B) { for i := 0; i < b.N; i++ { ok = sig.Verify( signers[0].nonces.PubNonce, combinedNonce, - keys, pubKey, msg, + keys, pubKey, msg, signOpts..., ) if !ok { b.Fatalf("generated invalid sig!") diff --git a/btcec/schnorr/musig2/context.go b/btcec/schnorr/musig2/context.go index 0dfc6f38af..8f4521502a 100644 --- a/btcec/schnorr/musig2/context.go +++ b/btcec/schnorr/musig2/context.go @@ -234,23 +234,23 @@ func NewContext(signingKey *btcec.PrivateKey, shouldSort bool, opts.keySet = make([]*btcec.PublicKey, 0, opts.numSigners) opts.keySet = append(opts.keySet, pubKey) - // If early nonce generation is specified, then we'll generate - // the nonce now to pass in to the session once all the callers - // are known. 
- if opts.earlyNonce { - var err error - ctx.sessionNonce, err = GenNonces( - WithNonceSecretKeyAux(signingKey), - ) - if err != nil { - return nil, err - } - } - default: return nil, ErrSignersNotSpecified } + // If early nonce generation is specified, then we'll generate the + // nonce now to pass in to the session once all the callers are known. + if opts.earlyNonce { + var err error + ctx.sessionNonce, err = GenNonces( + WithPublicKey(ctx.pubKey), + WithNonceSecretKeyAux(signingKey), + ) + if err != nil { + return nil, err + } + } + return ctx, nil } @@ -483,6 +483,7 @@ func (c *Context) NewSession(options ...SessionOption) (*Session, error) { // in some auxiliary information to strengthen the nonce // generated. localNonces, err = GenNonces( + WithPublicKey(c.pubKey), WithNonceSecretKeyAux(c.signingKey), WithNonceCombinedKeyAux(c.combinedKey.FinalKey), ) @@ -560,7 +561,7 @@ func (s *Session) Sign(msg [32]byte, return nil, ErrSigningContextReuse // We also need to make sure we have the combined nonce, otherwise this - // funciton was called too early. + // function was called too early. 
case s.combinedNonce == nil: return nil, ErrCombinedNonceUnavailable } diff --git a/btcec/schnorr/musig2/data/nonce_gen_vectors.json b/btcec/schnorr/musig2/data/nonce_gen_vectors.json index 9727cfe93b..450234974c 100644 --- a/btcec/schnorr/musig2/data/nonce_gen_vectors.json +++ b/btcec/schnorr/musig2/data/nonce_gen_vectors.json @@ -3,34 +3,38 @@ { "rand_": "0000000000000000000000000000000000000000000000000000000000000000", "sk": "0202020202020202020202020202020202020202020202020202020202020202", + "pk": "024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766", "aggpk": "0707070707070707070707070707070707070707070707070707070707070707", "msg": "0101010101010101010101010101010101010101010101010101010101010101", "extra_in": "0808080808080808080808080808080808080808080808080808080808080808", - "expected": "BC6C683EBBCC39DCB3C29B3D010D2AAA7C86CFB562FC41ED9A460EE061013E75FB4AD2F0B816713269800D018803906D5481E00A940EAB4F4AC49B4A372EB0F4" + "expected": "227243DCB40EF2A13A981DB188FA433717B506BDFA14B1AE47D5DC027C9C3B9EF2370B2AD206E724243215137C86365699361126991E6FEC816845F837BDDAC3024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766" }, { "rand_": "0000000000000000000000000000000000000000000000000000000000000000", "sk": "0202020202020202020202020202020202020202020202020202020202020202", + "pk": "024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766", "aggpk": "0707070707070707070707070707070707070707070707070707070707070707", "msg": "", "extra_in": "0808080808080808080808080808080808080808080808080808080808080808", - "expected": "AAC4BFD707F4953B4063851D7E4AAD5C59D5D0BFB0E71012788A85698B5ACF8F11834D5051928424BA501C8CD064F3F942F8D4A07D8A2ED79F153E4ABD9EBBE9" + "expected": "CD0F47FE471D6788FF3243F47345EA0A179AEF69476BE8348322EF39C2723318870C2065AFB52DEDF02BF4FDBF6D2F442E608692F50C2374C08FFFE57042A61C024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766" }, { "rand_": 
"0000000000000000000000000000000000000000000000000000000000000000", "sk": "0202020202020202020202020202020202020202020202020202020202020202", + "pk": "024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766", "aggpk": "0707070707070707070707070707070707070707070707070707070707070707", "msg": "2626262626262626262626262626262626262626262626262626262626262626262626262626", "extra_in": "0808080808080808080808080808080808080808080808080808080808080808", - "expected": "DF54500DD2B503DBA3753C48A9D6B67E6C11EC4325EDD1DC256C7F75D6A85DBECA6D9857A6F3F292FB3B50DBCBF69FADB67B1CDDB0EA6EB693F6455C4C9088E1" + "expected": "011F8BC60EF061DEEF4D72A0A87200D9994B3F0CD9867910085C38D5366E3E6B9FF03BC0124E56B24069E91EC3F162378983F194E8BD0ED89BE3059649EAE262024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766" }, { "rand_": "0000000000000000000000000000000000000000000000000000000000000000", "sk": null, + "pk": "02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9", "aggpk": null, "msg": null, "extra_in": null, - "expected": "7B3B5A002356471AF0E961DE2549C121BD0D48ABCEEDC6E034BDDF86AD3E0A187ECEE674CEF7364B0BC4BEEFB8B66CAD89F98DE2F8C5A5EAD5D1D1E4BD7D04CD" + "expected": "890E83616A3BC4640AB9B6374F21C81FF89CDDDBAFAA7475AE2A102A92E3EDB29FD7E874E23342813A60D9646948242646B7951CA046B4B36D7D6078506D3C9402F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9" } ] -} +} \ No newline at end of file diff --git a/btcec/schnorr/musig2/data/sig_agg_vectors.json b/btcec/schnorr/musig2/data/sig_agg_vectors.json index 7ae9444fd5..afe9eaf3f4 100644 --- a/btcec/schnorr/musig2/data/sig_agg_vectors.json +++ b/btcec/schnorr/musig2/data/sig_agg_vectors.json @@ -6,12 +6,12 @@ "02352433B21E7E05D3B452B81CAE566E06D2E003ECE16D1074AABA4289E0E3D581" ], "pnonces": [ - "0300A32F8548F59C533F55DB9754E3C0BA3C2544F085649FDCE42B8BD3F244C2CA0384449BED61004E8863452A38534E91875516C3CC543122CE2BE1F31845025588", - 
"03F66B072A869BC2A57D776D487151D707E82B4F1B885066A589858C1BF3871DB603ED391C9658AB6031A96ACBD5E2D9FEC465EFDC8C0D0B765C9B9F3579D520FB6F", - "03A5791CA078E278126EF457C25B5C835F7282C0A47BDBF464BA35C3769427D5CD034D40350F8A5590985E38AAEFC3C695DF671C2E5498E2B60C082C546E06ECAF78", - "020DE6382B8C0550E8174D5263B981224EBCFEF7706588B6936177FEB68E639B8C02BA5F18DDB3487AD087F63CEF7D7818AC8ECA3D6B736113FF36FB25D113F514F6", - "031883080513BB69B31367F9A7B5F4E81246C627060A7414B7F137FA8459F261990345445505F158EDCFDF0D4BF26E04E018C143BF76B5D457AE57DF06CA41371DF0", - "0300028E83123E7FAB1E1F230547CE8B96CC23F13197312972DE72AACBA98EF9870274C2D8566E9E021AA7E2DDDA01B52AE670E0742418F147610528B65ACDB4D0B3" + "036E5EE6E28824029FEA3E8A9DDD2C8483F5AF98F7177C3AF3CB6F47CAF8D94AE902DBA67E4A1F3680826172DA15AFB1A8CA85C7C5CC88900905C8DC8C328511B53E", + "03E4F798DA48A76EEC1C9CC5AB7A880FFBA201A5F064E627EC9CB0031D1D58FC5103E06180315C5A522B7EC7C08B69DCD721C313C940819296D0A7AB8E8795AC1F00", + "02C0068FD25523A31578B8077F24F78F5BD5F2422AFF47C1FADA0F36B3CEB6C7D202098A55D1736AA5FCC21CF0729CCE852575C06C081125144763C2C4C4A05C09B6", + "031F5C87DCFBFCF330DEE4311D85E8F1DEA01D87A6F1C14CDFC7E4F1D8C441CFA40277BF176E9F747C34F81B0D9F072B1B404A86F402C2D86CF9EA9E9C69876EA3B9", + "023F7042046E0397822C4144A17F8B63D78748696A46C3B9F0A901D296EC3406C302022B0B464292CF9751D699F10980AC764E6F671EFCA15069BBE62B0D1C62522A", + "02D97DDA5988461DF58C5897444F116A7C74E5711BF77A9446E27806563F3B6C47020CBAD9C363A7737F99FA06B6BE093CEAFF5397316C5AC46915C43767AE867C00" ], "tweaks": [ "B511DA492182A91B0FFB9A98020D55F260AE86D7ECBD0399C7383D59A5F2AF7C", @@ -19,63 +19,128 @@ "75448A87274B056468B977BE06EB1E9F657577B7320B0A3376EA51FD420D18A8" ], "psigs": [ - "7918521F42E5727FE2E82D802876E0C8844336FDA1B58C82696A55B0188C8B3D", - "599044037AE15C4A99FB94F022B48E7AB215BF703954EC0B83D0E06230476001", - "F05BE3CA783AD1FAF68C5059B43F859BFD4EBB0242459DF2C6BF013F4217F7E7", - "BF85B2A751066466C24A5E7FA6C90DBAADAC2DF1F0BB48546AE239E340437CEB", - 
"142076B034A7401123EFB07E2317DF819B86B3FFA17180DDD093997D018270D0", - "B7A0C7F5B325B7993925E56B60F53EF8198169F31E1AF7E62BBEF1C5DCD1BA22", - "C717ECA32C148CE8EB8882CD9656DF9C64929DCAE9AF798E381B1E888DDF0F8F", - "5988823E78488D8005311E16E5EA67AF70514CB44F5A5CD51FFA262BEEAA21CE", + "B15D2CD3C3D22B04DAE438CE653F6B4ECF042F42CFDED7C41B64AAF9B4AF53FB", + "6193D6AC61B354E9105BBDC8937A3454A6D705B6D57322A5A472A02CE99FCB64", + "9A87D3B79EC67228CB97878B76049B15DBD05B8158D17B5B9114D3C226887505", + "66F82EA90923689B855D36C6B7E032FB9970301481B99E01CDB4D6AC7C347A15", + "4F5AEE41510848A6447DCD1BBC78457EF69024944C87F40250D3EF2C25D33EFE", + "DDEF427BBB847CC027BEFF4EDB01038148917832253EBC355FC33F4A8E2FCCE4", + "97B890A26C981DA8102D3BC294159D171D72810FDF7C6A691DEF02F0F7AF3FDC", + "53FA9E08BA5243CBCB0D797C5EE83BC6728E539EB76C2D0BF0F971EE4E909971", "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141" ], "msg": "599C67EA410D005B9DA90817CF03ED3B1C868E4DA4EDF00A5880B0082C237869", "valid_test_cases": [ { - "aggnonce": "02BC34CDF6FA1298D7B6A126812FAD0739005BC44E45C21276EEFE41AAF841C86F03F3562AED52243BB99F43D1677DB59F0FEFB961633997F7AC924B78FBD0B0334F", - "nonce_indices": [0, 1], - "key_indices": [0, 1], + "aggnonce": "0341432722C5CD0268D829C702CF0D1CBCE57033EED201FD335191385227C3210C03D377F2D258B64AADC0E16F26462323D701D286046A2EA93365656AFD9875982B", + "nonce_indices": [ + 0, + 1 + ], + "key_indices": [ + 0, + 1 + ], "tweak_indices": [], "is_xonly": [], - "psig_indices": [0, 1], - "expected": "CA3C28729659E50F829F55DC5DB1DE88A05D1702B4165B85F95B627FC57733F8D2A89622BDC6CECA7CE3C2704B2B6F433658F66DDB0A788DED3B361248D3EB3E" + "psig_indices": [ + 0, + 1 + ], + "expected": "041DA22223CE65C92C9A0D6C2CAC828AAF1EEE56304FEC371DDF91EBB2B9EF0912F1038025857FEDEB3FF696F8B99FA4BB2C5812F6095A2E0004EC99CE18DE1E" }, { - "aggnonce": "035538518B8043CF4EACD0E701A80657B741C0E6445EC1D6C6177964D22C642971030CFE657EC882F4E08E751B883A78AC1491B30FC86CB57AF2DFF012C2BE6DF1F2", - "nonce_indices": [0, 2], - 
"key_indices": [0, 2], + "aggnonce": "0224AFD36C902084058B51B5D36676BBA4DC97C775873768E58822F87FE437D792028CB15929099EEE2F5DAE404CD39357591BA32E9AF4E162B8D3E7CB5EFE31CB20", + "nonce_indices": [ + 0, + 2 + ], + "key_indices": [ + 0, + 2 + ], "tweak_indices": [], "is_xonly": [], - "psig_indices": [2, 3], - "expected": "3997A11DFF76349532CF25E761365EA1D4F24B62EB23A12A9DAABD5976C3DB9FAFE19671C9413661B8D6AED95B089357F04C0C0D83B8460B71CEDC95B2253391" + "psig_indices": [ + 2, + 3 + ], + "expected": "1069B67EC3D2F3C7C08291ACCB17A9C9B8F2819A52EB5DF8726E17E7D6B52E9F01800260A7E9DAC450F4BE522DE4CE12BA91AEAF2B4279219EF74BE1D286ADD9" }, { - "aggnonce": "024366775E6FFBEBBB954225936BAED71A3884C7933B18225088D19E7AF12D8D5D028D79A520B347B793FFE897A7EB79A4366A3FDCDC652C243FAC3976B3D6DF8AB2", - "nonce_indices": [0, 3], - "key_indices": [0, 2], - "tweak_indices": [0], - "is_xonly": [false], - "psig_indices": [4, 5], - "expected": "5AF759C2839B7FEE59D31DAB800F82FC21258457773A3B1F69F5228C80CAD4317EA39AD756601030E4D4051B7C9A25AB4DE7CB39BED26E0A03A1B2ED5B747F7F" + "aggnonce": "0208C5C438C710F4F96A61E9FF3C37758814B8C3AE12BFEA0ED2C87FF6954FF186020B1816EA104B4FCA2D304D733E0E19CEAD51303FF6420BFD222335CAA402916D", + "nonce_indices": [ + 0, + 3 + ], + "key_indices": [ + 0, + 2 + ], + "tweak_indices": [ + 0 + ], + "is_xonly": [ + false + ], + "psig_indices": [ + 4, + 5 + ], + "expected": "5C558E1DCADE86DA0B2F02626A512E30A22CF5255CAEA7EE32C38E9A71A0E9148BA6C0E6EC7683B64220F0298696F1B878CD47B107B81F7188812D593971E0CC" }, { - "aggnonce": "03B25098C6D0B72DC5717314AF26C126609B4776AA468553DD4354EE20B216B227027D242E9203499173A74E286C1F796F2711E171EE937706BBEA2F4DB10C4E6809", - "nonce_indices": [0, 4], - "key_indices": [0, 3], - "tweak_indices": [0, 1, 2], - "is_xonly": [true, false, true], - "psig_indices": [6, 7], - "expected": "B495A478F91D6E10BF08A156E46D9E62B4C5399C1AEDDA1A9D306F06AFB8A52F2C078FD6B50DDBC33BFFE583C3C1E3D0D5E52891E190101C70D2278BCA943457" + "aggnonce": 
"02B5AD07AFCD99B6D92CB433FBD2A28FDEB98EAE2EB09B6014EF0F8197CD58403302E8616910F9293CF692C49F351DB86B25E352901F0E237BAFDA11F1C1CEF29FFD", + "nonce_indices": [ + 0, + 4 + ], + "key_indices": [ + 0, + 3 + ], + "tweak_indices": [ + 0, + 1, + 2 + ], + "is_xonly": [ + true, + false, + true + ], + "psig_indices": [ + 6, + 7 + ], + "expected": "839B08820B681DBA8DAF4CC7B104E8F2638F9388F8D7A555DC17B6E6971D7426CE07BF6AB01F1DB50E4E33719295F4094572B79868E440FB3DEFD3FAC1DB589E" } ], "error_test_cases": [ { - "aggnonce": "03B25098C6D0B72DC5717314AF26C126609B4776AA468553DD4354EE20B216B227027D242E9203499173A74E286C1F796F2711E171EE937706BBEA2F4DB10C4E6809", - "nonce_indices": [0, 4], - "key_indices": [0, 3], - "tweak_indices": [0, 1, 2], - "is_xonly": [true, false, true], - "psig_indices": [7, 8], + "aggnonce": "02B5AD07AFCD99B6D92CB433FBD2A28FDEB98EAE2EB09B6014EF0F8197CD58403302E8616910F9293CF692C49F351DB86B25E352901F0E237BAFDA11F1C1CEF29FFD", + "nonce_indices": [ + 0, + 4 + ], + "key_indices": [ + 0, + 3 + ], + "tweak_indices": [ + 0, + 1, + 2 + ], + "is_xonly": [ + true, + false, + true + ], + "psig_indices": [ + 7, + 8 + ], "error": { "type": "invalid_contribution", "signer": 1 @@ -83,4 +148,4 @@ "comment": "Partial signature is invalid because it exceeds group size" } ] -} +} \ No newline at end of file diff --git a/btcec/schnorr/musig2/data/sign_verify_vectors.json b/btcec/schnorr/musig2/data/sign_verify_vectors.json index e2499c7fb6..324537168b 100644 --- a/btcec/schnorr/musig2/data/sign_verify_vectors.json +++ b/btcec/schnorr/musig2/data/sign_verify_vectors.json @@ -7,8 +7,8 @@ "020000000000000000000000000000000000000000000000000000000000000007" ], "secnonces": [ - "508B81A611F100A6B2B6B29656590898AF488BCF2E1F55CF22E5CFB84421FE61FA27FD49B1D50085B481285E1CA205D55C82CC1B31FF5CD54A489829355901F7", - "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + 
"508B81A611F100A6B2B6B29656590898AF488BCF2E1F55CF22E5CFB84421FE61FA27FD49B1D50085B481285E1CA205D55C82CC1B31FF5CD54A489829355901F703935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9", + "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9" ], "pnonces": [ "0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480", @@ -65,6 +65,17 @@ } ], "sign_error_test_cases": [ + { + "key_indices": [1, 2], + "aggnonce_index": 0, + "msg_index": 0, + "secnonce_index": 0, + "error": { + "type": "value", + "message": "The signer's pubkey must be included in the list of pubkeys." + }, + "comment": "The signers pubkey is not in the list of pubkeys" + }, { "key_indices": [1, 0, 3], "aggnonce_index": 0, diff --git a/btcec/schnorr/musig2/data/tweak_vectors.json b/btcec/schnorr/musig2/data/tweak_vectors.json index 01ccb8b1b3..d0a7cfe832 100644 --- a/btcec/schnorr/musig2/data/tweak_vectors.json +++ b/btcec/schnorr/musig2/data/tweak_vectors.json @@ -5,7 +5,7 @@ "02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9", "02DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659" ], - "secnonce": "508B81A611F100A6B2B6B29656590898AF488BCF2E1F55CF22E5CFB84421FE61FA27FD49B1D50085B481285E1CA205D55C82CC1B31FF5CD54A489829355901F7", + "secnonce": "508B81A611F100A6B2B6B29656590898AF488BCF2E1F55CF22E5CFB84421FE61FA27FD49B1D50085B481285E1CA205D55C82CC1B31FF5CD54A489829355901F703935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9", "pnonces": [ "0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480", "0279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", diff --git 
a/btcec/schnorr/musig2/musig2_test.go b/btcec/schnorr/musig2/musig2_test.go index 6842e911d9..91dad90b3e 100644 --- a/btcec/schnorr/musig2/musig2_test.go +++ b/btcec/schnorr/musig2/musig2_test.go @@ -278,7 +278,8 @@ func TestMuSigEarlyNonce(t *testing.T) { t.Fatalf("unexpected ctx error: %v", err) } - numSigners := 2 + signers := []*btcec.PublicKey{privKey1.PubKey(), privKey2.PubKey()} + numSigners := len(signers) ctx1, err := NewContext( privKey1, true, WithNumSigners(numSigners), WithEarlyNonceGen(), @@ -289,20 +290,21 @@ func TestMuSigEarlyNonce(t *testing.T) { pubKey1 := ctx1.PubKey() ctx2, err := NewContext( - privKey2, true, WithNumSigners(numSigners), WithEarlyNonceGen(), + privKey2, true, WithKnownSigners(signers), WithEarlyNonceGen(), ) if err != nil { t.Fatalf("unable to make ctx: %v", err) } pubKey2 := ctx2.PubKey() - // At this point, the combined key shouldn't be available for both - // signers, since we only know of the sole signers. + // At this point, the combined key shouldn't be available for signer 1, + // but should be for signer 2, as they know about all signers. if _, err := ctx1.CombinedKey(); !errors.Is(err, ErrNotEnoughSigners) { t.Fatalf("unepxected error: %v", err) } - if _, err := ctx2.CombinedKey(); !errors.Is(err, ErrNotEnoughSigners) { - t.Fatalf("unepxected error: %v", err) + _, err = ctx2.CombinedKey() + if err != nil { + t.Fatalf("unable to get combined key: %v", err) } // The early nonces _should_ be available at this point. 
@@ -320,8 +322,8 @@ func TestMuSigEarlyNonce(t *testing.T) { t.Fatalf("expected 1 signer, instead have: %v", ctx1.NumRegisteredSigners()) } - if ctx2.NumRegisteredSigners() != 1 { - t.Fatalf("expected 1 signer, instead have: %v", + if ctx2.NumRegisteredSigners() != 2 { + t.Fatalf("expected 2 signers, instead have: %v", ctx2.NumRegisteredSigners()) } @@ -336,7 +338,7 @@ func TestMuSigEarlyNonce(t *testing.T) { t.Fatalf("unexpected combined key error: %v", err) } - // We'll now register the other signer for both parties. + // We'll now register the other signer for party 1. done, err := ctx1.RegisterSigner(&pubKey2) if err != nil { t.Fatalf("unable to register signer: %v", err) @@ -344,13 +346,6 @@ func TestMuSigEarlyNonce(t *testing.T) { if !done { t.Fatalf("signer 1 doesn't have all keys") } - done, err = ctx2.RegisterSigner(&pubKey1) - if err != nil { - t.Fatalf("unable to register signer: %v", err) - } - if !done { - t.Fatalf("signer 2 doesn't have all keys") - } // If we try to register the signer again, we should get an error. _, err = ctx2.RegisterSigner(&pubKey1) @@ -370,7 +365,7 @@ func TestMuSigEarlyNonce(t *testing.T) { msg := sha256.Sum256([]byte("let's get taprooty, LN style")) - // If we try to sign before we have the combined nonce, we shoudl get + // If we try to sign before we have the combined nonce, we should get // an error. _, err = session1.Sign(msg) if !errors.Is(err, ErrCombinedNonceUnavailable) { diff --git a/btcec/schnorr/musig2/nonces.go b/btcec/schnorr/musig2/nonces.go index 4b2509a754..988b199471 100644 --- a/btcec/schnorr/musig2/nonces.go +++ b/btcec/schnorr/musig2/nonces.go @@ -6,6 +6,7 @@ import ( "bytes" "crypto/rand" "encoding/binary" + "errors" "io" "github.com/btcsuite/btcd/btcec/v2" @@ -21,7 +22,7 @@ const ( // SecNonceSize is the size of the secret nonces for musig2. The secret // nonces are the corresponding private keys to the public nonce points. 
- SecNonceSize = 64 + SecNonceSize = 97 ) var ( @@ -34,6 +35,10 @@ var ( NonceGenTag = []byte("MuSig/nonce") byteOrder = binary.BigEndian + + // ErrPubkeyInvalid is returned when the pubkey of the WithPublicKey + // option is not passed or of invalid length. + ErrPubkeyInvalid = errors.New("nonce generation requires a valid pubkey") ) // zeroSecNonce is a secret nonce that's all zeroes. This is used to check that @@ -96,6 +101,10 @@ type nonceGenOpts struct { // used in place. randReader io.Reader + // publicKey is the mandatory public key that will be mixed into the nonce + // generation. + publicKey []byte + // secretKey is an optional argument that's used to further augment the // generated nonce by xor'ing it with this secret key. secretKey []byte @@ -142,6 +151,14 @@ func WithCustomRand(r io.Reader) NonceGenOption { } } +// WithPublicKey is the mandatory public key that will be mixed into the nonce +// generation. +func WithPublicKey(pubKey *btcec.PublicKey) NonceGenOption { + return func(o *nonceGenOpts) { + o.publicKey = pubKey.SerializeCompressed() + } +} + // WithNonceSecretKeyAux allows a caller to optionally specify a secret key // that should be used to augment the randomness used to generate the nonces. func WithNonceSecretKeyAux(secKey *btcec.PrivateKey) NonceGenOption { @@ -186,6 +203,7 @@ func withCustomOptions(customOpts nonceGenOpts) NonceGenOption { o.combinedKey = customOpts.combinedKey o.auxInput = customOpts.auxInput o.msg = customOpts.msg + o.publicKey = customOpts.publicKey } } @@ -233,13 +251,13 @@ func writeBytesPrefix(w io.Writer, b []byte, lenWriter lengthWriter) error { // genNonceAuxBytes writes out the full byte string used to derive a secret // nonce based on some initial randomness as well as the series of optional // fields. The byte string used for derivation is: -// - tagged_hash("MuSig/nonce", rand || len(aggpk) || aggpk || m_prefixed -// || len(in) || in || i). 
+// - tagged_hash("MuSig/nonce", rand || len(pk) || pk || +// len(aggpk) || aggpk || m_prefixed || len(in) || in || i). // // where i is the ith secret nonce being generated and m_prefixed is: // - bytes(1, 0) if the message is blank // - bytes(1, 1) || bytes(8, len(m)) || m if the message is present. -func genNonceAuxBytes(rand []byte, i int, +func genNonceAuxBytes(rand []byte, pubkey []byte, i int, opts *nonceGenOpts) (*chainhash.Hash, error) { var w bytes.Buffer @@ -249,8 +267,14 @@ func genNonceAuxBytes(rand []byte, i int, return nil, err } + // Next, we'll write out: len(pk) || pk + err := writeBytesPrefix(&w, pubkey, uint8Writer) + if err != nil { + return nil, err + } + // Next, we'll write out: len(aggpk) || aggpk. - err := writeBytesPrefix(&w, opts.combinedKey, uint8Writer) + err = writeBytesPrefix(&w, opts.combinedKey, uint8Writer) if err != nil { return nil, err } @@ -305,6 +329,11 @@ func GenNonces(options ...NonceGenOption) (*Nonces, error) { opt(opts) } + // We require the pubkey option. + if opts.publicKey == nil || len(opts.publicKey) != 33 { + return nil, ErrPubkeyInvalid + } + // First, we'll start out by generating 32 random bytes drawn from our // CSPRNG. var randBytes [32]byte @@ -322,13 +351,13 @@ func GenNonces(options ...NonceGenOption) (*Nonces, error) { } } - // Using our randomness and the set of optional params, generate our + // Using our randomness, pubkey and the set of optional params, generate our // two secret nonces: k1 and k2. 
- k1, err := genNonceAuxBytes(randBytes[:], 0, opts) + k1, err := genNonceAuxBytes(randBytes[:], opts.publicKey, 0, opts) if err != nil { return nil, err } - k2, err := genNonceAuxBytes(randBytes[:], 1, opts) + k2, err := genNonceAuxBytes(randBytes[:], opts.publicKey, 1, opts) if err != nil { return nil, err } @@ -338,10 +367,11 @@ func GenNonces(options ...NonceGenOption) (*Nonces, error) { k2Mod.SetBytes((*[32]byte)(k2)) // The secret nonces are serialized as the concatenation of the two 32 - // byte secret nonce values. + // byte secret nonce values and the pubkey. var nonces Nonces k1Mod.PutBytesUnchecked(nonces.SecNonce[:]) k2Mod.PutBytesUnchecked(nonces.SecNonce[btcec.PrivKeyBytesLen:]) + copy(nonces.SecNonce[btcec.PrivKeyBytesLen*2:], opts.publicKey) // Next, we'll generate R_1 = k_1*G and R_2 = k_2*G. Along the way we // need to map our nonce values into mod n scalars so we can work with diff --git a/btcec/schnorr/musig2/nonces_test.go b/btcec/schnorr/musig2/nonces_test.go index bdb76dbd35..7105d83b30 100644 --- a/btcec/schnorr/musig2/nonces_test.go +++ b/btcec/schnorr/musig2/nonces_test.go @@ -20,6 +20,7 @@ type nonceGenTestCase struct { AggPk string `json:"aggpk"` Msg *string `json:"msg"` ExtraIn string `json:"extra_in"` + Pk string `json:"pk"` Expected string `json:"expected"` } @@ -55,6 +56,7 @@ func TestMusig2NonceGenTestVectors(t *testing.T) { secretKey: mustParseHex(testCase.Sk), combinedKey: mustParseHex(testCase.AggPk), auxInput: mustParseHex(testCase.ExtraIn), + publicKey: mustParseHex(testCase.Pk), } if testCase.Msg != nil { customOpts.msg = mustParseHex(*testCase.Msg) diff --git a/btcec/schnorr/musig2/sign.go b/btcec/schnorr/musig2/sign.go index 2028571137..9204611dd6 100644 --- a/btcec/schnorr/musig2/sign.go +++ b/btcec/schnorr/musig2/sign.go @@ -38,6 +38,15 @@ var ( // ErrSecretNonceZero is returned when a secret nonce is passed in a // zero. 
ErrSecretNonceZero = fmt.Errorf("secret nonce is blank") + + // ErrSecNoncePubkey is returned when the signing key does not match the + // sec nonce pubkey + ErrSecNoncePubkey = fmt.Errorf("public key does not match secnonce") + + // ErrPubkeyNotIncluded is returned when the signers pubkey is not included + // in the list of pubkeys. + ErrPubkeyNotIncluded = fmt.Errorf("signer's pubkey must be included" + + " in the list of pubkeys") ) // infinityPoint is the jacobian representation of the point at infinity. @@ -252,6 +261,25 @@ func Sign(secNonce [SecNonceSize]byte, privKey *btcec.PrivateKey, option(opts) } + // Check that our signing key belongs to the secNonce + if !bytes.Equal(secNonce[btcec.PrivKeyBytesLen*2:], + privKey.PubKey().SerializeCompressed()) { + + return nil, ErrSecNoncePubkey + } + + // Check that the key set contains the public key to our private key. + var containsPrivKey bool + for _, pk := range pubKeys { + if privKey.PubKey().IsEqual(pk) { + containsPrivKey = true + } + } + + if !containsPrivKey { + return nil, ErrPubkeyNotIncluded + } + // Compute the hash of all the keys here as we'll need it do aggregate // the keys and also at the final step of signing. keysHash := keyHashFingerprint(pubKeys, opts.sortKeys) diff --git a/btcec/schnorr/signature.go b/btcec/schnorr/signature.go index f4532c7d09..8876a6070d 100644 --- a/btcec/schnorr/signature.go +++ b/btcec/schnorr/signature.go @@ -51,8 +51,9 @@ func NewSignature(r *btcec.FieldVal, s *btcec.ModNScalar) *Signature { // Serialize returns the Schnorr signature in the more strict format. // // The signatures are encoded as -// sig[0:32] x coordinate of the point R, encoded as a big-endian uint256 -// sig[32:64] s, encoded also as big-endian uint256 +// +// sig[0:32] x coordinate of the point R, encoded as a big-endian uint256 +// sig[32:64] s, encoded also as big-endian uint256 func (sig Signature) Serialize() []byte { // Total length of returned signature is the length of r and s. 
var b [SignatureSize]byte @@ -90,10 +91,7 @@ func ParseSignature(sig []byte) (*Signature, error) { return nil, signatureError(ecdsa_schnorr.ErrSigRTooBig, str) } var s btcec.ModNScalar - if overflow := s.SetByteSlice(sig[32:64]); overflow { - str := "invalid signature: s >= group order" - return nil, signatureError(ecdsa_schnorr.ErrSigSTooBig, str) - } + s.SetByteSlice(sig[32:64]) // Return the signature. return NewSignature(&r, &s), nil @@ -126,8 +124,7 @@ func schnorrVerify(sig *Signature, hash []byte, pubKeyBytes []byte) error { // 7. Fail if is_infinite(R) // 8. Fail if not hash_even_y(R) // 9. Fail is x(R) != r. - // 10. Return success iff not failure occured before reachign this - // point. + // 10. Return success iff failure did not occur before reaching this point. // Step 1. // @@ -176,10 +173,7 @@ func schnorrVerify(sig *Signature, hash []byte, pubKeyBytes []byte) error { ) var e btcec.ModNScalar - if overflow := e.SetBytes((*[32]byte)(commitment)); overflow != 0 { - str := "hash of (r || P || m) too big" - return signatureError(ecdsa_schnorr.ErrSchnorrHashValue, str) - } + e.SetBytes((*[32]byte)(commitment)) // Negate e here so we can use AddNonConst below to subtract the s*G // point from e*P. @@ -225,7 +219,7 @@ func schnorrVerify(sig *Signature, hash []byte, pubKeyBytes []byte) error { // Step 10. // - // Return success iff not failure occured before reachign this + // Return success iff failure did not occur before reaching this point. return nil } @@ -243,14 +237,14 @@ func zeroArray(a *[scalarSize]byte) { } } -// schnorrSign generates an BIP-340 signature over the secp256k1 curve for the +// schnorrSign generates a BIP-340 signature over the secp256k1 curve for the // provided hash (which should be the result of hashing a larger message) using // the given nonce and private key. The produced signature is deterministic // (same message, nonce, and key yield the same signature) and canonical. 
// // WARNING: The hash MUST be 32 bytes and both the nonce and private keys must // NOT be 0. Since this is an internal use function, these preconditions MUST -// be satisified by the caller. +// be satisfied by the caller. func schnorrSign(privKey, nonce *btcec.ModNScalar, pubKey *btcec.PublicKey, hash []byte, opts *signOptions) (*Signature, error) { @@ -261,7 +255,7 @@ func schnorrSign(privKey, nonce *btcec.ModNScalar, pubKey *btcec.PublicKey, hash // n = curve order // d = private key // m = message - // a = input randmoness + // a = input randomness // r, s = signature // // 1. d' = int(d) @@ -282,7 +276,7 @@ func schnorrSign(privKey, nonce *btcec.ModNScalar, pubKey *btcec.PublicKey, hash // // Note that the set of functional options passed in may modify the // above algorithm. Namely if CustomNonce is used, then steps 6-8 are - // replaced with a process that generates the nonce using rfc6679. If + // replaced with a process that generates the nonce using rfc6979. If // FastSign is passed, then we skip set 14. // NOTE: Steps 1-9 are performed by the caller. @@ -308,13 +302,9 @@ func schnorrSign(privKey, nonce *btcec.ModNScalar, pubKey *btcec.PublicKey, hash // Step 12. // // e = tagged_hash("BIP0340/challenge", bytes(R) || bytes(P) || m) mod n - var rBytes [32]byte - r := &R.X - r.PutBytesUnchecked(rBytes[:]) pBytes := SerializePubKey(pubKey) - commitment := chainhash.TaggedHash( - chainhash.TagBIP0340Challenge, rBytes[:], pBytes, hash, + chainhash.TagBIP0340Challenge, R.X.Bytes()[:], pBytes, hash, ) var e btcec.ModNScalar @@ -330,7 +320,7 @@ func schnorrSign(privKey, nonce *btcec.ModNScalar, pubKey *btcec.PublicKey, hash s := new(btcec.ModNScalar).Mul2(&e, privKey).Add(&k) k.Zero() - sig := NewSignature(r, s) + sig := NewSignature(&R.X, s) // Step 14. 
// @@ -347,8 +337,8 @@ func schnorrSign(privKey, nonce *btcec.ModNScalar, pubKey *btcec.PublicKey, hash return sig, nil } -// SignOption is a functional option arguemnt that allows callers to modify the -// way we generate BIP-340 schnorr signatues. +// SignOption is a functional option argument that allows callers to modify the +// way we generate BIP-340 schnorr signatures. type SignOption func(*signOptions) // signOptions houses the set of functional options that can be used to modify @@ -369,7 +359,7 @@ func defaultSignOptions() *signOptions { } // FastSign forces signing to skip the extra verification step at the end. -// Peformance sensitive applications may opt to use this option to speed up the +// Performance sensitive applications may opt to use this option to speed up the // signing operation. func FastSign() SignOption { return func(o *signOptions) { @@ -414,7 +404,7 @@ func Sign(privKey *btcec.PrivateKey, hash []byte, // n = curve order // d = private key // m = message - // a = input randmoness + // a = input randomness // r, s = signature // // 1. d' = int(d) @@ -435,13 +425,14 @@ func Sign(privKey *btcec.PrivateKey, hash []byte, // // Note that the set of functional options passed in may modify the // above algorithm. Namely if CustomNonce is used, then steps 6-8 are - // replaced with a process that generates the nonce using rfc6679. If + // replaced with a process that generates the nonce using rfc6979. If // FastSign is passed, then we skip set 14. // Step 1. // // d' = int(d) - privKeyScalar := &privKey.Key + var privKeyScalar btcec.ModNScalar + privKeyScalar.Set(&privKey.Key) // Step 2. // @@ -475,7 +466,7 @@ func Sign(privKey *btcec.PrivateKey, hash []byte, // At this point, we check to see if a CustomNonce has been passed in, // and if so, then we'll deviate from the main routine here by - // generating the nonce value as specifid by BIP-0340. + // generating the nonce value as specified by BIP-0340. if opts.authNonce != nil { // Step 6. 
// @@ -512,7 +503,7 @@ func Sign(privKey *btcec.PrivateKey, hash []byte, return nil, signatureError(ecdsa_schnorr.ErrSchnorrHashValue, str) } - sig, err := schnorrSign(privKeyScalar, &kPrime, pub, hash, opts) + sig, err := schnorrSign(&privKeyScalar, &kPrime, pub, hash, opts) kPrime.Zero() if err != nil { return nil, err @@ -535,7 +526,7 @@ func Sign(privKey *btcec.PrivateKey, hash []byte, ) // Steps 10-15. - sig, err := schnorrSign(privKeyScalar, k, pub, hash, opts) + sig, err := schnorrSign(&privKeyScalar, k, pub, hash, opts) k.Zero() if err != nil { // Try again with a new nonce. diff --git a/btcec/schnorr/signature_test.go b/btcec/schnorr/signature_test.go index b99614ff6c..2f96b7e4d5 100644 --- a/btcec/schnorr/signature_test.go +++ b/btcec/schnorr/signature_test.go @@ -10,8 +10,10 @@ import ( "errors" "strings" "testing" + "testing/quick" "github.com/btcsuite/btcd/btcec/v2" + "github.com/davecgh/go-spew/spew" secp_ecdsa "github.com/decred/dcrd/dcrec/secp256k1/v4" ecdsa_schnorr "github.com/decred/dcrd/dcrec/secp256k1/v4/schnorr" ) @@ -254,3 +256,38 @@ func TestSchnorrVerify(t *testing.T) { } } } + +// TestSchnorrSignNoMutate tests that generating a schnorr signature doesn't +// modify/mutate the underlying private key. +func TestSchnorrSignNoMutate(t *testing.T) { + t.Parallel() + + // Assert that given a random private key and message, we can generate + // a signature from that w/o modifying the underlying private key. + f := func(privBytes, msg [32]byte) bool { + privBytesCopy := privBytes + privKey, _ := btcec.PrivKeyFromBytes(privBytesCopy[:]) + + // Generate a signature for private key with our message. + _, err := Sign(privKey, msg[:]) + if err != nil { + t.Logf("unable to gen sig: %v", err) + return false + } + + // We should be able to re-derive the private key from raw + // bytes and have that match up again. 
+ privKeyCopy, _ := btcec.PrivKeyFromBytes(privBytes[:]) + if *privKey != *privKeyCopy { + t.Logf("private doesn't match: expected %v, got %v", + spew.Sdump(privKeyCopy), spew.Sdump(privKey)) + return false + } + + return true + } + + if err := quick.Check(f, nil); err != nil { + t.Fatalf("private key modified: %v", err) + } +} diff --git a/btcjson/chainsvrresults.go b/btcjson/chainsvrresults.go index 4feaeda338..41b93f8570 100644 --- a/btcjson/chainsvrresults.go +++ b/btcjson/chainsvrresults.go @@ -117,6 +117,14 @@ type GetBlockVerboseTxResult struct { NextHash string `json:"nextblockhash,omitempty"` } +// GetChainTipsResult models the data from the getchaintips command. +type GetChainTipsResult struct { + Height int32 `json:"height"` + Hash string `json:"hash"` + BranchLen int32 `json:"branchlen"` + Status string `json:"status"` +} + // GetChainTxStatsResult models the data from the getchaintxstats command. type GetChainTxStatsResult struct { Time int64 `json:"time"` @@ -139,9 +147,10 @@ type CreateMultiSigResult struct { // DecodeScriptResult models the data returned from the decodescript command. 
type DecodeScriptResult struct { Asm string `json:"asm"` - ReqSigs int32 `json:"reqSigs,omitempty"` + ReqSigs int32 `json:"reqSigs,omitempty"` // Deprecated: removed in Bitcoin Core Type string `json:"type"` - Addresses []string `json:"addresses,omitempty"` + Address string `json:"address,omitempty"` + Addresses []string `json:"addresses,omitempty"` // Deprecated: removed in Bitcoin Core P2sh string `json:"p2sh,omitempty"` } @@ -429,9 +438,10 @@ type GetRawMempoolVerboseResult struct { type ScriptPubKeyResult struct { Asm string `json:"asm"` Hex string `json:"hex,omitempty"` - ReqSigs int32 `json:"reqSigs,omitempty"` + ReqSigs int32 `json:"reqSigs,omitempty"` // Deprecated: removed in Bitcoin Core Type string `json:"type"` - Addresses []string `json:"addresses,omitempty"` + Address string `json:"address,omitempty"` + Addresses []string `json:"addresses,omitempty"` // Deprecated: removed in Bitcoin Core } // GetTxOutResult models the data from the gettxout command. diff --git a/btcjson/cmdparse.go b/btcjson/cmdparse.go index 4fb8dd6260..5cf3215e52 100644 --- a/btcjson/cmdparse.go +++ b/btcjson/cmdparse.go @@ -495,7 +495,7 @@ func assignField(paramNum int, fieldName string, dest reflect.Value, src reflect // by this package are already registered by default. // // The arguments are most efficient when they are the exact same type as the -// underlying field in the command struct associated with the the method, +// underlying field in the command struct associated with the method, // however this function also will perform a variety of conversions to make it // more flexible. This allows, for example, command line args which are strings // to be passed unaltered. In particular, the following conversions are diff --git a/btcjson/doc.go b/btcjson/doc.go index 165b9ef91c..f0456716b9 100644 --- a/btcjson/doc.go +++ b/btcjson/doc.go @@ -5,16 +5,16 @@ /* Package btcjson provides primitives for working with the bitcoin JSON-RPC API. 
-Overview +# Overview When communicating via the JSON-RPC protocol, all of the commands need to be -marshalled to and from the the wire in the appropriate format. This package +marshalled to and from the wire in the appropriate format. This package provides data structures and primitives to ease this process. In addition, it also provides some additional features such as custom command registration, command categorization, and reflection-based help generation. -JSON-RPC Protocol Overview +# JSON-RPC Protocol Overview This information is not necessary in order to use this package, but it does provide some intuition into what the marshalling and unmarshalling that is @@ -47,39 +47,39 @@ with it) doesn't always follow the spec and will sometimes return an error string in the result field with a null error for certain commands. However, for the most part, the error field will be set as described on failure. -Marshalling and Unmarshalling +# Marshalling and Unmarshalling Based upon the discussion above, it should be easy to see how the types of this package map into the required parts of the protocol - Request Objects (type Request) - - Commands (type Cmd) - - Notifications (type Ntfn) + 1. Commands (type Cmd) + 2. Notifications (type Ntfn) - Response Objects (type Response) - - Result (type Result) + 1. Result (type Result) To simplify the marshalling of the requests and responses, the MarshalCmd and MarshalResponse functions are provided. They return the raw bytes ready to be sent across the wire. Unmarshalling a received Request object is a two step process: - 1) Unmarshal the raw bytes into a Request struct instance via json.Unmarshal - 2) Use UnmarshalCmd on the Result field of the unmarshalled Request to create - a concrete command or notification instance with all struct fields set - accordingly + 1. Unmarshal the raw bytes into a Request struct instance via json.Unmarshal + 2. 
Use UnmarshalCmd on the Result field of the unmarshalled Request to create + a concrete command or notification instance with all struct fields set + accordingly This approach is used since it provides the caller with access to the additional fields in the request that are not part of the command such as the ID. Unmarshalling a received Response object is also a two step process: - 1) Unmarhsal the raw bytes into a Response struct instance via json.Unmarshal - 2) Depending on the ID, unmarshal the Result field of the unmarshalled - Response to create a concrete type instance + 1. Unmarhsal the raw bytes into a Response struct instance via json.Unmarshal + 2. Depending on the ID, unmarshal the Result field of the unmarshalled + Response to create a concrete type instance As above, this approach is used since it provides the caller with access to the fields in the response such as the ID and Error. -Command Creation +# Command Creation This package provides two approaches for creating a new command. This first, and preferred, method is to use one of the NewCmd functions. This allows @@ -93,7 +93,7 @@ obviously, run-time which means any mistakes won't be found until the code is actually executed. However, it is quite useful for user-supplied commands that are intentionally dynamic. -Custom Command Registration +# Custom Command Registration The command handling of this package is built around the concept of registered commands. This is true for the wide variety of commands already provided by the @@ -104,7 +104,7 @@ function for this purpose. A list of all registered methods can be obtained with the RegisteredCmdMethods function. -Command Inspection +# Command Inspection All registered commands are registered with flags that identify information such as whether the command applies to a chain server, wallet server, or is a @@ -112,7 +112,7 @@ notification along with the method name to use. 
These flags can be obtained with the MethodUsageFlags flags, and the method can be obtained with the CmdMethod function. -Help Generation +# Help Generation To facilitate providing consistent help to users of the RPC server, this package exposes the GenerateHelp and function which uses reflection on registered @@ -122,7 +122,7 @@ generate the final help text. In addition, the MethodUsageText function is provided to generate consistent one-line usage for registered commands and notifications using reflection. -Errors +# Errors There are 2 distinct type of errors supported by this package: diff --git a/btcjson/help.go b/btcjson/help.go index f502d09fd8..2cc55b8410 100644 --- a/btcjson/help.go +++ b/btcjson/help.go @@ -476,11 +476,12 @@ func isValidResultType(kind reflect.Kind) bool { // an error will use the key in place of the description. // // The following outlines the required keys: -// "--synopsis" Synopsis for the command -// "-" Description for each command argument -// "-" Description for each object field -// "--condition<#>" Description for each result condition -// "--result<#>" Description for each primitive result num +// +// "--synopsis" Synopsis for the command +// "-" Description for each command argument +// "-" Description for each object field +// "--condition<#>" Description for each result condition +// "--result<#>" Description for each primitive result num // // Notice that the "special" keys synopsis, condition<#>, and result<#> are // preceded by a double dash to ensure they don't conflict with field names. @@ -492,16 +493,17 @@ func isValidResultType(kind reflect.Kind) bool { // For example, consider the 'help' command itself. There are two possible // returns depending on the provided parameters. So, the help would be // generated by calling the function as follows: -// GenerateHelp("help", descs, (*string)(nil), (*string)(nil)). +// +// GenerateHelp("help", descs, (*string)(nil), (*string)(nil)). 
// // The following keys would then be required in the provided descriptions map: // -// "help--synopsis": "Returns a list of all commands or help for ...." -// "help-command": "The command to retrieve help for", -// "help--condition0": "no command provided" -// "help--condition1": "command specified" -// "help--result0": "List of commands" -// "help--result1": "Help for specified command" +// "help--synopsis": "Returns a list of all commands or help for ...." +// "help-command": "The command to retrieve help for", +// "help--condition0": "no command provided" +// "help--condition1": "command specified" +// "help--result0": "List of commands" +// "help--result1": "Help for specified command" func GenerateHelp(method string, descs map[string]string, resultTypes ...interface{}) (string, error) { // Look up details about the provided method and error out if not // registered. diff --git a/btcjson/walletsvrcmds.go b/btcjson/walletsvrcmds.go index 5983d3f783..979ab0c25b 100644 --- a/btcjson/walletsvrcmds.go +++ b/btcjson/walletsvrcmds.go @@ -875,7 +875,8 @@ func (s *ScriptPubKey) UnmarshalJSON(data []byte) error { // // Descriptors are typically ranged when specified in the form of generic HD // chain paths. -// Example of a ranged descriptor: pkh(tpub.../*) +// +// Example of a ranged descriptor: pkh(tpub.../*) // // The value can be an int to specify the end of the range, or the range // itself, as []int{begin, end}. diff --git a/btcjson/walletsvrresults.go b/btcjson/walletsvrresults.go index 78a6e647f5..d85db0a6fa 100644 --- a/btcjson/walletsvrresults.go +++ b/btcjson/walletsvrresults.go @@ -48,11 +48,11 @@ type embeddedAddressInfo struct { // Reference: https://bitcoincore.org/en/doc/0.20.0/rpc/wallet/getaddressinfo // // The GetAddressInfoResult has three segments: -// 1. General information about the address. -// 2. Metadata (Timestamp, HDKeyPath, HDSeedID) and wallet fields -// (IsMine, IsWatchOnly). -// 3. 
Information about the embedded address in case of P2SH or P2WSH. -// Same structure as (1). +// 1. General information about the address. +// 2. Metadata (Timestamp, HDKeyPath, HDSeedID) and wallet fields +// (IsMine, IsWatchOnly). +// 3. Information about the embedded address in case of P2SH or P2WSH. +// Same structure as (1). type GetAddressInfoResult struct { embeddedAddressInfo IsMine bool `json:"ismine"` diff --git a/btcutil/amount.go b/btcutil/amount.go index 71714153aa..bb70c9b11d 100644 --- a/btcutil/amount.go +++ b/btcutil/amount.go @@ -6,8 +6,10 @@ package btcutil import ( "errors" + "fmt" "math" "strconv" + "strings" ) // AmountUnit describes a method of converting an Amount to something @@ -101,11 +103,20 @@ func (a Amount) ToBTC() float64 { // Format formats a monetary amount counted in bitcoin base units as a // string for a given unit. The conversion will succeed for any unit, -// however, known units will be formated with an appended label describing +// however, known units will be formatted with an appended label describing // the units with SI notation, or "Satoshi" for the base unit. func (a Amount) Format(u AmountUnit) string { units := " " + u.String() - return strconv.FormatFloat(a.ToUnit(u), 'f', -int(u+8), 64) + units + formatted := strconv.FormatFloat(a.ToUnit(u), 'f', -int(u+8), 64) + + // When formatting full BTC, add trailing zeroes for numbers + // with decimal point to ease reading of sat amount. + if u == AmountBTC { + if strings.Contains(formatted, ".") { + return fmt.Sprintf("%.8f%s", a.ToUnit(u), units) + } + } + return formatted + units } // String is the equivalent of calling Format with AmountBTC. 
diff --git a/btcutil/amount_test.go b/btcutil/amount_test.go index 2b6c3f753d..69498b07e2 100644 --- a/btcutil/amount_test.go +++ b/btcutil/amount_test.go @@ -136,8 +136,29 @@ func TestAmountUnitConversions(t *testing.T) { name: "BTC", amount: 44433322211100, unit: AmountBTC, - converted: 444333.22211100, - s: "444333.222111 BTC", + converted: 444333.222111, + s: "444333.22211100 BTC", + }, + { + name: "a thousand satoshi as BTC", + amount: 1000, + unit: AmountBTC, + converted: 0.00001, + s: "0.00001000 BTC", + }, + { + name: "a single satoshi as BTC", + amount: 1, + unit: AmountBTC, + converted: 0.00000001, + s: "0.00000001 BTC", + }, + { + name: "amount with trailing zero but no decimals", + amount: 1000000000, + unit: AmountBTC, + converted: 10, + s: "10 BTC", }, { name: "mBTC", diff --git a/btcutil/appdata.go b/btcutil/appdata.go index e36cf7c4a4..b6c63b9a29 100644 --- a/btcutil/appdata.go +++ b/btcutil/appdata.go @@ -95,11 +95,12 @@ func appDataDir(goos, appName string, roaming bool) string { // (%LOCALAPPDATA%) that is used by default. 
// // Example results: -// dir := AppDataDir("myapp", false) -// POSIX (Linux/BSD): ~/.myapp -// Mac OS: $HOME/Library/Application Support/Myapp -// Windows: %LOCALAPPDATA%\Myapp -// Plan 9: $home/myapp +// +// dir := AppDataDir("myapp", false) +// POSIX (Linux/BSD): ~/.myapp +// Mac OS: $HOME/Library/Application Support/Myapp +// Windows: %LOCALAPPDATA%\Myapp +// Plan 9: $home/myapp func AppDataDir(appName string, roaming bool) string { return appDataDir(runtime.GOOS, appName, roaming) } diff --git a/btcutil/base58/base58.go b/btcutil/base58/base58.go index 8ee5956718..bd0ea47cd9 100644 --- a/btcutil/base58/base58.go +++ b/btcutil/base58/base58.go @@ -55,6 +55,10 @@ func Decode(b string) []byte { total := uint64(0) for _, v := range t[:n] { + if v > 255 { + return []byte("") + } + tmp := b58[v] if tmp == 255 { return []byte("") diff --git a/btcutil/base58/base58_test.go b/btcutil/base58/base58_test.go index b868d1d401..eb7e4d4bcf 100644 --- a/btcutil/base58/base58_test.go +++ b/btcutil/base58/base58_test.go @@ -43,6 +43,8 @@ var invalidStringTests = []struct { {"4kl8", ""}, {"0OIl", ""}, {"!@#$%^&*()-_=+~`", ""}, + {"abcd\xd80", ""}, + {"abcd\U000020BF", ""}, } var hexTests = []struct { diff --git a/btcutil/base58/doc.go b/btcutil/base58/doc.go index 9a2c0e6e3d..d657f050f7 100644 --- a/btcutil/base58/doc.go +++ b/btcutil/base58/doc.go @@ -6,7 +6,7 @@ Package base58 provides an API for working with modified base58 and Base58Check encodings. -Modified Base58 Encoding +# Modified Base58 Encoding Standard base58 encoding is similar to standard base64 encoding except, as the name implies, it uses a 58 character alphabet which results in an alphanumeric @@ -17,7 +17,7 @@ The modified base58 alphabet used by Bitcoin, and hence this package, omits the 0, O, I, and l characters that look the same in many fonts and are therefore hard to humans to distinguish. 
-Base58Check Encoding Scheme +# Base58Check Encoding Scheme The Base58Check encoding scheme is primarily used for Bitcoin addresses at the time of this writing, however it can be used to generically encode arbitrary diff --git a/btcutil/base58/genalphabet.go b/btcutil/base58/genalphabet.go index 010cbee39e..959f34d4e2 100644 --- a/btcutil/base58/genalphabet.go +++ b/btcutil/base58/genalphabet.go @@ -2,7 +2,8 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -//+build ignore +//go:build ignore +// +build ignore package main diff --git a/btcutil/bench_test.go b/btcutil/bench_test.go new file mode 100644 index 0000000000..c1f52da5b7 --- /dev/null +++ b/btcutil/bench_test.go @@ -0,0 +1,80 @@ +package btcutil_test + +import ( + "testing" + + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg/chainhash" +) + +var ( + bencHash *chainhash.Hash +) + +// BenchmarkTxHash benchmarks the performance of calculating the hash of a +// transaction. +func BenchmarkTxHash(b *testing.B) { + // Make a new block from the test block, we'll then call the Bytes + // function to cache the serialized block. Afterwards we all + // Transactions to populate the serialization cache. + testBlock := btcutil.NewBlock(&Block100000) + _, _ = testBlock.Bytes() + + // The second transaction in the block has no witness data. The first + // does however. + testTx := testBlock.Transactions()[1] + testTx2 := testBlock.Transactions()[0] + + // Run a benchmark for the portion that needs to strip the non-witness + // data from the transaction. + b.Run("tx_hash_has_witness", func(b *testing.B) { + b.ResetTimer() + b.ReportAllocs() + + var txHash *chainhash.Hash + for i := 0; i < b.N; i++ { + txHash = testTx2.Hash() + } + + bencHash = txHash + }) + + // Next, run it for the portion that can just hash the bytes directly. 
+ b.Run("tx_hash_no_witness", func(b *testing.B) { + b.ResetTimer() + b.ReportAllocs() + + var txHash *chainhash.Hash + for i := 0; i < b.N; i++ { + txHash = testTx.Hash() + } + + bencHash = txHash + }) + +} + +// BenchmarkTxWitnessHash benchmarks the performance of calculating the hash of +// a transaction. +func BenchmarkTxWitnessHash(b *testing.B) { + // Make a new block from the test block, we'll then call the Bytes + // function to cache the serialized block. Afterwards we all + // Transactions to populate the serialization cache. + testBlock := btcutil.NewBlock(&Block100000) + _, _ = testBlock.Bytes() + + // The first transaction in the block has been modified to have witness + // data. + testTx := testBlock.Transactions()[0] + + b.ResetTimer() + b.ReportAllocs() + + var txHash *chainhash.Hash + for i := 0; i < b.N; i++ { + txHash = testTx.WitnessHash() + } + + bencHash = txHash + +} diff --git a/btcutil/block.go b/btcutil/block.go index 7d38abc4a0..7f8d8786e3 100644 --- a/btcutil/block.go +++ b/btcutil/block.go @@ -154,12 +154,32 @@ func (b *Block) Transactions() []*Tx { b.transactions = make([]*Tx, len(b.msgBlock.Transactions)) } + // Offset of each tx. 80 accounts for the block header size. + offset := 80 + wire.VarIntSerializeSize( + uint64(len(b.msgBlock.Transactions)), + ) + // Generate and cache the wrapped transactions for all that haven't // already been done. for i, tx := range b.transactions { if tx == nil { newTx := NewTx(b.msgBlock.Transactions[i]) newTx.SetIndex(i) + + size := b.msgBlock.Transactions[i].SerializeSize() + + // The block may not always have the serializedBlock. + if len(b.serializedBlock) > 0 { + // This allows for the reuse of the already + // serialized tx. + newTx.setBytes( + b.serializedBlock[offset : offset+size], + ) + + // Increment offset for this block. 
+ offset += size + } + b.transactions[i] = newTx } } @@ -234,6 +254,12 @@ func NewBlockFromBytes(serializedBlock []byte) (*Block, error) { return nil, err } b.serializedBlock = serializedBlock + + // This initializes []btcutil.Tx to have the serialized raw + // transactions cached. Helps speed up things like generating the + // txhash. + b.Transactions() + return b, nil } @@ -256,10 +282,19 @@ func NewBlockFromReader(r io.Reader) (*Block, error) { // NewBlockFromBlockAndBytes returns a new instance of a bitcoin block given // an underlying wire.MsgBlock and the serialized bytes for it. See Block. -func NewBlockFromBlockAndBytes(msgBlock *wire.MsgBlock, serializedBlock []byte) *Block { - return &Block{ +func NewBlockFromBlockAndBytes(msgBlock *wire.MsgBlock, + serializedBlock []byte) *Block { + + b := &Block{ msgBlock: msgBlock, serializedBlock: serializedBlock, blockHeight: BlockHeightUnknown, } + + // This initializes []btcutil.Tx to have the serialized raw + // transactions cached. Helps speed up things like generating the + // txhash. 
+ b.Transactions() + + return b } diff --git a/btcutil/block_test.go b/btcutil/block_test.go index e24b9842f7..06e0ad2803 100644 --- a/btcutil/block_test.go +++ b/btcutil/block_test.go @@ -11,9 +11,9 @@ import ( "testing" "time" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" "github.com/davecgh/go-spew/spew" ) diff --git a/btcutil/bloom/example_test.go b/btcutil/bloom/example_test.go index bcd5d0190b..e5a148a5ba 100644 --- a/btcutil/bloom/example_test.go +++ b/btcutil/bloom/example_test.go @@ -9,9 +9,9 @@ import ( "math/rand" "time" + "github.com/btcsuite/btcd/btcutil/bloom" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil/bloom" ) // This example demonstrates how to create a new bloom filter, add a transaction diff --git a/btcutil/bloom/filter.go b/btcutil/bloom/filter.go index 8c4527ea29..2eca228570 100644 --- a/btcutil/bloom/filter.go +++ b/btcutil/bloom/filter.go @@ -9,10 +9,10 @@ import ( "math" "sync" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) // ln2Squared is simply the square of the natural log of 2. diff --git a/btcutil/bloom/filter_test.go b/btcutil/bloom/filter_test.go index 1811dbf57e..c4b839ad17 100644 --- a/btcutil/bloom/filter_test.go +++ b/btcutil/bloom/filter_test.go @@ -9,10 +9,10 @@ import ( "encoding/hex" "testing" - "github.com/btcsuite/btcd/chaincfg/chainhash" - "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/btcutil/bloom" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" ) // TestFilterLarge ensures a maximum sized filter can be created. 
diff --git a/btcutil/bloom/merkleblock.go b/btcutil/bloom/merkleblock.go index 101a8f9194..468aa72a05 100644 --- a/btcutil/bloom/merkleblock.go +++ b/btcutil/bloom/merkleblock.go @@ -6,9 +6,9 @@ package bloom import ( "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) // merkleBlock is used to house intermediate information needed to generate a @@ -21,7 +21,7 @@ type merkleBlock struct { bits []byte } -// calcTreeWidth calculates and returns the the number of nodes (width) or a +// calcTreeWidth calculates and returns the number of nodes (width) or a // merkle tree at the given depth-first height. func (m *merkleBlock) calcTreeWidth(height uint32) uint32 { return (m.numTx + (1 << height) - 1) >> height @@ -41,7 +41,8 @@ func (m *merkleBlock) calcHash(height, pos uint32) *chainhash.Hash { } else { right = left } - return blockchain.HashMerkleBranches(left, right) + res := blockchain.HashMerkleBranches(left, right) + return &res } // traverseAndBuild builds a partial merkle tree using a recursive depth-first diff --git a/btcutil/bloom/merkleblock_test.go b/btcutil/bloom/merkleblock_test.go index 15e21a4bb7..ae7b1f3430 100644 --- a/btcutil/bloom/merkleblock_test.go +++ b/btcutil/bloom/merkleblock_test.go @@ -9,10 +9,10 @@ import ( "encoding/hex" "testing" - "github.com/btcsuite/btcd/chaincfg/chainhash" - "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/btcutil/bloom" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" ) func TestMerkleBlock3(t *testing.T) { diff --git a/btcutil/coinset/coins.go b/btcutil/coinset/coins.go index 9d813418b6..a0e680d8d6 100644 --- a/btcutil/coinset/coins.go +++ b/btcutil/coinset/coins.go @@ -9,9 +9,9 @@ import ( "errors" "sort" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" 
"github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) // Coin represents a spendable transaction outpoint @@ -75,7 +75,7 @@ func (cs *CoinSet) TotalValue() (value btcutil.Amount) { } // TotalValueAge returns the total value * number of confirmations -// of the coins in the set. +// of the coins in the set. func (cs *CoinSet) TotalValueAge() (valueAge int64) { return cs.totalValueAge } @@ -238,7 +238,6 @@ func (s MaxValueAgeCoinSelector) CoinSelect(targetValue btcutil.Amount, coins [] // input priority over the threshold, but no guarantees will be made as to // minimality of the selection. The selection below is almost certainly // suboptimal. -// type MinPriorityCoinSelector struct { MaxInputs int MinChangeAmount btcutil.Amount diff --git a/btcutil/coinset/coins_test.go b/btcutil/coinset/coins_test.go index 874dc6c6d6..035a40cb99 100644 --- a/btcutil/coinset/coins_test.go +++ b/btcutil/coinset/coins_test.go @@ -11,10 +11,10 @@ import ( "fmt" "testing" - "github.com/btcsuite/btcd/chaincfg/chainhash" - "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/btcutil/coinset" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" ) type TestCoin struct { diff --git a/btcutil/doc.go b/btcutil/doc.go index 36cda1c782..c4a4441201 100644 --- a/btcutil/doc.go +++ b/btcutil/doc.go @@ -5,21 +5,21 @@ /* Package btcutil provides bitcoin-specific convenience functions and types. -Block Overview +# Block Overview A Block defines a bitcoin block that provides easier and more efficient manipulation of raw wire protocol blocks. It also memoizes hashes for the block and its transactions on their first access so subsequent accesses don't have to repeat the relatively expensive hashing operations. -Tx Overview +# Tx Overview A Tx defines a bitcoin transaction that provides more efficient manipulation of raw wire protocol transactions. 
It memoizes the hash for the transaction on its first access so subsequent accesses don't have to repeat the relatively expensive hashing operations. -Address Overview +# Address Overview The Address interface provides an abstraction for a Bitcoin address. While the most common type is a pay-to-pubkey-hash, Bitcoin already supports others and diff --git a/btcutil/example_test.go b/btcutil/example_test.go index 6b62fdd44f..90be77b073 100644 --- a/btcutil/example_test.go +++ b/btcutil/example_test.go @@ -20,7 +20,7 @@ func ExampleAmount() { // Output: // Zero Satoshi: 0 BTC // 100,000,000 Satoshis: 1 BTC - // 100,000 Satoshis: 0.001 BTC + // 100,000 Satoshis: 0.00100000 BTC } func ExampleNewAmount() { @@ -69,7 +69,7 @@ func ExampleAmount_unitConversions() { // Output: // Satoshi to kBTC: 444.333222111 kBTC - // Satoshi to BTC: 444333.222111 BTC + // Satoshi to BTC: 444333.22211100 BTC // Satoshi to MilliBTC: 444333222.111 mBTC // Satoshi to MicroBTC: 444333222111 μBTC // Satoshi to Satoshi: 44433322211100 Satoshi diff --git a/btcutil/gcs/builder/builder.go b/btcutil/gcs/builder/builder.go index 6f15ec7a68..3a85ad0519 100644 --- a/btcutil/gcs/builder/builder.go +++ b/btcutil/gcs/builder/builder.go @@ -8,12 +8,13 @@ package builder import ( "crypto/rand" "fmt" + "io" "math" + "github.com/btcsuite/btcd/btcutil/gcs" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil/gcs" ) const ( @@ -348,7 +349,10 @@ func GetFilterHash(filter *gcs.Filter) (chainhash.Hash, error) { return chainhash.Hash{}, err } - return chainhash.DoubleHashH(filterData), nil + return chainhash.DoubleHashRaw(func(w io.Writer) error { + _, err := w.Write(filterData) + return err + }), nil } // MakeHeaderForFilter makes a filter chain header for a filter, given the @@ -367,5 +371,8 @@ func MakeHeaderForFilter(filter *gcs.Filter, prevHeader chainhash.Hash) (chainha // The final filter hash is the double-sha256 
of the hash computed // above. - return chainhash.DoubleHashH(filterTip), nil + return chainhash.DoubleHashRaw(func(w io.Writer) error { + _, err := w.Write(filterTip) + return err + }), nil } diff --git a/btcutil/gcs/doc.go b/btcutil/gcs/doc.go index 780fd76631..8e67e369a6 100644 --- a/btcutil/gcs/doc.go +++ b/btcutil/gcs/doc.go @@ -6,14 +6,14 @@ /* Package gcs provides an API for building and using a Golomb-coded set filter. -Golomb-Coded Set +# Golomb-Coded Set A Golomb-coded set is a probabilistic data structure used similarly to a Bloom filter. A filter uses constant-size overhead plus on average n+2 bits per item added to the filter, where 2^-n is the desired false positive (collision) probability. -GCS use in Bitcoin +# GCS use in Bitcoin GCS filters are a proposed mechanism for storing and transmitting per-block filters in Bitcoin. The usage is intended to be the inverse of Bloom filters: diff --git a/btcutil/gcs/gcs.go b/btcutil/gcs/gcs.go index fbffb06131..fca315d6db 100644 --- a/btcutil/gcs/gcs.go +++ b/btcutil/gcs/gcs.go @@ -44,7 +44,7 @@ const ( // described in: // https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ // -// * v * N >> log_2(N) +// v * N >> log_2(N) // // In our case, using 64-bit integers, log_2 is 64. 
As most processors don't // support 128-bit arithmetic natively, we'll be super portable and unfold the diff --git a/btcutil/go.mod b/btcutil/go.mod index b03318a461..9718e6d5fd 100644 --- a/btcutil/go.mod +++ b/btcutil/go.mod @@ -4,13 +4,11 @@ go 1.16 require ( github.com/aead/siphash v1.0.1 - github.com/btcsuite/btcd v0.23.0 + github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd github.com/btcsuite/btcd/btcec/v2 v2.1.3 - github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 + github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 github.com/davecgh/go-spew v1.1.1 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 ) - -replace github.com/btcsuite/btcd => ../ diff --git a/btcutil/go.sum b/btcutil/go.sum index 8d35bdaf86..a57a5dd920 100644 --- a/btcutil/go.sum +++ b/btcutil/go.sum @@ -1,17 +1,34 @@ github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= +github.com/btcsuite/btcd v0.23.4 h1:IzV6qqkfwbItOS/sg/aDfPDsjPP8twrCOE2R93hxMlQ= +github.com/btcsuite/btcd v0.23.4/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY= +github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd h1:js1gPwhcFflTZ7Nzl7WHaOTlTr5hIrR4n1NM4v9n4Kw= +github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= github.com/btcsuite/btcd/btcec/v2 v2.1.3 h1:xM/n3yIhHAhHy04z4i43C8p4ehixJZMsnrVJkgl+MTE= github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= 
+github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0 
h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -19,6 +36,7 @@ github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +github.com/decred/dcrd/lru v1.0.0 h1:Kbsb1SFDsIlaupWPwsPp+dkxiBY1frcS07PCPgotKz8= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -34,15 +52,22 @@ github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0 
h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -52,9 +77,11 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -85,6 +112,7 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= diff --git a/btcutil/hdkeychain/doc.go b/btcutil/hdkeychain/doc.go index dcf74f6b51..094bcdd646 100644 --- a/btcutil/hdkeychain/doc.go +++ b/btcutil/hdkeychain/doc.go @@ -6,7 +6,7 @@ Package hdkeychain provides an API for bitcoin hierarchical deterministic extended keys (BIP0032). -Overview +# Overview The ability to implement hierarchical deterministic wallets depends on the ability to create and derive hierarchical deterministic extended keys. @@ -16,19 +16,19 @@ deterministic extended keys by providing an ExtendedKey type and supporting functions. 
Each extended key can either be a private or public extended key which itself is capable of deriving a child extended key. -Determining the Extended Key Type +# Determining the Extended Key Type Whether an extended key is a private or public extended key can be determined with the IsPrivate function. -Transaction Signing Keys and Payment Addresses +# Transaction Signing Keys and Payment Addresses In order to create and sign transactions, or provide others with addresses to send funds to, the underlying key and address material must be accessible. This package provides the ECPubKey, ECPrivKey, and Address functions for this purpose. -The Master Node +# The Master Node As previously mentioned, the extended keys are hierarchical meaning they are used to form a tree. The root of that tree is called the master node and this @@ -36,7 +36,7 @@ package provides the NewMaster function to create it from a cryptographically random seed. The GenerateSeed function is provided as a convenient way to create a random seed for use with the NewMaster function. -Deriving Children +# Deriving Children Once you have created a tree root (or have deserialized an extended key as discussed later), the child extended keys can be derived by using the Derive @@ -46,7 +46,7 @@ the HardenedKeyStart constant + the hardened key number as the index to the Derive function. This provides the ability to cascade the keys into a tree and hence generate the hierarchical deterministic key chains. -Normal vs Hardened Derived Extended Keys +# Normal vs Hardened Derived Extended Keys A private extended key can be used to derive both hardened and non-hardened (normal) child private and public extended keys. A public extended key can only @@ -59,22 +59,23 @@ the reason for the existence of hardened keys, and why they are used for the account level in the tree. This way, a leak of an account-specific (or below) private key never risks compromising the master or other accounts." 
-Neutering a Private Extended Key +# Neutering a Private Extended Key A private extended key can be converted to a new instance of the corresponding public extended key with the Neuter function. The original extended key is not modified. A public extended key is still capable of deriving non-hardened child public extended keys. -Serializing and Deserializing Extended Keys +# Serializing and Deserializing Extended Keys Extended keys are serialized and deserialized with the String and NewKeyFromString functions. The serialized key is a Base58-encoded string which looks like the following: + public key: xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LHhwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw private key: xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7 -Network +# Network Extended keys are much like normal Bitcoin addresses in that they have version bytes which tie them to a specific network. The SetNet and IsForNet functions diff --git a/btcutil/hdkeychain/example_test.go b/btcutil/hdkeychain/example_test.go index 7489d387a7..8ea4244df1 100644 --- a/btcutil/hdkeychain/example_test.go +++ b/btcutil/hdkeychain/example_test.go @@ -7,8 +7,8 @@ package hdkeychain_test import ( "fmt" - "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/btcutil/hdkeychain" + "github.com/btcsuite/btcd/chaincfg" ) // This example demonstrates how to generate a cryptographically random seed diff --git a/btcutil/hdkeychain/extendedkey.go b/btcutil/hdkeychain/extendedkey.go index c44e6a18b4..0bbb1e7d19 100644 --- a/btcutil/hdkeychain/extendedkey.go +++ b/btcutil/hdkeychain/extendedkey.go @@ -517,8 +517,9 @@ func (k *ExtendedKey) Neuter() (*ExtendedKey, error) { // on the SLIP132 standard (serializable to yprv/ypub, zprv/zpub, etc.). 
// // References: -// [SLIP132]: SLIP-0132 - Registered HD version bytes for BIP-0032 -// https://github.com/satoshilabs/slips/blob/master/slip-0132.md +// +// [SLIP132]: SLIP-0132 - Registered HD version bytes for BIP-0032 +// https://github.com/satoshilabs/slips/blob/master/slip-0132.md func (k *ExtendedKey) CloneWithVersion(version []byte) (*ExtendedKey, error) { if len(version) != 4 { // TODO: The semantically correct error to return here is diff --git a/btcutil/hdkeychain/extendedkey_test.go b/btcutil/hdkeychain/extendedkey_test.go index 0721b92480..05ec2d6d37 100644 --- a/btcutil/hdkeychain/extendedkey_test.go +++ b/btcutil/hdkeychain/extendedkey_test.go @@ -1095,7 +1095,8 @@ func TestMaximumDepth(t *testing.T) { // extended keys. // // The following tool was used for generating the tests: -// https://jlopp.github.io/xpub-converter +// +// https://jlopp.github.io/xpub-converter func TestCloneWithVersion(t *testing.T) { tests := []struct { name string diff --git a/btcutil/net.go b/btcutil/net.go index bf11733c64..ec5638622b 100644 --- a/btcutil/net.go +++ b/btcutil/net.go @@ -2,6 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. +//go:build !appengine // +build !appengine package btcutil diff --git a/btcutil/net_noop.go b/btcutil/net_noop.go index b0b7c2e40a..ae9c1f5fb9 100644 --- a/btcutil/net_noop.go +++ b/btcutil/net_noop.go @@ -2,6 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package btcutil diff --git a/btcutil/psbt/bip32.go b/btcutil/psbt/bip32.go index 6b22dc0654..96a3f67274 100644 --- a/btcutil/psbt/bip32.go +++ b/btcutil/psbt/bip32.go @@ -13,7 +13,7 @@ type Bip32Derivation struct { // PubKey is the raw pubkey serialized in compressed format. PubKey []byte - // MasterKeyFingerprint is the finger print of the master pubkey. + // MasterKeyFingerprint is the fingerprint of the master pubkey. 
MasterKeyFingerprint uint32 // Bip32Path is the BIP 32 path with child index as a distinct integer. @@ -36,10 +36,10 @@ func (s Bip32Sorter) Less(i, j int) bool { return bytes.Compare(s[i].PubKey, s[j].PubKey) < 0 } -// readBip32Derivation deserializes a byte slice containing chunks of 4 byte +// ReadBip32Derivation deserializes a byte slice containing chunks of 4 byte // little endian encodings of uint32 values, the first of which is the // masterkeyfingerprint and the remainder of which are the derivation path. -func readBip32Derivation(path []byte) (uint32, []uint32, error) { +func ReadBip32Derivation(path []byte) (uint32, []uint32, error) { // BIP-0174 defines the derivation path being encoded as // "<32-bit uint> <32-bit uint>*" // with the asterisk meaning 0 to n times. Which in turn means that an diff --git a/btcutil/psbt/extractor.go b/btcutil/psbt/extractor.go index dc7f10fddb..365e2f1bba 100644 --- a/btcutil/psbt/extractor.go +++ b/btcutil/psbt/extractor.go @@ -61,13 +61,14 @@ func Extract(p *Packet) (*wire.MsgTx, error) { return nil, err } - // Now that we know how may inputs we'll need, we'll + // Now that we know how many inputs we'll need, we'll // construct a packing slice, then read out each input // (with a varint prefix) from the witnessReader. 
tin.Witness = make(wire.TxWitness, witCount) for j := uint64(0); j < witCount; j++ { wit, err := wire.ReadVarBytes( - witnessReader, 0, txscript.MaxScriptSize, "witness", + witnessReader, 0, + txscript.MaxScriptSize, "witness", ) if err != nil { return nil, err diff --git a/btcutil/psbt/finalizer.go b/btcutil/psbt/finalizer.go index 8c50a94b48..3c2edd5557 100644 --- a/btcutil/psbt/finalizer.go +++ b/btcutil/psbt/finalizer.go @@ -14,6 +14,8 @@ package psbt import ( "bytes" "fmt" + + "github.com/btcsuite/btcd/btcec/v2/schnorr" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" ) @@ -462,7 +464,9 @@ func finalizeWitnessInput(p *Packet, inIndex int) error { return ErrNotFinalizable } - serializedWitness, err = writePKHWitness(sigs[0], pubKeys[0]) + serializedWitness, err = writePKHWitness( + sigs[0], pubKeys[0], + ) if err != nil { return err } @@ -516,7 +520,20 @@ func finalizeTaprootInput(p *Packet, inIndex int) error { switch { // Key spend path. case len(pInput.TaprootKeySpendSig) > 0: - serializedWitness, err = writeWitness(pInput.TaprootKeySpendSig) + sig := pInput.TaprootKeySpendSig + + // Make sure TaprootKeySpendSig is equal to size of signature, + // if not, we assume that sighash flag was appended to the + // signature. + if len(pInput.TaprootKeySpendSig) == schnorr.SignatureSize { + // Append to the signature if flag is not equal to the + // default sighash (that can be omitted). + if pInput.SighashType != txscript.SigHashDefault { + sigHashType := byte(pInput.SighashType) + sig = append(sig, sigHashType) + } + } + serializedWitness, err = writeWitness(sig) // Script spend path. 
case len(pInput.TaprootScriptSpendSig) > 0: diff --git a/btcutil/psbt/go.mod b/btcutil/psbt/go.mod index 80f57fc1b3..81ccca266b 100644 --- a/btcutil/psbt/go.mod +++ b/btcutil/psbt/go.mod @@ -3,10 +3,10 @@ module github.com/btcsuite/btcd/btcutil/psbt go 1.17 require ( - github.com/btcsuite/btcd v0.23.0 + github.com/btcsuite/btcd v0.23.5-0.20231219003633-4c2ce6daed8f github.com/btcsuite/btcd/btcec/v2 v2.1.3 - github.com/btcsuite/btcd/btcutil v1.1.0 - github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 + github.com/btcsuite/btcd/btcutil v1.1.4 + github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 github.com/davecgh/go-spew v1.1.1 github.com/stretchr/testify v1.7.0 ) @@ -20,7 +20,3 @@ require ( golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed // indirect gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect ) - -replace github.com/btcsuite/btcd/btcutil => ../ - -replace github.com/btcsuite/btcd => ../.. diff --git a/btcutil/psbt/go.sum b/btcutil/psbt/go.sum index a901223de4..74a2ce8a3d 100644 --- a/btcutil/psbt/go.sum +++ b/btcutil/psbt/go.sum @@ -1,14 +1,31 @@ github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= +github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= +github.com/btcsuite/btcd v0.23.5-0.20231219003633-4c2ce6daed8f h1:E+dQ8sNtK/lOdfeflUKkRLXe/zW7I333C7HhaoASjZA= +github.com/btcsuite/btcd v0.23.5-0.20231219003633-4c2ce6daed8f/go.mod h1:KVEB81PybLGYzpf1db/kKNi1ZEbUsiVGeTGhKuOl5AM= +github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= github.com/btcsuite/btcd/btcec/v2 v2.1.3 h1:xM/n3yIhHAhHy04z4i43C8p4ehixJZMsnrVJkgl+MTE= github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod 
h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= +github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= +github.com/btcsuite/btcd/btcutil v1.1.4 h1:mWvWRLRIPuoeZsVRpc0xNCkfeNxWy1E4jIZ06ZpGI1A= +github.com/btcsuite/btcd/btcutil v1.1.4/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod 
h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -30,14 +47,19 @@ github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -46,9 +68,11 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -80,6 +104,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= diff --git a/btcutil/psbt/partial_input.go b/btcutil/psbt/partial_input.go index 7686c451cb..73595d2513 100644 --- a/btcutil/psbt/partial_input.go +++ b/btcutil/psbt/partial_input.go @@ -37,9 +37,7 @@ type PInput struct { // NOTE: Only one of the two arguments should be specified, with the other // being `nil`; otherwise the created PsbtInput object will fail IsSane() // checks and will not be usable. -func NewPsbtInput(nonWitnessUtxo *wire.MsgTx, - witnessUtxo *wire.TxOut) *PInput { - +func NewPsbtInput(nonWitnessUtxo *wire.MsgTx, witnessUtxo *wire.TxOut) *PInput { return &PInput{ NonWitnessUtxo: nonWitnessUtxo, WitnessUtxo: witnessUtxo, @@ -57,7 +55,6 @@ func NewPsbtInput(nonWitnessUtxo *wire.MsgTx, // IsSane returns true only if there are no conflicting values in the Psbt // PInput. For segwit v0 no checks are currently implemented. func (pi *PInput) IsSane() bool { - // TODO(guggero): Implement sanity checks for segwit v1. For segwit v0 // it is unsafe to only rely on the witness UTXO so we don't check that // only one is set anymore. @@ -69,12 +66,12 @@ func (pi *PInput) IsSane() bool { // deserialize attempts to deserialize a new PInput from the passed io.Reader. func (pi *PInput) deserialize(r io.Reader) error { for { - keyint, keydata, err := getKey(r) + keyCode, keyData, err := getKey(r) if err != nil { return err } - if keyint == -1 { - // Reached separator byte + if keyCode == -1 { + // Reached separator byte, this section is done. 
break } value, err := wire.ReadVarBytes( @@ -84,14 +81,14 @@ func (pi *PInput) deserialize(r io.Reader) error { return err } - switch InputType(keyint) { + switch InputType(keyCode) { case NonWitnessUtxoType: if pi.NonWitnessUtxo != nil { return ErrDuplicateKey } - if keydata != nil { - return ErrInvalidKeydata + if keyData != nil { + return ErrInvalidKeyData } tx := wire.NewMsgTx(2) @@ -105,8 +102,8 @@ func (pi *PInput) deserialize(r io.Reader) error { if pi.WitnessUtxo != nil { return ErrDuplicateKey } - if keydata != nil { - return ErrInvalidKeydata + if keyData != nil { + return ErrInvalidKeyData } txout, err := readTxOut(value) if err != nil { @@ -116,7 +113,7 @@ func (pi *PInput) deserialize(r io.Reader) error { case PartialSigType: newPartialSig := PartialSig{ - PubKey: keydata, + PubKey: keyData, Signature: value, } @@ -124,7 +121,7 @@ func (pi *PInput) deserialize(r io.Reader) error { return ErrInvalidPsbtFormat } - // Duplicate keys are not allowed + // Duplicate keys are not allowed. for _, x := range pi.PartialSigs { if bytes.Equal(x.PubKey, newPartialSig.PubKey) { return ErrDuplicateKey @@ -137,27 +134,27 @@ func (pi *PInput) deserialize(r io.Reader) error { if pi.SighashType != 0 { return ErrDuplicateKey } - if keydata != nil { - return ErrInvalidKeydata + if keyData != nil { + return ErrInvalidKeyData } - // Bounds check on value here since the sighash type must be a - // 32-bit unsigned integer. + // Bounds check on value here since the sighash type + // must be a 32-bit unsigned integer. 
if len(value) != 4 { - return ErrInvalidKeydata + return ErrInvalidKeyData } - shtype := txscript.SigHashType( + sighashType := txscript.SigHashType( binary.LittleEndian.Uint32(value), ) - pi.SighashType = shtype + pi.SighashType = sighashType case RedeemScriptInputType: if pi.RedeemScript != nil { return ErrDuplicateKey } - if keydata != nil { - return ErrInvalidKeydata + if keyData != nil { + return ErrInvalidKeyData } pi.RedeemScript = value @@ -165,23 +162,25 @@ func (pi *PInput) deserialize(r io.Reader) error { if pi.WitnessScript != nil { return ErrDuplicateKey } - if keydata != nil { - return ErrInvalidKeydata + if keyData != nil { + return ErrInvalidKeyData } pi.WitnessScript = value case Bip32DerivationInputType: - if !validatePubkey(keydata) { + if !validatePubkey(keyData) { return ErrInvalidPsbtFormat } - master, derivationPath, err := readBip32Derivation(value) + master, derivationPath, err := ReadBip32Derivation( + value, + ) if err != nil { return err } // Duplicate keys are not allowed for _, x := range pi.Bip32Derivation { - if bytes.Equal(x.PubKey, keydata) { + if bytes.Equal(x.PubKey, keyData) { return ErrDuplicateKey } } @@ -189,7 +188,7 @@ func (pi *PInput) deserialize(r io.Reader) error { pi.Bip32Derivation = append( pi.Bip32Derivation, &Bip32Derivation{ - PubKey: keydata, + PubKey: keyData, MasterKeyFingerprint: master, Bip32Path: derivationPath, }, @@ -199,8 +198,8 @@ func (pi *PInput) deserialize(r io.Reader) error { if pi.FinalScriptSig != nil { return ErrDuplicateKey } - if keydata != nil { - return ErrInvalidKeydata + if keyData != nil { + return ErrInvalidKeyData } pi.FinalScriptSig = value @@ -209,8 +208,8 @@ func (pi *PInput) deserialize(r io.Reader) error { if pi.FinalScriptWitness != nil { return ErrDuplicateKey } - if keydata != nil { - return ErrInvalidKeydata + if keyData != nil { + return ErrInvalidKeyData } pi.FinalScriptWitness = value @@ -219,26 +218,26 @@ func (pi *PInput) deserialize(r io.Reader) error { if 
pi.TaprootKeySpendSig != nil { return ErrDuplicateKey } - if keydata != nil { - return ErrInvalidKeydata + if keyData != nil { + return ErrInvalidKeyData } // The signature can either be 64 or 65 bytes. switch { case len(value) == schnorrSigMinLength: if !validateSchnorrSignature(value) { - return ErrInvalidKeydata + return ErrInvalidKeyData } case len(value) == schnorrSigMaxLength: if !validateSchnorrSignature( value[0:schnorrSigMinLength], ) { - return ErrInvalidKeydata + return ErrInvalidKeyData } default: - return ErrInvalidKeydata + return ErrInvalidKeyData } pi.TaprootKeySpendSig = value @@ -246,13 +245,13 @@ func (pi *PInput) deserialize(r io.Reader) error { case TaprootScriptSpendSignatureType: // The key data for the script spend signature is: // - if len(keydata) != 32*2 { - return ErrInvalidKeydata + if len(keyData) != 32*2 { + return ErrInvalidKeyData } newPartialSig := TaprootScriptSpendSig{ - XOnlyPubKey: keydata[:32], - LeafHash: keydata[32:], + XOnlyPubKey: keyData[:32], + LeafHash: keyData[32:], } // The signature can either be 64 or 65 bytes. @@ -268,11 +267,11 @@ func (pi *PInput) deserialize(r io.Reader) error { ) default: - return ErrInvalidKeydata + return ErrInvalidKeyData } if !newPartialSig.checkValid() { - return ErrInvalidKeydata + return ErrInvalidKeyData } // Duplicate keys are not allowed. @@ -288,11 +287,11 @@ func (pi *PInput) deserialize(r io.Reader) error { case TaprootLeafScriptType: if len(value) < 1 { - return ErrInvalidKeydata + return ErrInvalidKeyData } newLeafScript := TaprootTapLeafScript{ - ControlBlock: keydata, + ControlBlock: keyData, Script: value[:len(value)-1], LeafVersion: txscript.TapscriptLeafVersion( value[len(value)-1], @@ -300,7 +299,7 @@ func (pi *PInput) deserialize(r io.Reader) error { } if !newLeafScript.checkValid() { - return ErrInvalidKeydata + return ErrInvalidKeyData } // Duplicate keys are not allowed. 
@@ -318,12 +317,12 @@ func (pi *PInput) deserialize(r io.Reader) error { ) case TaprootBip32DerivationInputType: - if !validateXOnlyPubkey(keydata) { - return ErrInvalidKeydata + if !validateXOnlyPubkey(keyData) { + return ErrInvalidKeyData } - taprootDerivation, err := readTaprootBip32Derivation( - keydata, value, + taprootDerivation, err := ReadTaprootBip32Derivation( + keyData, value, ) if err != nil { return err @@ -331,7 +330,7 @@ func (pi *PInput) deserialize(r io.Reader) error { // Duplicate keys are not allowed. for _, x := range pi.TaprootBip32Derivation { - if bytes.Equal(x.XOnlyPubKey, keydata) { + if bytes.Equal(x.XOnlyPubKey, keyData) { return ErrDuplicateKey } } @@ -344,12 +343,12 @@ func (pi *PInput) deserialize(r io.Reader) error { if pi.TaprootInternalKey != nil { return ErrDuplicateKey } - if keydata != nil { - return ErrInvalidKeydata + if keyData != nil { + return ErrInvalidKeyData } if !validateXOnlyPubkey(value) { - return ErrInvalidKeydata + return ErrInvalidKeyData } pi.TaprootInternalKey = value @@ -358,25 +357,27 @@ func (pi *PInput) deserialize(r io.Reader) error { if pi.TaprootMerkleRoot != nil { return ErrDuplicateKey } - if keydata != nil { - return ErrInvalidKeydata + if keyData != nil { + return ErrInvalidKeyData } pi.TaprootMerkleRoot = value default: // A fall through case for any proprietary types. - keyintanddata := []byte{byte(keyint)} - keyintanddata = append(keyintanddata, keydata...) + keyCodeAndData := append( + []byte{byte(keyCode)}, keyData..., + ) newUnknown := &Unknown{ - Key: keyintanddata, + Key: keyCodeAndData, Value: value, } - // Duplicate key+keydata are not allowed + // Duplicate key+keyData are not allowed. for _, x := range pi.Unknowns { if bytes.Equal(x.Key, newUnknown.Key) && bytes.Equal(x.Value, newUnknown.Value) { + return ErrDuplicateKey } } @@ -390,7 +391,6 @@ func (pi *PInput) deserialize(r io.Reader) error { // serialize attempts to serialize the target PInput into the passed io.Writer. 
func (pi *PInput) serialize(w io.Writer) error { - if !pi.IsSane() { return ErrInvalidPsbtFormat } @@ -538,7 +538,7 @@ func (pi *PInput) serialize(w io.Writer) error { ) }) for _, derivation := range pi.TaprootBip32Derivation { - value, err := serializeTaprootBip32Derivation( + value, err := SerializeTaprootBip32Derivation( derivation, ) if err != nil { @@ -593,7 +593,7 @@ func (pi *PInput) serialize(w io.Writer) error { } // Unknown is a special case; we don't have a key type, only a key and - // a value field + // a value field. for _, kv := range pi.Unknowns { err := serializeKVpair(w, kv.Key, kv.Value) if err != nil { diff --git a/btcutil/psbt/partial_output.go b/btcutil/psbt/partial_output.go index 33b5ff9981..86e476457d 100644 --- a/btcutil/psbt/partial_output.go +++ b/btcutil/psbt/partial_output.go @@ -17,6 +17,7 @@ type POutput struct { TaprootInternalKey []byte TaprootTapTree []byte TaprootBip32Derivation []*TaprootBip32Derivation + Unknowns []*Unknown } // NewPsbtOutput creates an instance of PsbtOutput; the three parameters @@ -34,12 +35,12 @@ func NewPsbtOutput(redeemScript []byte, witnessScript []byte, // deserialize attempts to recode a new POutput from the passed io.Reader. func (po *POutput) deserialize(r io.Reader) error { for { - keyint, keydata, err := getKey(r) + keyCode, keyData, err := getKey(r) if err != nil { return err } - if keyint == -1 { - // Reached separator byte + if keyCode == -1 { + // Reached separator byte, this section is done. 
break } @@ -50,14 +51,14 @@ func (po *POutput) deserialize(r io.Reader) error { return err } - switch OutputType(keyint) { + switch OutputType(keyCode) { case RedeemScriptOutputType: if po.RedeemScript != nil { return ErrDuplicateKey } - if keydata != nil { - return ErrInvalidKeydata + if keyData != nil { + return ErrInvalidKeyData } po.RedeemScript = value @@ -65,30 +66,32 @@ func (po *POutput) deserialize(r io.Reader) error { if po.WitnessScript != nil { return ErrDuplicateKey } - if keydata != nil { - return ErrInvalidKeydata + if keyData != nil { + return ErrInvalidKeyData } po.WitnessScript = value case Bip32DerivationOutputType: - if !validatePubkey(keydata) { - return ErrInvalidKeydata + if !validatePubkey(keyData) { + return ErrInvalidKeyData } - master, derivationPath, err := readBip32Derivation(value) + master, derivationPath, err := ReadBip32Derivation( + value, + ) if err != nil { return err } - // Duplicate keys are not allowed + // Duplicate keys are not allowed. for _, x := range po.Bip32Derivation { - if bytes.Equal(x.PubKey, keydata) { + if bytes.Equal(x.PubKey, keyData) { return ErrDuplicateKey } } po.Bip32Derivation = append(po.Bip32Derivation, &Bip32Derivation{ - PubKey: keydata, + PubKey: keyData, MasterKeyFingerprint: master, Bip32Path: derivationPath, }, @@ -98,12 +101,12 @@ func (po *POutput) deserialize(r io.Reader) error { if po.TaprootInternalKey != nil { return ErrDuplicateKey } - if keydata != nil { - return ErrInvalidKeydata + if keyData != nil { + return ErrInvalidKeyData } if !validateXOnlyPubkey(value) { - return ErrInvalidKeydata + return ErrInvalidKeyData } po.TaprootInternalKey = value @@ -112,19 +115,19 @@ func (po *POutput) deserialize(r io.Reader) error { if po.TaprootTapTree != nil { return ErrDuplicateKey } - if keydata != nil { - return ErrInvalidKeydata + if keyData != nil { + return ErrInvalidKeyData } po.TaprootTapTree = value case TaprootBip32DerivationOutputType: - if !validateXOnlyPubkey(keydata) { - return 
ErrInvalidKeydata + if !validateXOnlyPubkey(keyData) { + return ErrInvalidKeyData } - taprootDerivation, err := readTaprootBip32Derivation( - keydata, value, + taprootDerivation, err := ReadTaprootBip32Derivation( + keyData, value, ) if err != nil { return err @@ -132,7 +135,7 @@ func (po *POutput) deserialize(r io.Reader) error { // Duplicate keys are not allowed. for _, x := range po.TaprootBip32Derivation { - if bytes.Equal(x.XOnlyPubKey, keydata) { + if bytes.Equal(x.XOnlyPubKey, keyData) { return ErrDuplicateKey } } @@ -142,8 +145,25 @@ func (po *POutput) deserialize(r io.Reader) error { ) default: - // Unknown type is allowed for inputs but not outputs. - return ErrInvalidPsbtFormat + // A fall through case for any proprietary types. + keyCodeAndData := append( + []byte{byte(keyCode)}, keyData..., + ) + newUnknown := &Unknown{ + Key: keyCodeAndData, + Value: value, + } + + // Duplicate key+keyData are not allowed. + for _, x := range po.Unknowns { + if bytes.Equal(x.Key, newUnknown.Key) && + bytes.Equal(x.Value, newUnknown.Value) { + + return ErrDuplicateKey + } + } + + po.Unknowns = append(po.Unknowns, newUnknown) } } @@ -211,7 +231,7 @@ func (po *POutput) serialize(w io.Writer) error { ) }) for _, derivation := range po.TaprootBip32Derivation { - value, err := serializeTaprootBip32Derivation( + value, err := SerializeTaprootBip32Derivation( derivation, ) if err != nil { @@ -226,5 +246,14 @@ func (po *POutput) serialize(w io.Writer) error { } } + // Unknown is a special case; we don't have a key type, only a key and + // a value field + for _, kv := range po.Unknowns { + err := serializeKVpair(w, kv.Key, kv.Value) + if err != nil { + return err + } + } + return nil } diff --git a/btcutil/psbt/psbt.go b/btcutil/psbt/psbt.go index 5ea51eead0..964061bdc5 100644 --- a/btcutil/psbt/psbt.go +++ b/btcutil/psbt/psbt.go @@ -11,9 +11,9 @@ import ( "bytes" "encoding/base64" "errors" - "io" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/wire" ) @@ -22,7 
+22,7 @@ import ( const psbtMagicLength = 5 var ( - // psbtMagic is the separator + // psbtMagic is the separator. psbtMagic = [psbtMagicLength]byte{0x70, 0x73, 0x62, 0x74, 0xff, // = "psbt" + 0xff sep } @@ -30,11 +30,11 @@ var ( // MaxPsbtValueLength is the size of the largest transaction serialization // that could be passed in a NonWitnessUtxo field. This is definitely -//less than 4M. +// less than 4M. const MaxPsbtValueLength = 4000000 // MaxPsbtKeyLength is the length of the largest key that we'll successfully -// deserialize from the wire. Anything more will return ErrInvalidKeydata. +// deserialize from the wire. Anything more will return ErrInvalidKeyData. const MaxPsbtKeyLength = 10000 var ( @@ -47,40 +47,41 @@ var ( // due to having the same key repeated in the same key-value pair. ErrDuplicateKey = errors.New("Invalid Psbt due to duplicate key") - // ErrInvalidKeydata indicates that a key-value pair in the PSBT + // ErrInvalidKeyData indicates that a key-value pair in the PSBT // serialization contains data in the key which is not valid. - ErrInvalidKeydata = errors.New("Invalid key data") + ErrInvalidKeyData = errors.New("Invalid key data") - // ErrInvalidMagicBytes indicates that a passed Psbt serialization is invalid - // due to having incorrect magic bytes. - ErrInvalidMagicBytes = errors.New("Invalid Psbt due to incorrect magic bytes") + // ErrInvalidMagicBytes indicates that a passed Psbt serialization is + // invalid due to having incorrect magic bytes. + ErrInvalidMagicBytes = errors.New("Invalid Psbt due to incorrect " + + "magic bytes") - // ErrInvalidRawTxSigned indicates that the raw serialized transaction in the - // global section of the passed Psbt serialization is invalid because it - // contains scriptSigs/witnesses (i.e. is fully or partially signed), which - // is not allowed by BIP174. 
- ErrInvalidRawTxSigned = errors.New("Invalid Psbt, raw transaction must " + - "be unsigned.") + // ErrInvalidRawTxSigned indicates that the raw serialized transaction + // in the global section of the passed Psbt serialization is invalid + // because it contains scriptSigs/witnesses (i.e. is fully or partially + // signed), which is not allowed by BIP174. + ErrInvalidRawTxSigned = errors.New("Invalid Psbt, raw transaction " + + "must be unsigned.") // ErrInvalidPrevOutNonWitnessTransaction indicates that the transaction // hash (i.e. SHA256^2) of the fully serialized previous transaction - // provided in the NonWitnessUtxo key-value field doesn't match the prevout - // hash in the UnsignedTx field in the PSBT itself. - ErrInvalidPrevOutNonWitnessTransaction = errors.New("Prevout hash does " + - "not match the provided non-witness utxo serialization") + // provided in the NonWitnessUtxo key-value field doesn't match the + // prevout hash in the UnsignedTx field in the PSBT itself. + ErrInvalidPrevOutNonWitnessTransaction = errors.New("Prevout hash " + + "does not match the provided non-witness utxo serialization") // ErrInvalidSignatureForInput indicates that the signature the user is // trying to append to the PSBT is invalid, either because it does // not correspond to the previous transaction hash, or redeem script, // or witness script. // NOTE this does not include ECDSA signature checking. - ErrInvalidSignatureForInput = errors.New("Signature does not correspond " + - "to this input") + ErrInvalidSignatureForInput = errors.New("Signature does not " + + "correspond to this input") - // ErrInputAlreadyFinalized indicates that the PSBT passed to a Finalizer - // already contains the finalized scriptSig or witness. 
- ErrInputAlreadyFinalized = errors.New("Cannot finalize PSBT, finalized " + - "scriptSig or scriptWitnes already exists") + // ErrInputAlreadyFinalized indicates that the PSBT passed to a + // Finalizer already contains the finalized scriptSig or witness. + ErrInputAlreadyFinalized = errors.New("Cannot finalize PSBT, " + + "finalized scriptSig or scriptWitness already exists") // ErrIncompletePSBT indicates that the Extractor object // was unable to successfully extract the passed Psbt struct because @@ -99,8 +100,8 @@ var ( ErrInvalidSigHashFlags = errors.New("Invalid Sighash Flags") // ErrUnsupportedScriptType indicates that the redeem script or - // scriptwitness given is not supported by this codebase, or is otherwise - // not valid. + // script witness given is not supported by this codebase, or is + // otherwise not valid. ErrUnsupportedScriptType = errors.New("Unsupported script type") ) @@ -112,7 +113,7 @@ type Unknown struct { Value []byte } -// Packet is the actual psbt repreesntation. It is a is a set of 1 + N + M +// Packet is the actual psbt representation. It is a set of 1 + N + M // key-value pair lists, 1 global, defining the unsigned transaction structure // with N inputs and M outputs. These key-value pairs can contain scripts, // signatures, key derivations and other transaction-defining data. @@ -129,7 +130,7 @@ type Packet struct { Outputs []POutput // Unknowns are the set of custom types (global only) within this PSBT. - Unknowns []Unknown + Unknowns []*Unknown } // validateUnsignedTx returns true if the transaction is unsigned. Note that @@ -148,23 +149,20 @@ func validateUnsignedTX(tx *wire.MsgTx) bool { // NewFromUnsignedTx creates a new Psbt struct, without any signatures (i.e. // only the global section is non-empty) using the passed unsigned transaction.
func NewFromUnsignedTx(tx *wire.MsgTx) (*Packet, error) { - if !validateUnsignedTX(tx) { return nil, ErrInvalidRawTxSigned } inSlice := make([]PInput, len(tx.TxIn)) outSlice := make([]POutput, len(tx.TxOut)) - unknownSlice := make([]Unknown, 0) + unknownSlice := make([]*Unknown, 0) - retPsbt := Packet{ + return &Packet{ UnsignedTx: tx, Inputs: inSlice, Outputs: outSlice, Unknowns: unknownSlice, - } - - return &retPsbt, nil + }, nil } // NewFromRawBytes returns a new instance of a Packet struct created by reading @@ -175,7 +173,6 @@ func NewFromUnsignedTx(tx *wire.MsgTx) (*Packet, error) { // NOTE: To create a Packet from one's own data, rather than reading in a // serialization from a counterparty, one should use a psbt.New. func NewFromRawBytes(r io.Reader, b64 bool) (*Packet, error) { - // If the PSBT is encoded in bas64, then we'll create a new wrapper // reader that'll allow us to incrementally decode the contents of the // io.Reader. @@ -197,11 +194,11 @@ func NewFromRawBytes(r io.Reader, b64 bool) (*Packet, error) { // Next we parse the GLOBAL section. There is currently only 1 known // key type, UnsignedTx. We insist this exists first; unknowns are // allowed, but only after. - keyint, keydata, err := getKey(r) + keyCode, keyData, err := getKey(r) if err != nil { return nil, err } - if GlobalType(keyint) != UnsignedTxType || keydata != nil { + if GlobalType(keyCode) != UnsignedTxType || keyData != nil { return nil, ErrInvalidPsbtFormat } @@ -227,7 +224,7 @@ func NewFromRawBytes(r io.Reader, b64 bool) (*Packet, error) { // Next we parse any unknowns that may be present, making sure that we // break at the separator. - var unknownSlice []Unknown + var unknownSlice []*Unknown for { keyint, keydata, err := getKey(r) if err != nil { @@ -247,7 +244,7 @@ func NewFromRawBytes(r io.Reader, b64 bool) (*Packet, error) { keyintanddata := []byte{byte(keyint)} keyintanddata = append(keyintanddata, keydata...) 
- newUnknown := Unknown{ + newUnknown := &Unknown{ Key: keyintanddata, Value: value, } @@ -278,7 +275,7 @@ func NewFromRawBytes(r io.Reader, b64 bool) (*Packet, error) { outSlice[i] = output } - // Populate the new Packet object + // Populate the new Packet object. newPsbt := Packet{ UnsignedTx: msgTx, Inputs: inSlice, @@ -298,7 +295,6 @@ func NewFromRawBytes(r io.Reader, b64 bool) (*Packet, error) { // Serialize creates a binary serialization of the referenced Packet struct // with lexicographical ordering (by key) of the subsections. func (p *Packet) Serialize(w io.Writer) error { - // First we write out the precise set of magic bytes that identify a // valid PSBT transaction. if _, err := w.Write(psbtMagic[:]); err != nil { @@ -323,6 +319,15 @@ func (p *Packet) Serialize(w io.Writer) error { return err } + // Unknown is a special case; we don't have a key type, only a key and + // a value field + for _, kv := range p.Unknowns { + err := serializeKVpair(w, kv.Key, kv.Value) + if err != nil { + return err + } + } + // With that our global section is done, so we'll write out the // separator. separator := []byte{0x00} @@ -382,7 +387,6 @@ func (p *Packet) IsComplete() bool { // SanityCheck checks conditions on a PSBT to ensure that it obeys the // rules of BIP174, and returns true if so, false if not. func (p *Packet) SanityCheck() error { - if !validateUnsignedTX(p.UnsignedTx) { return ErrInvalidRawTxSigned } @@ -395,3 +399,20 @@ func (p *Packet) SanityCheck() error { return nil } + +// GetTxFee returns the transaction fee. An error is returned if a transaction +// input does not contain any UTXO information. 
+func (p *Packet) GetTxFee() (btcutil.Amount, error) { + sumInputs, err := SumUtxoInputValues(p) + if err != nil { + return 0, err + } + + var sumOutputs int64 + for _, txOut := range p.UnsignedTx.TxOut { + sumOutputs += txOut.Value + } + + fee := sumInputs - sumOutputs + return btcutil.Amount(fee), nil +} diff --git a/btcutil/psbt/psbt_test.go b/btcutil/psbt/psbt_test.go index 1ce4780d84..2309b07e40 100644 --- a/btcutil/psbt/psbt_test.go +++ b/btcutil/psbt/psbt_test.go @@ -749,6 +749,52 @@ func TestPsbtExtractor(t *testing.T) { } } +func TestFinalizerAddSigHashFlags(t *testing.T) { + var signedPsbtData = map[string]string{ + "Default": "70736274ff01005e0200000001f1aabce974f1b242b36913f4f8a9f138a8042914dddc4117a578813a4dc32ee10000000000ffffffff017b0a0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad1000000000001012b430b0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad1011340e80246ac1955def419572514e50e4be47f56ccd51beae41ec80ad30cb77ed59ebca3c38dd8506e1b7c28fafa4bdf7d821464be1ee152416bdaf2c056fb4fb3290117206b1a4876464d6bfc6a7c106dd4c5a0f08af94b45a8200e47e02a7dc6148fd7b00000", + "All": "70736274ff01005e020000000193e988e9eebfe51c0f362741aaab1e0699175c83cfd8087c4a06e24e3b80bc220000000000ffffffff019b0d0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad1000000000001012b630e0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad101030401000000011340ee0a03b010e515e38553d4d96c65a9d6092d06756c47c16c5674c3bde6ad0c151f6d4074601f3c2967f12c3b624b4013591e65458a8b5f80b96a613132cee3bb0117206b1a4876464d6bfc6a7c106dd4c5a0f08af94b45a8200e47e02a7dc6148fd7b00000", + "None": 
"70736274ff01005e02000000013cfe0f5fd1b9a73230b003d336b5e4d7abf3452f6a5c4f266c434648a161eb170000000000ffffffff01d30c0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad1000000000001012b9b0d0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad10103040200000001134032573ce8ee8a9afac2008bcb45ce7f96ac95ee7ffad26d10388c97fb87f76f77dc414224ca98b01cbec361488ac29d11e018be412d2725be85dfe5c3fd3b6b4c0117206b1a4876464d6bfc6a7c106dd4c5a0f08af94b45a8200e47e02a7dc6148fd7b00000", + "Single": "70736274ff01005e02000000013173659bb6be7474b8d00efd3b38f2a225f5591bd4edd873170a1e0ff0ef15990000000000ffffffff01630e0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad1000000000001012b2b0f0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad101030403000000011340251ce90a8b36cd90bf430f9522772b09bd3ef90039e53cddc5bda6abb61f1c11db6505683d0b7778d4444549ae71df5012edb859251abca13bd819fa6ac9d6ac0117206b1a4876464d6bfc6a7c106dd4c5a0f08af94b45a8200e47e02a7dc6148fd7b00000", + "AllAnyOneCanPay": "70736274ff01005e020000000130ac25ec34af987b9e0518ff05cd491bd2d339660a4bfeea49a580c9233fbd9d0000000000ffffffff010b0c0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad1000000000001012bd30c0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad101030481000000011340e86b7ea8d6fc2cbd99b1091c25a2a37b333b5d82ea559579553cf7ba08c0fe3bead26c458f4917a6e069a3712c15f0999adb243603c783133676c1a09cc574b20117206b1a4876464d6bfc6a7c106dd4c5a0f08af94b45a8200e47e02a7dc6148fd7b00000", + "NoneAnyOneCanPay": 
"70736274ff01005e02000000015499da1d93851a8add52fcab05acab60eaaf16571e0015f678b68775937d11200000000000ffffffff01430b0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad1000000000001012b0b0c0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad1010304820000000113402d42b46429b739786020e52b69b969468aa69ca40af390ba13441c8e6dc9e53f679c2bd2ff0ef912f48922cd64f4a7bfe7e492e5ecc8603b63e0ea772385faab0117206b1a4876464d6bfc6a7c106dd4c5a0f08af94b45a8200e47e02a7dc6148fd7b00000", + "SingleAnyOneCanPay": "70736274ff01005e02000000011bbe693ee5b3d75a5c8ad190e151c81e5b1ff1090982ea712c375e7d4a6069ce0100000000ffffffff012b0f0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad1000000000001012bf30f0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad1010304830000000113408e018d0ae9cd730f7eae428a456e920b4ded67c9a7500a82ba25dd23f98418c1f060680daa4352b262fdffab691a4a67fc603352c1d21ace7cc6d83490facde70117206b1a4876464d6bfc6a7c106dd4c5a0f08af94b45a8200e47e02a7dc6148fd7b00000", + } + + var expectedTx = map[string]string{ + "Default": "02000000000101f1aabce974f1b242b36913f4f8a9f138a8042914dddc4117a578813a4dc32ee10000000000ffffffff017b0a0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad10140e80246ac1955def419572514e50e4be47f56ccd51beae41ec80ad30cb77ed59ebca3c38dd8506e1b7c28fafa4bdf7d821464be1ee152416bdaf2c056fb4fb32900000000", + "All": "0200000000010193e988e9eebfe51c0f362741aaab1e0699175c83cfd8087c4a06e24e3b80bc220000000000ffffffff019b0d0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad10141ee0a03b010e515e38553d4d96c65a9d6092d06756c47c16c5674c3bde6ad0c151f6d4074601f3c2967f12c3b624b4013591e65458a8b5f80b96a613132cee3bb0100000000", + "None": 
"020000000001013cfe0f5fd1b9a73230b003d336b5e4d7abf3452f6a5c4f266c434648a161eb170000000000ffffffff01d30c0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad1014132573ce8ee8a9afac2008bcb45ce7f96ac95ee7ffad26d10388c97fb87f76f77dc414224ca98b01cbec361488ac29d11e018be412d2725be85dfe5c3fd3b6b4c0200000000", + "Single": "020000000001013173659bb6be7474b8d00efd3b38f2a225f5591bd4edd873170a1e0ff0ef15990000000000ffffffff01630e0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad10141251ce90a8b36cd90bf430f9522772b09bd3ef90039e53cddc5bda6abb61f1c11db6505683d0b7778d4444549ae71df5012edb859251abca13bd819fa6ac9d6ac0300000000", + "AllAnyOneCanPay": "0200000000010130ac25ec34af987b9e0518ff05cd491bd2d339660a4bfeea49a580c9233fbd9d0000000000ffffffff010b0c0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad10141e86b7ea8d6fc2cbd99b1091c25a2a37b333b5d82ea559579553cf7ba08c0fe3bead26c458f4917a6e069a3712c15f0999adb243603c783133676c1a09cc574b28100000000", + "NoneAnyOneCanPay": "020000000001015499da1d93851a8add52fcab05acab60eaaf16571e0015f678b68775937d11200000000000ffffffff01430b0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad101412d42b46429b739786020e52b69b969468aa69ca40af390ba13441c8e6dc9e53f679c2bd2ff0ef912f48922cd64f4a7bfe7e492e5ecc8603b63e0ea772385faab8200000000", + "SingleAnyOneCanPay": "020000000001011bbe693ee5b3d75a5c8ad190e151c81e5b1ff1090982ea712c375e7d4a6069ce0100000000ffffffff012b0f0000000000002251209c1f4b7970d790c99b7265b53adec03551708fd7d67db78359f9c472fe642ad101418e018d0ae9cd730f7eae428a456e920b4ded67c9a7500a82ba25dd23f98418c1f060680daa4352b262fdffab691a4a67fc603352c1d21ace7cc6d83490facde78300000000", + } + + for key, signedPsbtStr := range signedPsbtData { + signedPsbtBytes, err := hex.DecodeString(signedPsbtStr) + require.NoErrorf(t, err, "Failed to decode signed psbt string") + + signedPsbt, err := NewFromRawBytes(bytes.NewReader(signedPsbtBytes), false) + 
require.NoErrorf(t, err, "Failed to parse psbt") + + // There is only one input in each psbt. + err = Finalize(signedPsbt, 0) + require.NoErrorf(t, err, "Failed to finalize") + + tx, err := Extract(signedPsbt) + require.NoErrorf(t, err, "Failed to extract") + + var b bytes.Buffer + err = tx.Serialize(&b) + require.NoErrorf(t, err, "Failed to serialize tx into buffer") + + expectedTxBytes, err := hex.DecodeString(expectedTx[key]) + require.NoErrorf(t, err, "Unable to decode expected tx") + require.Equal(t, expectedTxBytes, b.Bytes()) + } + +} + func TestImportFromCore1(t *testing.T) { // This example #1 was created manually using Bitcoin Core 0.17 regtest. // It contains two inputs, one p2wkh and one p2pkh (non-witness). @@ -1502,3 +1548,28 @@ func TestWitnessForNonWitnessUtxo(t *testing.T) { t.Fatalf("unable to extract funding TX: %v", err) } } + +// TestUnknowns tests that we can parse and serialize unknown fields on all +// three levels (global, input, output). +func TestUnknowns(t *testing.T) { + packetWithUnknowns := 
"cHNidP8BAIkCAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgUAAAAAAAAAIlEg5i9uUYF8DDqT/1fKz8jKT2g/Gj68P6EjLW6dHbImdM0FAAAAAAAAACJRIHS02KqR/607mTrLCABOVF3rLxVDtOLvAw3JLcL5JIgwAAAAAAFwAQEBcQZ0YXJvcnQAIgYCkGaT9mOyWvyoiwSCb1xgFhRie+Y3nTSmO0QQrAe0q7AYAAAAAPkDAIABAACA2wAAgAAAAAAAAAAAIRaQZpP2Y7Ja/KiLBIJvXGAWFGJ75jedNKY7RBCsB7SrsBkAAAAAAPkDAIABAACA2wAAgAAAAAAAAAAAARcgkGaT9mOyWvyoiwSCb1xgFhRie+Y3nTSmO0QQrAe0q7ABGCBlB87S1Bq/Niu8SdW9U1se7WsumF+1gYZ/00f/WkWGAgFwZX/rKpmW4Iz1ScSX2U2SIv8LN5kLvMWGeI7scXdPH/1uAAAAATanCvuEYVDT4vBfORd+71iC7GijIfGKofjwnXI56U3TAhYyvDW2pIk+islXsY45l27xfgJwWWK+CmkFs+cUptDlAXEIAAAAAAAAA+gBciJRIIBtIlu09Y4lcMgdHz3QhfSVV69iKin6cPxH2JFLTO1jAXMIAAAAAAAAAAABdCECtg44XjZucowo0SQp2YJa0esIwS9Bc1N8CpcddTkDdrQBdSB+nQzzBbHVbtIB0AoMIZvFEQpGG1hdp3D+8eYIu37oUgF2GAAAAAD5AwCAAQAAgNsAAIAAAAAAAQAAAAF3GQAAAAAA+QMAgAEAAIDbAACAAAAAAAEAAAABef2sAgABAAFWQzv2kOxwflKCXy51yDJbmfD7pZRVI1+f2k4j5aRkVX0AAAAACWl0ZXN0YnV4eCJzb21lIG1ldGFkYXRhIGZvciB0aGUgaXRlc3QgYXNzZXRzAAAAAAACAQADAQoG/QIgAf0CHABlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC/QGxSgAB5CgfrndtXUxNHYy61v8ZFC7EVnez4uBSIuSsEug67DIAAAAAAAATfv////////////////////////////////////////+//QFjAAEAAVZDO/aQ7HB+UoJfLnXIMluZ8PullFUjX5/aTiPlpGRVfQAAAAAJaXRlc3RidXh4InNvbWUgbWV0YWRhdGEgZm9yIHRoZSBpdGVzdCBhc3NldHMAAAAAAAIBAAMD/RN+Bq0BqwBldkseYyHTOjpT8WRNj+s5WMuADtDMKW09wG38rhEwM2oAAAAANqcK+4RhUNPi8F85F37vWILsaKMh8Yqh+PCdcjnpTdMCTr1IzgTZHOvZY2+EhzZF1w+HDMMZ2VZ5jDtyuViKWXIBQgFA0KHA0Di7lgqweVLU71eNWoOE759Ec6yFtcw6zVD45yUl8z58/GNb2+xbh/Ou5jfpDAkd4I4wXlafTu3dplTsqAcoHxlrrWtdUR74IMEFKrV3ECvdKAQfH98pZoSlmT1/jQUAAAAAAAATiAgCAAAJIQJhfW7AFTIwW95KKmZWOlJPDjl6ZUyk8uTE4AVS21a0wAgCAAAJIQIWMrw1tqSJPorJV7GOOZdu8X4CcFlivgppBbPnFKbQ5QF6AQEAIgICJzY1cX8foM/D3nXJDsULt45A8PTSWG42lK0rBOqOJrYYAAAAAPkDAIABAACA2wAAgAAAAAACAAAAAQUgJzY1cX8foM/D3nXJDsULt45A8PTSWG42lK0rBOqOJrYhByc2NXF/H6DPw951yQ7FC7eOQPD00lhuNpStKwTqjia2GQAAAAAA+QMAgAEAAIDbAACAAAAAAAIAAAABcAEBAXEBAAFyCAA
AAAAAAAAAAXMhAy38VNCuGaPv8LhP6aLaKPFgZC+c5VBOwjrnKR2ReQRCAXQYAAAAAPkDAIABAACA2wAAgAAAAAADAAAAAXUZAAAAAAD5AwCAAQAAgNsAAIAAAAAAAwAAAAF2/WEBAAEAAVZDO/aQ7HB+UoJfLnXIMluZ8PullFUjX5/aTiPlpGRVfQAAAAAJaXRlc3RidXh4InNvbWUgbWV0YWRhdGEgZm9yIHRoZSBpdGVzdCBhc3NldHMAAAAAAAIBAAMBBQatAasAZX/rKpmW4Iz1ScSX2U2SIv8LN5kLvMWGeI7scXdPH/1uAAAAATanCvuEYVDT4vBfORd+71iC7GijIfGKofjwnXI56U3TAhYyvDW2pIk+islXsY45l27xfgJwWWK+CmkFs+cUptDlAUIBQIcR8GQWP8a+XpOIE2KfA844YQQoKuLX18B/Q47cO1MQYzA6SJdDQ3InMTjRxR9STCe5CxnPW9ufpX50GBaV9YIHKHkuFWwwWI5ZxJiPIInqUjmAvRpa9Gi8E4NAW0EtPMAnAAAAAAAAAAoIAgAACSEC5i9uUYF8DDqT/1fKz8jKT2g/Gj68P6EjLW6dHbImdM0AAXABAAFxAQABcggAAAAAAAAAAQFzIQIQiynNQqsCXWFOpFav8EY3PtUvCL3HdwPj0w4MMI1PowF2/aoCAAEAAVZDO/aQ7HB+UoJfLnXIMluZ8PullFUjX5/aTiPlpGRVfQAAAAAJaXRlc3RidXh4InNvbWUgbWV0YWRhdGEgZm9yIHRoZSBpdGVzdCBhc3NldHMAAAAAAAIBAAMBBQb9Ah4B/QIaAGUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAL9Aa9KAAEhQAiYnrNk28uUgoU7xUnxAxecle1lVSSbHyT0Xdo8FgAAAAAAAAAF/////////////////////////////////////////3/9AWEAAQABVkM79pDscH5Sgl8udcgyW5nw+6WUVSNfn9pOI+WkZFV9AAAAAAlpdGVzdGJ1eHgic29tZSBtZXRhZGF0YSBmb3IgdGhlIGl0ZXN0IGFzc2V0cwAAAAAAAgEAAwEFBq0BqwBlf+sqmZbgjPVJxJfZTZIi/ws3mQu8xYZ4juxxd08f/W4AAAABNqcK+4RhUNPi8F85F37vWILsaKMh8Yqh+PCdcjnpTdMCFjK8NbakiT6KyVexjjmXbvF+AnBZYr4KaQWz5xSm0OUBQgFAhxHwZBY/xr5ek4gTYp8DzjhhBCgq4tfXwH9Djtw7UxBjMDpIl0NDcicxONHFH1JMJ7kLGc9b25+lfnQYFpX1ggcoeS4VbDBYjlnEmI8giepSOYC9Glr0aLwTg0BbQS08wCcAAAAAAAAACggCAAAJIQLmL25RgXwMOpP/V8rPyMpPaD8aPrw/oSMtbp0dsiZ0zQgCAAAJIQJ0tNiqkf+tO5k6ywgATlRd6y8VQ7Ti7wMNyS3C+SSIMAA=" + + packet, err := NewFromRawBytes( + strings.NewReader(packetWithUnknowns), true, + ) + require.NoError(t, err) + + require.Len(t, packet.Unknowns, 2) + + require.Len(t, packet.Inputs, 1) + require.Len(t, packet.Inputs[0].Unknowns, 10) + + require.Len(t, packet.Outputs, 2) + require.Len(t, packet.Outputs[0].Unknowns, 7) + + // Convert it to base64 again to make sure the fields are also + // 
serialized. + encoded, err := packet.B64Encode() + require.NoError(t, err) + require.Equal(t, packetWithUnknowns, encoded) +} diff --git a/btcutil/psbt/signer.go b/btcutil/psbt/signer.go index 588265317f..dcbcf93fa3 100644 --- a/btcutil/psbt/signer.go +++ b/btcutil/psbt/signer.go @@ -22,12 +22,12 @@ const ( // attached. SignSuccesful = 0 - // SignFinalized indicates that this input is already finalized, so the provided - // signature was *not* attached + // SignFinalized indicates that this input is already finalized, so the + // provided signature was *not* attached SignFinalized = 1 - // SignInvalid indicates that the provided signature data was not valid. In this case - // an error will also be returned. + // SignInvalid indicates that the provided signature data was not valid. + // In this case an error will also be returned. SignInvalid = -1 ) @@ -73,9 +73,10 @@ func (u *Updater) Sign(inIndex int, sig []byte, pubKey []byte, // // Case 1: if witnessScript is present, it must be of type witness; // if not, signature insertion will of course fail. + pInput := u.Upsbt.Inputs[inIndex] switch { - case u.Upsbt.Inputs[inIndex].WitnessScript != nil: - if u.Upsbt.Inputs[inIndex].WitnessUtxo == nil { + case pInput.WitnessScript != nil: + if pInput.WitnessUtxo == nil { err := nonWitnessToWitness(u.Upsbt, inIndex) if err != nil { return SignInvalid, err @@ -89,12 +90,12 @@ func (u *Updater) Sign(inIndex int, sig []byte, pubKey []byte, // Case 2: no witness script, only redeem script; can be legacy p2sh or // p2sh-wrapped p2wkh. - case u.Upsbt.Inputs[inIndex].RedeemScript != nil: + case pInput.RedeemScript != nil: // We only need to decide if the input is witness, and we don't // rely on the witnessutxo/nonwitnessutxo in the PSBT, instead // we check the redeemScript content. 
if txscript.IsWitnessProgram(redeemScript) { - if u.Upsbt.Inputs[inIndex].WitnessUtxo == nil { + if pInput.WitnessUtxo == nil { err := nonWitnessToWitness(u.Upsbt, inIndex) if err != nil { return SignInvalid, err @@ -113,9 +114,10 @@ func (u *Updater) Sign(inIndex int, sig []byte, pubKey []byte, // non-p2sh. To check if it's segwit, check the scriptPubKey of the // output. default: - if u.Upsbt.Inputs[inIndex].WitnessUtxo == nil { - outIndex := u.Upsbt.UnsignedTx.TxIn[inIndex].PreviousOutPoint.Index - script := u.Upsbt.Inputs[inIndex].NonWitnessUtxo.TxOut[outIndex].PkScript + if pInput.WitnessUtxo == nil { + txIn := u.Upsbt.UnsignedTx.TxIn[inIndex] + outIndex := txIn.PreviousOutPoint.Index + script := pInput.NonWitnessUtxo.TxOut[outIndex].PkScript if txscript.IsWitnessProgram(script) { err := nonWitnessToWitness(u.Upsbt, inIndex) diff --git a/btcutil/psbt/taproot.go b/btcutil/psbt/taproot.go index 4d0619ec43..b9df860c95 100644 --- a/btcutil/psbt/taproot.go +++ b/btcutil/psbt/taproot.go @@ -2,6 +2,7 @@ package psbt import ( "bytes" + "github.com/btcsuite/btcd/btcec/v2/schnorr" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" @@ -92,10 +93,10 @@ func (s *TaprootBip32Derivation) SortBefore(other *TaprootBip32Derivation) bool return bytes.Compare(s.XOnlyPubKey, other.XOnlyPubKey) < 0 } -// readTaprootBip32Derivation deserializes a byte slice containing the Taproot +// ReadTaprootBip32Derivation deserializes a byte slice containing the Taproot // BIP32 derivation info that consists of a list of leaf hashes as well as the // normal BIP32 derivation info. -func readTaprootBip32Derivation(xOnlyPubKey, +func ReadTaprootBip32Derivation(xOnlyPubKey, value []byte) (*TaprootBip32Derivation, error) { // The taproot key BIP 32 derivation path is defined as: @@ -141,7 +142,7 @@ func readTaprootBip32Derivation(xOnlyPubKey, } // Read the BIP32 derivation info. 
- fingerprint, path, err := readBip32Derivation(leftoverBuf.Bytes()) + fingerprint, path, err := ReadBip32Derivation(leftoverBuf.Bytes()) if err != nil { return nil, err } @@ -152,9 +153,9 @@ func readTaprootBip32Derivation(xOnlyPubKey, return &derivation, nil } -// serializeTaprootBip32Derivation serializes a TaprootBip32Derivation to its +// SerializeTaprootBip32Derivation serializes a TaprootBip32Derivation to its // raw byte representation. -func serializeTaprootBip32Derivation(d *TaprootBip32Derivation) ([]byte, +func SerializeTaprootBip32Derivation(d *TaprootBip32Derivation) ([]byte, error) { var buf bytes.Buffer diff --git a/btcutil/psbt/utils.go b/btcutil/psbt/utils.go index 94a5546a55..85bc82f529 100644 --- a/btcutil/psbt/utils.go +++ b/btcutil/psbt/utils.go @@ -245,7 +245,7 @@ func getKey(r io.Reader) (int, []byte, error) { // Check that we don't attempt to decode a dangerously large key. if count > MaxPsbtKeyLength { - return -1, nil, ErrInvalidKeydata + return -1, nil, ErrInvalidKeyData } // Next, we ready out the designated number of bytes, which may include @@ -398,6 +398,30 @@ func VerifyInputOutputLen(packet *Packet, needInputs, needOutputs bool) error { return nil } +// InputsReadyToSign makes sure that all input data have the previous output +// specified meaning that either nonwitness UTXO or the witness UTXO data is +// specified in the psbt package. This check is necessary because of 2 reasons. +// The sighash calculation is now different for witnessV0 and witnessV1 inputs +// this means we need to check the previous output pkScript for the specific +// type and the second reason is that the sighash calculation for taproot inputs +// include the previous output pkscripts. 
+func InputsReadyToSign(packet *Packet) error { + err := VerifyInputOutputLen(packet, true, true) + if err != nil { + return err + } + + for i := range packet.UnsignedTx.TxIn { + input := packet.Inputs[i] + if input.NonWitnessUtxo == nil && input.WitnessUtxo == nil { + return fmt.Errorf("invalid PSBT, input with index %d "+ + "missing utxo information", i) + } + } + + return nil +} + // NewFromSignedTx is a utility function to create a packet from an // already-signed transaction. Returned are: an unsigned transaction // serialization, a list of scriptSigs, one per input, and a list of witnesses, diff --git a/btcutil/tx.go b/btcutil/tx.go index 5633fef90e..4f26befe32 100644 --- a/btcutil/tx.go +++ b/btcutil/tx.go @@ -27,6 +27,7 @@ type Tx struct { txHashWitness *chainhash.Hash // Cached transaction witness hash txHasWitness *bool // If the transaction has witness data txIndex int // Position within a block or TxIndexUnknown + rawBytes []byte // Raw bytes for the tx in the raw block. } // MsgTx returns the underlying wire.MsgTx for the transaction. @@ -35,24 +36,82 @@ func (t *Tx) MsgTx() *wire.MsgTx { return t.msgTx } -// Hash returns the hash of the transaction. This is equivalent to -// calling TxHash on the underlying wire.MsgTx, however it caches the -// result so subsequent calls are more efficient. +// Hash returns the hash of the transaction. This is equivalent to calling +// TxHash on the underlying wire.MsgTx, however it caches the result so +// subsequent calls are more efficient. If the Tx has the raw bytes of the tx +// cached, it will use that and skip serialization. func (t *Tx) Hash() *chainhash.Hash { // Return the cached hash if it has already been generated. if t.txHash != nil { return t.txHash } - // Cache the hash and return it. - hash := t.msgTx.TxHash() + // If the rawBytes aren't available, call msgtx.TxHash. 
+ if t.rawBytes == nil { + hash := t.msgTx.TxHash() + t.txHash = &hash + return &hash + } + + // If we have the raw bytes, then don't call msgTx.TxHash as that has + // the overhead of serialization. Instead, we can take the existing + // serialized bytes and hash them to speed things up. + var hash chainhash.Hash + if t.HasWitness() { + // If the raw bytes contain the witness, we must strip it out + // before calculating the hash. + baseSize := t.msgTx.SerializeSizeStripped() + nonWitnessBytes := make([]byte, 0, baseSize) + + // Append the version bytes. + offset := 4 + nonWitnessBytes = append( + nonWitnessBytes, t.rawBytes[:offset]..., + ) + + // Append the input and output bytes. -8 to account for the + // version bytes and the locktime bytes. + // + // Skip the 2 bytes for the witness encoding. + offset += 2 + nonWitnessBytes = append( + nonWitnessBytes, + t.rawBytes[offset:offset+baseSize-8]..., + ) + + // Append the last 4 bytes which are the locktime bytes. + nonWitnessBytes = append( + nonWitnessBytes, t.rawBytes[len(t.rawBytes)-4:]..., + ) + + // NOTE(review): the code calls DoubleHashRaw while the comment + // here previously referenced doublehashh — confirm whether the + // allocation-free DoubleHashH(nonWitnessBytes) was intended. + hash = chainhash.DoubleHashRaw(func(w io.Writer) error { + _, err := w.Write(nonWitnessBytes) + return err + }) + } else { + // If the raw bytes don't have the witness, we can use them + // directly. + // + // NOTE(review): same as above — DoubleHashRaw is called though + // the comment previously referenced doublehashh; confirm that + // DoubleHashH(t.rawBytes) wasn't the intended call here. + hash = chainhash.DoubleHashRaw(func(w io.Writer) error { + _, err := w.Write(t.rawBytes) + return err + }) + } + + t.txHash = &hash + return &hash } // WitnessHash returns the witness hash (wtxid) of the transaction. This is // equivalent to calling WitnessHash on the underlying wire.MsgTx, however it
+// caches the result so subsequent calls are more efficient. If the Tx has the +// raw bytes of the tx cached, it will use that and skip serialization. func (t *Tx) WitnessHash() *chainhash.Hash { // Return the cached hash if it has already been generated. if t.txHashWitness != nil { @@ -60,7 +119,13 @@ func (t *Tx) WitnessHash() *chainhash.Hash { } // Cache the hash and return it. - hash := t.msgTx.WitnessHash() + var hash chainhash.Hash + if len(t.rawBytes) > 0 { + hash = chainhash.DoubleHashH(t.rawBytes) + } else { + hash = t.msgTx.WitnessHash() + } + t.txHashWitness = &hash return &hash } @@ -99,6 +164,11 @@ func NewTx(msgTx *wire.MsgTx) *Tx { } } +// setBytes sets the raw bytes of the tx. +func (t *Tx) setBytes(bytes []byte) { + t.rawBytes = bytes +} + // NewTxFromBytes returns a new instance of a bitcoin transaction given the // serialized bytes. See Tx. func NewTxFromBytes(serializedTx []byte) (*Tx, error) { diff --git a/btcutil/tx_test.go b/btcutil/tx_test.go index 828fc065b1..71b7488e9d 100644 --- a/btcutil/tx_test.go +++ b/btcutil/tx_test.go @@ -10,8 +10,8 @@ import ( "reflect" "testing" - "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/davecgh/go-spew/spew" ) diff --git a/btcutil/txsort/doc.go b/btcutil/txsort/doc.go index e89c4d23d3..9f5095ce8b 100644 --- a/btcutil/txsort/doc.go +++ b/btcutil/txsort/doc.go @@ -5,7 +5,7 @@ /* Package txsort provides the transaction sorting according to BIP 69. -Overview +# Overview BIP 69 defines a standard lexicographical sort order of transaction inputs and outputs. 
This is useful to standardize transactions for faster multi-party diff --git a/btcutil/txsort/txsort_test.go b/btcutil/txsort/txsort_test.go index 7d5c2d3eaf..dd2149294e 100644 --- a/btcutil/txsort/txsort_test.go +++ b/btcutil/txsort/txsort_test.go @@ -11,8 +11,8 @@ import ( "path/filepath" "testing" - "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/btcutil/txsort" + "github.com/btcsuite/btcd/wire" ) // TestSort ensures the transaction sorting works according to the BIP. diff --git a/btcutil/wif.go b/btcutil/wif.go index a28cc8ba8d..26316dd7ff 100644 --- a/btcutil/wif.go +++ b/btcutil/wif.go @@ -68,14 +68,14 @@ func (w *WIF) IsForNet(net *chaincfg.Params) bool { // The WIF string must be a base58-encoded string of the following byte // sequence: // -// * 1 byte to identify the network, must be 0x80 for mainnet or 0xef for -// either testnet3 or the regression test network -// * 32 bytes of a binary-encoded, big-endian, zero-padded private key -// * Optional 1 byte (equal to 0x01) if the address being imported or exported -// was created by taking the RIPEMD160 after SHA256 hash of a serialized -// compressed (33-byte) public key -// * 4 bytes of checksum, must equal the first four bytes of the double SHA256 -// of every byte before the checksum in this sequence +// - 1 byte to identify the network, must be 0x80 for mainnet or 0xef for +// either testnet3 or the regression test network +// - 32 bytes of a binary-encoded, big-endian, zero-padded private key +// - Optional 1 byte (equal to 0x01) if the address being imported or exported +// was created by taking the RIPEMD160 after SHA256 hash of a serialized +// compressed (33-byte) public key +// - 4 bytes of checksum, must equal the first four bytes of the double SHA256 +// of every byte before the checksum in this sequence // // If the base58-decoded byte sequence does not match this, DecodeWIF will // return a non-nil error. 
ErrMalformedPrivateKey is returned when the WIF diff --git a/chaincfg/chainhash/hash.go b/chaincfg/chainhash/hash.go index 764ec3c40a..4aa7aeb64c 100644 --- a/chaincfg/chainhash/hash.go +++ b/chaincfg/chainhash/hash.go @@ -8,6 +8,7 @@ package chainhash import ( "crypto/sha256" "encoding/hex" + "encoding/json" "fmt" ) @@ -110,6 +111,32 @@ func (hash *Hash) IsEqual(target *Hash) bool { return *hash == *target } +// MarshalJSON serialises the hash as a JSON appropriate string value. +func (hash Hash) MarshalJSON() ([]byte, error) { + return json.Marshal(hash.String()) +} + +// UnmarshalJSON parses the hash from a JSON appropriate string value. +func (hash *Hash) UnmarshalJSON(input []byte) error { + // If the first byte indicates an array, the hash could have been marshalled + // using the legacy method and e.g. persisted. + if len(input) > 0 && input[0] == '[' { + return decodeLegacy(hash, input) + } + + var sh string + err := json.Unmarshal(input, &sh) + if err != nil { + return err + } + newHash, err := NewHashFromStr(sh) + if err != nil { + return err + } + + return hash.SetBytes(newHash[:]) +} + // NewHash returns a new Hash from a byte slice. An error is returned if // the number of bytes passed in is not HashSize. func NewHash(newHash []byte) (*Hash, error) { @@ -196,3 +223,17 @@ func Decode(dst *Hash, src string) error { return nil } + +// decodeLegacy decodes a Hash that has been encoded with the legacy method +// (i.e. represented as a byte array) to a destination.
+func decodeLegacy(dst *Hash, src []byte) error { + var hashBytes []byte + err := json.Unmarshal(src, &hashBytes) + if err != nil { + return err + } + if len(hashBytes) != HashSize { + return ErrHashStrSize + } + return dst.SetBytes(hashBytes) +} diff --git a/chaincfg/chainhash/hash_test.go b/chaincfg/chainhash/hash_test.go index 07f54c7763..85738a66c3 100644 --- a/chaincfg/chainhash/hash_test.go +++ b/chaincfg/chainhash/hash_test.go @@ -7,6 +7,7 @@ package chainhash import ( "bytes" "encoding/hex" + "encoding/json" "testing" ) @@ -194,3 +195,38 @@ func TestNewHashFromStr(t *testing.T) { } } } + +// TestHashJsonMarshal tests json marshal and unmarshal. +func TestHashJsonMarshal(t *testing.T) { + hashStr := "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506" + legacyHashStr := []byte("[6,229,51,253,26,218,134,57,31,63,108,52,50,4,176,210,120,212,170,236,28,11,32,170,39,186,3,0,0,0,0,0]") + + hash, err := NewHashFromStr(hashStr) + if err != nil { + t.Errorf("NewHashFromStr error:%v, hashStr:%s", err, hashStr) + } + + hashBytes, err := json.Marshal(hash) + if err != nil { + t.Errorf("Marshal json error:%v, hash:%v", err, hashBytes) + } + + var newHash Hash + err = json.Unmarshal(hashBytes, &newHash) + if err != nil { + t.Errorf("Unmarshal json error:%v, hash:%v", err, hashBytes) + } + + if !hash.IsEqual(&newHash) { + t.Errorf("String: wrong hash string - got %v, want %v", newHash.String(), hashStr) + } + + err = newHash.UnmarshalJSON(legacyHashStr) + if err != nil { + t.Errorf("Unmarshal legacy json error:%v, hash:%v", err, legacyHashStr) + } + + if !hash.IsEqual(&newHash) { + t.Errorf("String: wrong hash string - got %v, want %v", newHash.String(), hashStr) + } +} diff --git a/chaincfg/chainhash/hashfuncs.go b/chaincfg/chainhash/hashfuncs.go index bf74f73c39..5be8a4d467 100644 --- a/chaincfg/chainhash/hashfuncs.go +++ b/chaincfg/chainhash/hashfuncs.go @@ -5,7 +5,10 @@ package chainhash -import "crypto/sha256" +import ( + "crypto/sha256" + "io" +) // 
HashB calculates hash(b) and returns the resulting bytes. func HashB(b []byte) []byte { @@ -31,3 +34,24 @@ func DoubleHashH(b []byte) Hash { first := sha256.Sum256(b) return Hash(sha256.Sum256(first[:])) } + +// DoubleHashRaw calculates hash(hash(w)) where w is the resulting bytes from +// the given serialize function and returns the resulting bytes as a Hash. +func DoubleHashRaw(serialize func(w io.Writer) error) Hash { + // Encode the transaction into the hash. Ignore the error returns + // since the only way the encode could fail is being out of memory + // or due to nil pointers, both of which would cause a run-time panic. + h := sha256.New() + _ = serialize(h) + + // This buf is here because Sum() will append the result to the passed + // in byte slice. Pre-allocating here saves an allocation on the second + // hash as we can reuse it. This allocation also does not escape to the + // heap, saving an allocation. + buf := make([]byte, 0, HashSize) + first := h.Sum(buf) + h.Reset() + h.Write(first) + res := h.Sum(buf) + return *(*Hash)(res) +} diff --git a/chaincfg/chainhash/hashfuncs_test.go b/chaincfg/chainhash/hashfuncs_test.go index bcd6f22200..6b9ff9a97f 100644 --- a/chaincfg/chainhash/hashfuncs_test.go +++ b/chaincfg/chainhash/hashfuncs_test.go @@ -6,6 +6,7 @@ package chainhash import ( "fmt" + "io" "testing" ) @@ -133,4 +134,20 @@ func TestDoubleHashFuncs(t *testing.T) { continue } } + + // Ensure the hash function which accepts a hash.Hash returns the expected + // result when given a hash.Hash that is of type SHA256. 
+ for _, test := range tests { + serialize := func(w io.Writer) error { + w.Write([]byte(test.in)) + return nil + } + hash := DoubleHashRaw(serialize) + h := fmt.Sprintf("%x", hash[:]) + if h != test.out { + t.Errorf("DoubleHashRaw(%q) = %s, want %s", test.in, h, + test.out) + continue + } + } } diff --git a/chaincfg/doc.go b/chaincfg/doc.go index 1595b2769f..65efb54f66 100644 --- a/chaincfg/doc.go +++ b/chaincfg/doc.go @@ -18,40 +18,40 @@ // When a network parameter is needed, it may then be looked up through this // variable (either directly, or hidden in a library call). // -// package main +// package main // -// import ( -// "flag" -// "fmt" -// "log" +// import ( +// "flag" +// "fmt" +// "log" // -// "github.com/btcsuite/btcd/btcutil" -// "github.com/btcsuite/btcd/chaincfg" -// ) +// "github.com/btcsuite/btcd/btcutil" +// "github.com/btcsuite/btcd/chaincfg" +// ) // -// var testnet = flag.Bool("testnet", false, "operate on the testnet Bitcoin network") +// var testnet = flag.Bool("testnet", false, "operate on the testnet Bitcoin network") // -// // By default (without -testnet), use mainnet. -// var chainParams = &chaincfg.MainNetParams +// // By default (without -testnet), use mainnet. +// var chainParams = &chaincfg.MainNetParams // -// func main() { -// flag.Parse() +// func main() { +// flag.Parse() // -// // Modify active network parameters if operating on testnet. -// if *testnet { -// chainParams = &chaincfg.TestNet3Params -// } +// // Modify active network parameters if operating on testnet. +// if *testnet { +// chainParams = &chaincfg.TestNet3Params +// } // -// // later... +// // later... // -// // Create and print new payment address, specific to the active network. -// pubKeyHash := make([]byte, 20) -// addr, err := btcutil.NewAddressPubKeyHash(pubKeyHash, chainParams) -// if err != nil { -// log.Fatal(err) -// } -// fmt.Println(addr) -// } +// // Create and print new payment address, specific to the active network. 
+// pubKeyHash := make([]byte, 20) +// addr, err := btcutil.NewAddressPubKeyHash(pubKeyHash, chainParams) +// if err != nil { +// log.Fatal(err) +// } +// fmt.Println(addr) +// } // // If an application does not use one of the three standard Bitcoin networks, // a new Params struct may be created which defines the parameters for the diff --git a/chaincfg/params.go b/chaincfg/params.go index 2387c203ae..3a7f7661e1 100644 --- a/chaincfg/params.go +++ b/chaincfg/params.go @@ -183,6 +183,11 @@ type Params struct { // block in compact form. PowLimitBits uint32 + // PoWNoRetargeting defines whether the network has difficulty + // retargeting enabled or not. This should only be set to true for + // regtest like networks. + PoWNoRetargeting bool + // These fields define the block heights at which the specified softfork // BIP became active. BIP0034Height int32 @@ -282,6 +287,7 @@ var MainNetParams = Params{ {"seed.bitcoinstats.com", true}, {"seed.bitnodes.io", false}, {"seed.bitcoin.jonasschnelli.ch", true}, + {"seed.btc.petertodd.net", true}, }, // Chain parameters @@ -328,6 +334,16 @@ var MainNetParams = Params{ {520000, newHashFromStr("0000000000000000000d26984c0229c9f6962dc74db0a6d525f2f1640396f69c")}, {550000, newHashFromStr("000000000000000000223b7a2298fb1c6c75fb0efc28a4c56853ff4112ec6bc9")}, {560000, newHashFromStr("0000000000000000002c7b276daf6efb2b6aa68e2ce3be67ef925b3264ae7122")}, + {563378, newHashFromStr("0000000000000000000f1c54590ee18d15ec70e68c8cd4cfbadb1b4f11697eee")}, + {597379, newHashFromStr("00000000000000000005f8920febd3925f8272a6a71237563d78c2edfdd09ddf")}, + {623950, newHashFromStr("0000000000000000000f2adce67e49b0b6bdeb9de8b7c3d7e93b21e7fc1e819d")}, + {654683, newHashFromStr("0000000000000000000b9d2ec5a352ecba0592946514a92f14319dc2b367fc72")}, + {691719, newHashFromStr("00000000000000000008a89e854d57e5667df88f1cdef6fde2fbca1de5b639ad")}, + {724466, newHashFromStr("000000000000000000052d314a259755ca65944e68df6b12a067ea8f1f5a7091")}, + {751565, 
newHashFromStr("00000000000000000009c97098b5295f7e5f183ac811fb5d1534040adb93cabd")}, + {781565, newHashFromStr("00000000000000000002b8c04999434c33b8e033f11a977b288f8411766ee61c")}, + {800000, newHashFromStr("00000000000000000002a7c4c1e48d76c5a37902165a270156b7a8d72728a054")}, + {810000, newHashFromStr("000000000000000000028028ca82b6aa81ce789e4eb9e0321b74c3cbaf405dd1")}, }, // Consensus rule change deployments. @@ -425,6 +441,7 @@ var RegressionNetParams = Params{ GenesisHash: ®TestGenesisHash, PowLimit: regressionPowLimit, PowLimitBits: 0x207fffff, + PoWNoRetargeting: true, CoinbaseMaturity: 100, BIP0034Height: 100000000, // Not active - Permit ver 1 blocks BIP0065Height: 1351, // Used by regression tests @@ -528,7 +545,7 @@ var TestNet3Params = Params{ DNSSeeds: []DNSSeed{ {"testnet-seed.bitcoin.jonasschnelli.ch", true}, {"testnet-seed.bitcoin.schildbach.de", false}, - {"seed.tbtc.petertodd.org", true}, + {"seed.tbtc.petertodd.net", true}, {"testnet-seed.bluematt.me", false}, }, @@ -565,6 +582,13 @@ var TestNet3Params = Params{ {1100007, newHashFromStr("00000000000abc7b2cd18768ab3dee20857326a818d1946ed6796f42d66dd1e8")}, {1200007, newHashFromStr("00000000000004f2dc41845771909db57e04191714ed8c963f7e56713a7b6cea")}, {1300007, newHashFromStr("0000000072eab69d54df75107c052b26b0395b44f77578184293bf1bb1dbd9fa")}, + {1354312, newHashFromStr("0000000000000037a8cd3e06cd5edbfe9dd1dbcc5dacab279376ef7cfc2b4c75")}, + {1580000, newHashFromStr("00000000000000b7ab6ce61eb6d571003fbe5fe892da4c9b740c49a07542462d")}, + {1692000, newHashFromStr("000000000000056c49030c174179b52a928c870e6e8a822c75973b7970cfbd01")}, + {1864000, newHashFromStr("000000000000006433d1efec504c53ca332b64963c425395515b01977bd7b3b0")}, + {2010000, newHashFromStr("0000000000004ae2f3896ca8ecd41c460a35bf6184e145d91558cece1c688a76")}, + {2143398, newHashFromStr("00000000000163cfb1f97c4e4098a3692c8053ad9cab5ad9c86b338b5c00b8b7")}, + {2344474, 
newHashFromStr("0000000000000004877fa2d36316398528de4f347df2f8a96f76613a298ce060")}, }, // Consensus rule change deployments. @@ -993,8 +1017,9 @@ func IsBech32SegwitPrefix(prefix string) bool { // ErrInvalidHDKeyID error will be returned. // // Reference: -// SLIP-0132 : Registered HD version bytes for BIP-0032 -// https://github.com/satoshilabs/slips/blob/master/slip-0132.md +// +// SLIP-0132 : Registered HD version bytes for BIP-0032 +// https://github.com/satoshilabs/slips/blob/master/slip-0132.md func RegisterHDKeyID(hdPublicKeyID []byte, hdPrivateKeyID []byte) error { if len(hdPublicKeyID) != 4 || len(hdPrivateKeyID) != 4 { return ErrInvalidHDKeyID diff --git a/cmd/addblock/config.go b/cmd/addblock/config.go index c638196534..ffcc0eca79 100644 --- a/cmd/addblock/config.go +++ b/cmd/addblock/config.go @@ -9,11 +9,11 @@ import ( "os" "path/filepath" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/database" _ "github.com/btcsuite/btcd/database/ffldb" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" flags "github.com/jessevdk/go-flags" ) diff --git a/cmd/addblock/import.go b/cmd/addblock/import.go index 4875ce11cb..8eda8f8c9b 100644 --- a/cmd/addblock/import.go +++ b/cmd/addblock/import.go @@ -13,10 +13,10 @@ import ( "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/blockchain/indexers" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/database" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) var zeroHash = chainhash.Hash{} @@ -287,6 +287,16 @@ func (bi *blockImporter) Import() chan *importResults { // the status handler when done. go func() { bi.wg.Wait() + + // Flush the changes made to the blockchain. 
+ log.Info("Flushing blockchain caches to the disk...") + if err := bi.chain.FlushUtxoCache(blockchain.FlushRequired); err != nil { + log.Errorf("Error while flushing the blockchain state: %v", err) + bi.errChan <- err + return + } + log.Info("Done flushing blockchain caches to disk") + bi.doneChan <- true }() diff --git a/cmd/btcctl/config.go b/cmd/btcctl/config.go index 3db735c5d8..f6ca8846ec 100644 --- a/cmd/btcctl/config.go +++ b/cmd/btcctl/config.go @@ -14,8 +14,8 @@ import ( "strings" "github.com/btcsuite/btcd/btcjson" - "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg" flags "github.com/jessevdk/go-flags" ) @@ -143,7 +143,7 @@ func normalizeAddress(addr string, chain *chaincfg.Params, useWallet bool) (stri if useWallet { defaultPort = "38332" } else { - defaultPort = "38332" + defaultPort = "38334" } default: if useWallet { @@ -158,7 +158,7 @@ func normalizeAddress(addr string, chain *chaincfg.Params, useWallet bool) (stri return addr, nil } -// cleanAndExpandPath expands environement variables and leading ~ in the +// cleanAndExpandPath expands environment variables and leading ~ in the // passed path, cleans the result, and returns it. func cleanAndExpandPath(path string) string { // Expand initial ~ to OS specific home directory. @@ -176,10 +176,10 @@ func cleanAndExpandPath(path string) string { // line options. // // The configuration proceeds as follows: -// 1) Start with a default config with sane settings -// 2) Pre-parse the command line to check for an alternative config file -// 3) Load configuration file overwriting defaults with any specified options -// 4) Parse CLI options and overwrite/add any specified options +// 1. Start with a default config with sane settings +// 2. Pre-parse the command line to check for an alternative config file +// 3. Load configuration file overwriting defaults with any specified options +// 4. 
Parse CLI options and overwrite/add any specified options // // The above results in functioning properly without any config settings // while still allowing the user to override settings with config files and diff --git a/cmd/btcctl/version.go b/cmd/btcctl/version.go index 42f3f7d024..8da196bea8 100644 --- a/cmd/btcctl/version.go +++ b/cmd/btcctl/version.go @@ -66,7 +66,7 @@ func normalizeVerString(str string) string { for _, r := range str { if strings.ContainsRune(semanticAlphabet, r) { // Ignoring the error here since it can only fail if - // the the system is out of memory and there are much + // the system is out of memory and there are much // bigger issues at that point. _, _ = result.WriteRune(r) } diff --git a/cmd/findcheckpoint/config.go b/cmd/findcheckpoint/config.go index 5671b5b186..203ed27faf 100644 --- a/cmd/findcheckpoint/config.go +++ b/cmd/findcheckpoint/config.go @@ -9,11 +9,11 @@ import ( "os" "path/filepath" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/database" _ "github.com/btcsuite/btcd/database/ffldb" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" flags "github.com/jessevdk/go-flags" ) diff --git a/cmd/gencerts/gencerts.go b/cmd/gencerts/gencerts.go index c0a27ad37b..27c9ae385c 100644 --- a/cmd/gencerts/gencerts.go +++ b/cmd/gencerts/gencerts.go @@ -76,7 +76,7 @@ func main() { } } -// cleanAndExpandPath expands environement variables and leading ~ in the +// cleanAndExpandPath expands environment variables and leading ~ in the // passed path, cleans the result, and returns it. func cleanAndExpandPath(path string) string { // Expand initial ~ to OS specific home directory. 
diff --git a/config.go b/config.go index 5eca3f2174..18620a008c 100644 --- a/config.go +++ b/config.go @@ -22,6 +22,7 @@ import ( "time" "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/connmgr" @@ -30,7 +31,6 @@ import ( "github.com/btcsuite/btcd/mempool" "github.com/btcsuite/btcd/peer" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/go-socks/socks" flags "github.com/jessevdk/go-flags" ) @@ -63,9 +63,11 @@ const ( defaultMaxOrphanTransactions = 100 defaultMaxOrphanTxSize = 100000 defaultSigCacheMaxSize = 100000 + defaultUtxoCacheMaxSizeMiB = 250 sampleConfigFilename = "sample-btcd.conf" defaultTxIndex = false defaultAddrIndex = false + pruneMinSize = 1536 ) var ( @@ -103,14 +105,15 @@ type config struct { BanDuration time.Duration `long:"banduration" description:"How long to ban misbehaving peers. Valid time units are {s, m, h}. 
Minimum 1 second"` BanThreshold uint32 `long:"banthreshold" description:"Maximum allowed ban score before disconnecting and banning misbehaving peers."` BlockMaxSize uint32 `long:"blockmaxsize" description:"Maximum block size in bytes to be used when creating a block"` - BlockMinSize uint32 `long:"blockminsize" description:"Mininum block size in bytes to be used when creating a block"` + BlockMinSize uint32 `long:"blockminsize" description:"Minimum block size in bytes to be used when creating a block"` BlockMaxWeight uint32 `long:"blockmaxweight" description:"Maximum block weight to be used when creating a block"` - BlockMinWeight uint32 `long:"blockminweight" description:"Mininum block weight to be used when creating a block"` + BlockMinWeight uint32 `long:"blockminweight" description:"Minimum block weight to be used when creating a block"` BlockPrioritySize uint32 `long:"blockprioritysize" description:"Size in bytes for high-priority/low-fee transactions when creating a block"` BlocksOnly bool `long:"blocksonly" description:"Do not accept transactions from remote peers."` ConfigFile string `short:"C" long:"configfile" description:"Path to configuration file"` ConnectPeers []string `long:"connect" description:"Connect only to the specified peers at startup"` CPUProfile string `long:"cpuprofile" description:"Write CPU profile to the specified file"` + MemoryProfile string `long:"memprofile" description:"Write memory profile to the specified file"` DataDir string `short:"b" long:"datadir" description:"Directory to store data"` DbType string `long:"dbtype" description:"Database backend to use for the Block Chain"` DebugLevel string `short:"d" long:"debuglevel" description:"Logging level for all subsystems {trace, debug, info, warn, error, critical} -- You may also specify =,=,... 
to set the log level for individual subsystems -- Use show to list available subsystems"` @@ -145,6 +148,7 @@ type config struct { Proxy string `long:"proxy" description:"Connect via SOCKS5 proxy (eg. 127.0.0.1:9050)"` ProxyPass string `long:"proxypass" default-mask:"-" description:"Password for proxy server"` ProxyUser string `long:"proxyuser" description:"Username for proxy server"` + Prune uint64 `long:"prune" description:"Prune already validated blocks from the database. Must specify a target size in MiB (minimum value of 1536, default value of 0 will disable pruning)"` RegressionTest bool `long:"regtest" description:"Use the regression test network"` RejectNonStd bool `long:"rejectnonstd" description:"Reject non-standard transactions regardless of the default settings for the active network."` RejectReplacement bool `long:"rejectreplacement" description:"Reject transactions that attempt to replace existing transactions within the mempool through the Replace-By-Fee (RBF) signaling policy."` @@ -168,6 +172,7 @@ type config struct { TestNet3 bool `long:"testnet" description:"Use the test network"` TorIsolation bool `long:"torisolation" description:"Enable Tor stream isolation by randomizing user credentials for each connection."` TrickleInterval time.Duration `long:"trickleinterval" description:"Minimum time between attempts to send new inventory to a connected peer"` + UtxoCacheMaxSizeMiB uint `long:"utxocachemaxsize" description:"The maximum size in MiB of the UTXO cache"` TxIndex bool `long:"txindex" description:"Maintain a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"` UserAgentComments []string `long:"uacomment" description:"Comment to add to the user agent -- See BIP 14 for more information."` Upnp bool `long:"upnp" description:"Use UPnP to map our listening port outside of NAT"` @@ -402,10 +407,10 @@ func newConfigParser(cfg *config, so *serviceOptions, options flags.Options) *fl // line options. 
// // The configuration proceeds as follows: -// 1) Start with a default config with sane settings -// 2) Pre-parse the command line to check for an alternative config file -// 3) Load configuration file overwriting defaults with any specified options -// 4) Parse CLI options and overwrite/add any specified options +// 1. Start with a default config with sane settings +// 2. Pre-parse the command line to check for an alternative config file +// 3. Load configuration file overwriting defaults with any specified options +// 4. Parse CLI options and overwrite/add any specified options // // The above results in btcd functioning properly without any config settings // while still allowing the user to override settings with config files and @@ -436,6 +441,7 @@ func loadConfig() (*config, []string, error) { BlockPrioritySize: mempool.DefaultBlockPrioritySize, MaxOrphanTxs: defaultMaxOrphanTransactions, SigCacheMaxSize: defaultSigCacheMaxSize, + UtxoCacheMaxSizeMiB: defaultUtxoCacheMaxSizeMiB, Generate: defaultGenerate, TxIndex: defaultTxIndex, AddrIndex: defaultAddrIndex, @@ -812,7 +818,7 @@ func loadConfig() (*config, []string, error) { return nil, nil, err } - // Validate the the minrelaytxfee. + // Validate the minrelaytxfee. cfg.minRelayTxFee, err = btcutil.NewAmount(cfg.MinRelayTxFee) if err != nil { str := "%s: invalid minrelaytxfee: %v" @@ -1136,6 +1142,30 @@ func loadConfig() (*config, []string, error) { } } + if cfg.Prune != 0 && cfg.Prune < pruneMinSize { + err := fmt.Errorf("%s: the minimum value for --prune is %d. 
Got %d", + funcName, pruneMinSize, cfg.Prune) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + + if cfg.Prune != 0 && cfg.TxIndex { + err := fmt.Errorf("%s: the --prune and --txindex options may "+ + "not be activated at the same time", funcName) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + + if cfg.Prune != 0 && cfg.AddrIndex { + err := fmt.Errorf("%s: the --prune and --addrindex options may "+ + "not be activated at the same time", funcName) + fmt.Fprintln(os.Stderr, err) + fmt.Fprintln(os.Stderr, usageMessage) + return nil, nil, err + } + // Warn about missing config file only after all other configuration is // done. This prevents the warning on help messages and invalid // options. Note this should go directly before the return. diff --git a/connmgr/connmanager.go b/connmgr/connmanager.go index 9a68190267..b487bd1ba1 100644 --- a/connmgr/connmanager.go +++ b/connmgr/connmanager.go @@ -210,11 +210,16 @@ func (cm *ConnManager) handleFailedConn(c *ConnReq) { log.Debugf("Max failed connection attempts reached: [%d] "+ "-- retrying connection in: %v", maxFailedAttempts, cm.cfg.RetryDuration) + theId := c.id time.AfterFunc(cm.cfg.RetryDuration, func() { + cm.Remove(theId) cm.NewConnReq() }) } else { - go cm.NewConnReq() + go func(theId uint64) { + cm.Remove(theId) + cm.NewConnReq() + }(c.id) } } } diff --git a/connmgr/doc.go b/connmgr/doc.go index acb90c31a9..d101c4347f 100644 --- a/connmgr/doc.go +++ b/connmgr/doc.go @@ -5,7 +5,7 @@ /* Package connmgr implements a generic Bitcoin network connection manager. 
-Connection Manager Overview +# Connection Manager Overview Connection Manager handles all the general connection concerns such as maintaining a set number of outbound connections, sourcing peers, banning, diff --git a/database/cmd/dbtool/globalconfig.go b/database/cmd/dbtool/globalconfig.go index 2ec746a43d..4e58168a33 100644 --- a/database/cmd/dbtool/globalconfig.go +++ b/database/cmd/dbtool/globalconfig.go @@ -10,11 +10,11 @@ import ( "os" "path/filepath" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/database" _ "github.com/btcsuite/btcd/database/ffldb" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) var ( diff --git a/database/cmd/dbtool/insecureimport.go b/database/cmd/dbtool/insecureimport.go index a01c74bb55..744e29f57b 100644 --- a/database/cmd/dbtool/insecureimport.go +++ b/database/cmd/dbtool/insecureimport.go @@ -12,10 +12,10 @@ import ( "sync" "time" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/database" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) // importCmd defines the configuration options for the insecureimport command. diff --git a/database/doc.go b/database/doc.go index 497206713f..80a2669da1 100644 --- a/database/doc.go +++ b/database/doc.go @@ -5,7 +5,7 @@ /* Package database provides a block and metadata storage database. -Overview +# Overview As of Feb 2016, there are over 400,000 blocks in the Bitcoin block chain and and over 112 million transactions (which turns out to be over 60GB of data). @@ -18,15 +18,15 @@ storage, and strict checksums in key areas to ensure data integrity. 
A quick overview of the features database provides are as follows: - - Key/value metadata store - - Bitcoin block storage - - Efficient retrieval of block headers and regions (transactions, scripts, etc) - - Read-only and read-write transactions with both manual and managed modes - - Nested buckets - - Supports registration of backend databases - - Comprehensive test coverage + - Key/value metadata store + - Bitcoin block storage + - Efficient retrieval of block headers and regions (transactions, scripts, etc) + - Read-only and read-write transactions with both manual and managed modes + - Nested buckets + - Supports registration of backend databases + - Comprehensive test coverage -Database +# Database The main entry point is the DB interface. It exposes functionality for transactional-based access and storage of metadata and block data. It is @@ -43,14 +43,14 @@ The Begin function provides an unmanaged transaction while the View and Update functions provide a managed transaction. These are described in more detail below. -Transactions +# Transactions The Tx interface provides facilities for rolling back or committing changes that took place while the transaction was active. It also provides the root metadata bucket under which all keys, values, and nested buckets are stored. A transaction can either be read-only or read-write and managed or unmanaged. -Managed versus Unmanaged Transactions +# Managed versus Unmanaged Transactions A managed transaction is one where the caller provides a function to execute within the context of the transaction and the commit or rollback is handled @@ -63,7 +63,7 @@ call Commit or Rollback when they are finished with it. Leaving transactions open for long periods of time can have several adverse effects, so it is recommended that managed transactions are used instead. -Buckets +# Buckets The Bucket interface provides the ability to manipulate key/value pairs and nested buckets as well as iterate through them. 
@@ -73,7 +73,7 @@ CreateBucket, CreateBucketIfNotExists, and DeleteBucket functions work with buckets. The ForEach function allows the caller to provide a function to be called with each key/value pair and nested bucket in the current bucket. -Metadata Bucket +# Metadata Bucket As discussed above, all of the functions which are used to manipulate key/value pairs and nested buckets exist on the Bucket interface. The root metadata @@ -81,7 +81,7 @@ bucket is the upper-most bucket in which data is stored and is created at the same time as the database. Use the Metadata function on the Tx interface to retrieve it. -Nested Buckets +# Nested Buckets The CreateBucket and CreateBucketIfNotExists functions on the Bucket interface provide the ability to create an arbitrary number of nested buckets. It is diff --git a/database/driver.go b/database/driver.go index 2999b61157..cb76d2fc62 100644 --- a/database/driver.go +++ b/database/driver.go @@ -62,7 +62,7 @@ func SupportedDrivers() []string { // arguments are specific to the database type driver. See the documentation // for the database driver for further details. // -// ErrDbUnknownType will be returned if the the database type is not registered. +// ErrDbUnknownType will be returned if the database type is not registered. func Create(dbType string, args ...interface{}) (DB, error) { drv, exists := drivers[dbType] if !exists { @@ -77,7 +77,7 @@ func Create(dbType string, args ...interface{}) (DB, error) { // specific to the database type driver. See the documentation for the database // driver for further details. // -// ErrDbUnknownType will be returned if the the database type is not registered. +// ErrDbUnknownType will be returned if the database type is not registered. 
func Open(dbType string, args ...interface{}) (DB, error) { drv, exists := drivers[dbType] if !exists { diff --git a/database/ffldb/bench_test.go b/database/ffldb/bench_test.go index f4a0eb32e5..95e498b274 100644 --- a/database/ffldb/bench_test.go +++ b/database/ffldb/bench_test.go @@ -9,9 +9,9 @@ import ( "path/filepath" "testing" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/database" - "github.com/btcsuite/btcd/btcutil" ) // BenchmarkBlockHeader benchmarks how long it takes to load the mainnet genesis diff --git a/database/ffldb/blockio.go b/database/ffldb/blockio.go index 8fb27ab283..2b415a17b0 100644 --- a/database/ffldb/blockio.go +++ b/database/ffldb/blockio.go @@ -15,6 +15,9 @@ import ( "io" "os" "path/filepath" + "sort" + "strconv" + "strings" "sync" "github.com/btcsuite/btcd/chaincfg/chainhash" @@ -23,6 +26,10 @@ import ( ) const ( + // blockFileExtension is the extension that's used to store the block + // files on the disk. + blockFileExtension = ".fdb" + // The Bitcoin protocol encodes block height as int32, so max number of // blocks is 2^31. Max block size per the protocol is 32MiB per block. // So the theoretical max at the time this comment was written is 64PiB @@ -32,7 +39,7 @@ const ( // 512MiB each for a total of ~476.84PiB (roughly 7.4 times the current // theoretical max), so there is room for the max block size to grow in // the future. - blockFilenameTemplate = "%09d.fdb" + blockFilenameTemplate = "%09d" + blockFileExtension // maxOpenFiles is the max number of open files to maintain in the // open blocks cache. Note that this does not include the current @@ -133,7 +140,7 @@ type blockStore struct { // openBlocksLRU tracks how the open files are refenced by pushing the // most recently used files to the front of the list thereby trickling // the least recently used files to end of the list. 
When a file needs - // to be closed due to exceeding the the max number of allowed open + // to be closed due to exceeding the max number of allowed open // files, the one at the end of the list is closed. // // fileNumToLRUElem is a mapping between a specific block file number @@ -622,8 +629,8 @@ func (s *blockStore) syncBlocks() error { // were partially written. // // There are effectively two scenarios to consider here: -// 1) Transient write failures from which recovery is possible -// 2) More permanent failures such as hard disk death and/or removal +// 1. Transient write failures from which recovery is possible +// 2. More permanent failures such as hard disk death and/or removal // // In either case, the write cursor will be repositioned to the old block file // offset regardless of any other errors that occur while attempting to undo @@ -713,36 +720,57 @@ func (s *blockStore) handleRollback(oldBlockFileNum, oldBlockOffset uint32) { } // scanBlockFiles searches the database directory for all flat block files to -// find the end of the most recent file. This position is considered the -// current write cursor which is also stored in the metadata. Thus, it is used -// to detect unexpected shutdowns in the middle of writes so the block files -// can be reconciled. -func scanBlockFiles(dbPath string) (int, uint32) { - lastFile := -1 - fileLen := uint32(0) - for i := 0; ; i++ { - filePath := blockFilePath(dbPath, uint32(i)) - st, err := os.Stat(filePath) - if err != nil { - break - } - lastFile = i +// find the first file, last file, and the end of the most recent file. The +// position at the last file is considered the current write cursor which is +// also stored in the metadata. Thus, it is used to detect unexpected shutdowns +// in the middle of writes so the block files can be reconciled. 
+func scanBlockFiles(dbPath string) (int, int, uint32, error) { + firstFile, lastFile, lastFileLen, err := int(-1), int(-1), uint32(0), error(nil) + + files, err := filepath.Glob(filepath.Join(dbPath, "*"+blockFileExtension)) + if err != nil { + return 0, 0, 0, err + } + sort.Strings(files) - fileLen = uint32(st.Size()) + // Return early if there's no block files. + if len(files) == 0 { + return firstFile, lastFile, lastFileLen, nil } - log.Tracef("Scan found latest block file #%d with length %d", lastFile, - fileLen) - return lastFile, fileLen + // Grab the first and last file's number. + firstFile, err = strconv.Atoi(strings.TrimSuffix(filepath.Base(files[0]), blockFileExtension)) + if err != nil { + return 0, 0, 0, fmt.Errorf("scanBlockFiles error: %v", err) + } + lastFile, err = strconv.Atoi(strings.TrimSuffix(filepath.Base(files[len(files)-1]), blockFileExtension)) + if err != nil { + return 0, 0, 0, fmt.Errorf("scanBlockFiles error: %v", err) + } + + // Get the last file's length. + filePath := blockFilePath(dbPath, uint32(lastFile)) + st, err := os.Stat(filePath) + if err != nil { + return 0, 0, 0, err + } + lastFileLen = uint32(st.Size()) + + log.Tracef("Scan found latest block file #%d with length %d", lastFile, lastFileLen) + + return firstFile, lastFile, lastFileLen, err } // newBlockStore returns a new block store with the current block file number // and offset set and all fields initialized. -func newBlockStore(basePath string, network wire.BitcoinNet) *blockStore { +func newBlockStore(basePath string, network wire.BitcoinNet) (*blockStore, error) { // Look for the end of the latest block to file to determine what the // write cursor position is from the viewpoing of the block files on // disk. 
- fileNum, fileOff := scanBlockFiles(basePath) + _, fileNum, fileOff, err := scanBlockFiles(basePath) + if err != nil { + return nil, err + } if fileNum == -1 { fileNum = 0 fileOff = 0 @@ -765,5 +793,5 @@ func newBlockStore(basePath string, network wire.BitcoinNet) *blockStore { store.openFileFunc = store.openFile store.openWriteFileFunc = store.openWriteFile store.deleteFileFunc = store.deleteFile - return store + return store, nil } diff --git a/database/ffldb/db.go b/database/ffldb/db.go index f571d3889b..8fc4d32646 100644 --- a/database/ffldb/db.go +++ b/database/ffldb/db.go @@ -14,11 +14,11 @@ import ( "sort" "sync" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/database" "github.com/btcsuite/btcd/database/internal/treap" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/comparer" ldberrors "github.com/syndtr/goleveldb/leveldb/errors" @@ -966,6 +966,10 @@ type transaction struct { pendingBlocks map[chainhash.Hash]int pendingBlockData []pendingBlock + // Files that need to be deleted on commit. These are the files that + // are marked as files to be deleted during pruning. + pendingDelFileNums []uint32 + // Keys that need to be stored or deleted on commit. pendingKeys *treap.Mutable pendingRemove *treap.Mutable @@ -1015,7 +1019,7 @@ func (tx *transaction) notifyActiveIters() { tx.activeIterLock.RUnlock() } -// checkClosed returns an error if the the database or transaction is closed. +// checkClosed returns an error if the database or transaction is closed. func (tx *transaction) checkClosed() error { // The transaction is no longer valid if it has been closed. if tx.closed { @@ -1593,6 +1597,9 @@ func (tx *transaction) close() { tx.pendingBlocks = nil tx.pendingBlockData = nil + // Clear pending file deletions. 
+ tx.pendingDelFileNums = nil + // Clear pending keys that would have been written or deleted on commit. tx.pendingKeys = nil tx.pendingRemove = nil @@ -1619,6 +1626,18 @@ func (tx *transaction) close() { // // This function MUST only be called when there is pending data to be written. func (tx *transaction) writePendingAndCommit() error { + // Loop through all the pending file deletions and delete them. + // We do this first before doing any of the writes as we can't undo + // deletions of files. + for _, fileNum := range tx.pendingDelFileNums { + err := tx.db.store.deleteFileFunc(fileNum) + if err != nil { + // Nothing we can do if we fail to delete blocks besides + // return an error. + return err + } + } + // Save the current block store write position for potential rollback. // These variables are only updated here in this function and there can // only be one write transaction active at a time, so it's safe to store @@ -1669,6 +1688,117 @@ func (tx *transaction) writePendingAndCommit() error { return tx.db.cache.commitTx(tx) } +// PruneBlocks deletes the block files until it reaches the target size +// (specified in bytes). Throws an error if the target size is below +// the maximum size of a single block file. +// +// This function is part of the database.Tx interface implementation. +func (tx *transaction) PruneBlocks(targetSize uint64) ([]chainhash.Hash, error) { + // Ensure transaction state is valid. + if err := tx.checkClosed(); err != nil { + return nil, err + } + + // Ensure the transaction is writable. + if !tx.writable { + str := "prune blocks requires a writable database transaction" + return nil, makeDbErr(database.ErrTxNotWritable, str, nil) + } + + // Make a local alias for the maxBlockFileSize. 
+ maxSize := uint64(tx.db.store.maxBlockFileSize) + if targetSize < maxSize { + return nil, fmt.Errorf("got target size of %d but it must be greater "+ + "than %d, the max size of a single block file", + targetSize, maxSize) + } + + first, last, lastFileSize, err := scanBlockFiles(tx.db.store.basePath) + if err != nil { + return nil, err + } + + // If we have no files on disk or just a single file on disk, return early. + if first == last { + return nil, nil + } + + // Last file number minus the first file number gives us the count of files + // on disk minus 1. We don't want to count the last file since we can't assume + // that it is of max size. + maxSizeFileCount := last - first + + // If the total size of block files are under the target, return early and + // don't prune. + totalSize := uint64(lastFileSize) + (maxSize * uint64(maxSizeFileCount)) + if totalSize <= targetSize { + return nil, nil + } + + log.Tracef("Using %d more bytes than the target of %d MiB. Pruning files...", + totalSize-targetSize, + targetSize/(1024*1024)) + + deletedFiles := make(map[uint32]struct{}) + + // We use < not <= so that the last file is never deleted. There are other checks in place + // but setting it to < here doesn't hurt. + for i := uint32(first); i < uint32(last); i++ { + // Add the block file to be deleted to the list of files pending deletion to + // delete when the transaction is committed. + if tx.pendingDelFileNums == nil { + tx.pendingDelFileNums = make([]uint32, 0, 1) + } + tx.pendingDelFileNums = append(tx.pendingDelFileNums, i) + + // Add the file index to the deleted files map so that we can later + // delete the block location index. + deletedFiles[i] = struct{}{} + + // If we're already at or below the target usage, break and don't + // try to delete more files. + totalSize -= maxSize + if totalSize <= targetSize { + break + } + } + + // Delete the indexed block locations for the files that we've just deleted. 
+ var deletedBlockHashes []chainhash.Hash + cursor := tx.blockIdxBucket.Cursor() + for ok := cursor.First(); ok; ok = cursor.Next() { + loc := deserializeBlockLoc(cursor.Value()) + + _, found := deletedFiles[loc.blockFileNum] + if found { + deletedBlockHashes = append(deletedBlockHashes, *(*chainhash.Hash)(cursor.Key())) + err := cursor.Delete() + if err != nil { + return nil, err + } + } + } + + log.Tracef("Finished pruning. Database now at %d bytes", totalSize) + + return deletedBlockHashes, nil +} + +// BeenPruned returns if the block storage has ever been pruned. +// +// This function is part of the database.Tx interface implementation. +func (tx *transaction) BeenPruned() (bool, error) { + first, last, _, err := scanBlockFiles(tx.db.store.basePath) + if err != nil { + return false, err + } + + // If the database is pruned, then the first .fdb will not be there. + // We also check that there isn't just 1 file on disk or if there are + // no files on disk by checking if first != last. + return first != 0 && (first != last), nil +} + // Commit commits all changes that have been made to the root metadata bucket // and all of its sub-buckets to the database cache which is periodically synced // to persistent storage. In addition, it commits all new blocks directly to @@ -2016,7 +2146,10 @@ func openDB(dbPath string, network wire.BitcoinNet, create bool) (database.DB, e // according to the data that is actually on disk. Also create the // database cache which wraps the underlying leveldb database to provide // write caching. 
- store := newBlockStore(dbPath, network) + store, err := newBlockStore(dbPath, network) + if err != nil { + return nil, convertErr(err.Error(), err) + } cache := newDbCache(ldb, store, defaultCacheSize, defaultFlushSecs) pdb := &db{store: store, cache: cache} diff --git a/database/ffldb/doc.go b/database/ffldb/doc.go index 96a2992cb9..0001196746 100644 --- a/database/ffldb/doc.go +++ b/database/ffldb/doc.go @@ -10,7 +10,7 @@ This driver is the recommended driver for use with btcd. It makes use leveldb for the metadata, flat files for block storage, and checksums in key areas to ensure data integrity. -Usage +# Usage This package is a driver to the database package and provides the database type of "ffldb". The parameters the Open and Create functions take are the diff --git a/database/ffldb/driver_test.go b/database/ffldb/driver_test.go index ef35f07840..38a84ee2f9 100644 --- a/database/ffldb/driver_test.go +++ b/database/ffldb/driver_test.go @@ -5,16 +5,18 @@ package ffldb_test import ( + "bytes" "fmt" "os" "path/filepath" "reflect" "testing" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/database" "github.com/btcsuite/btcd/database/ffldb" - "github.com/btcsuite/btcd/btcutil" ) // dbType is the database type name for this driver. @@ -253,6 +255,179 @@ func TestPersistence(t *testing.T) { } } +// TestPrune tests that the older .fdb files are deleted with a call to prune. +func TestPrune(t *testing.T) { + t.Parallel() + + // Create a new database to run tests against. + dbPath := t.TempDir() + db, err := database.Create(dbType, dbPath, blockDataNet) + if err != nil { + t.Errorf("Failed to create test database (%s) %v", dbType, err) + return + } + defer db.Close() + + blockFileSize := uint64(2048) + + testfn := func(t *testing.T, db database.DB) { + // Load the test blocks and save in the test context for use throughout + // the tests. 
+ blocks, err := loadBlocks(t, blockDataFile, blockDataNet) + if err != nil { + t.Errorf("loadBlocks: Unexpected error: %v", err) + return + } + err = db.Update(func(tx database.Tx) error { + for i, block := range blocks { + err := tx.StoreBlock(block) + if err != nil { + return fmt.Errorf("StoreBlock #%d: unexpected error: "+ + "%v", i, err) + } + } + return nil + }) + if err != nil { + t.Fatal(err) + } + + blockHashMap := make(map[chainhash.Hash][]byte, len(blocks)) + for _, block := range blocks { + bytes, err := block.Bytes() + if err != nil { + t.Fatal(err) + } + blockHashMap[*block.Hash()] = bytes + } + + err = db.Update(func(tx database.Tx) error { + _, err := tx.PruneBlocks(1024) + if err == nil { + return fmt.Errorf("Expected an error when attempting to prune" + + "below the maxFileSize") + } + + _, err = tx.PruneBlocks(0) + if err == nil { + return fmt.Errorf("Expected an error when attempting to prune" + + "below the maxFileSize") + } + + return nil + }) + if err != nil { + t.Fatal(err) + } + err = db.View(func(tx database.Tx) error { + pruned, err := tx.BeenPruned() + if err != nil { + return err + } + + if pruned { + err = fmt.Errorf("The database hasn't been pruned but " + + "BeenPruned returned true") + } + return err + }) + if err != nil { + t.Fatal(err) + } + + var deletedBlocks []chainhash.Hash + + // This should leave 3 files on disk. + err = db.Update(func(tx database.Tx) error { + deletedBlocks, err = tx.PruneBlocks(blockFileSize * 3) + if err != nil { + return err + } + + pruned, err := tx.BeenPruned() + if err != nil { + return err + } + + if pruned { + err = fmt.Errorf("The database hasn't been commited yet " + + "but files were already deleted") + } + return err + }) + if err != nil { + t.Fatal(err) + } + + // The only error we can get is a bad pattern error. Since we're hardcoding + // the pattern, we should not have an error at runtime. 
+ files, _ := filepath.Glob(filepath.Join(dbPath, "*.fdb")) + if len(files) != 3 { + t.Fatalf("Expected to find %d files but got %d", + 3, len(files)) + } + + err = db.View(func(tx database.Tx) error { + pruned, err := tx.BeenPruned() + if err != nil { + return err + } + + if !pruned { + err = fmt.Errorf("The database has been pruned but " + + "BeenPruned returned false") + } + return err + }) + if err != nil { + t.Fatal(err) + } + + // Check that all the blocks that say were deleted are deleted from the + // block index bucket as well. + err = db.View(func(tx database.Tx) error { + for _, deletedBlock := range deletedBlocks { + _, err := tx.FetchBlock(&deletedBlock) + if dbErr, ok := err.(database.Error); !ok || + dbErr.ErrorCode != database.ErrBlockNotFound { + + return fmt.Errorf("Expected ErrBlockNotFound "+ + "but got %v", dbErr) + } + } + + return nil + }) + if err != nil { + t.Fatal(err) + } + + // Check that the not deleted blocks are present. + for _, deletedBlock := range deletedBlocks { + delete(blockHashMap, deletedBlock) + } + err = db.View(func(tx database.Tx) error { + for hash, wantBytes := range blockHashMap { + gotBytes, err := tx.FetchBlock(&hash) + if err != nil { + return err + } + if !bytes.Equal(gotBytes, wantBytes) { + return fmt.Errorf("got bytes %x, want bytes %x", + gotBytes, wantBytes) + } + } + return nil + }) + if err != nil { + t.Fatal(err) + } + } + ffldb.TstRunWithMaxBlockFileSize(db, uint32(blockFileSize), func() { + testfn(t, db) + }) +} + // TestInterface performs all interfaces tests for this database driver. 
func TestInterface(t *testing.T) { t.Parallel() diff --git a/database/ffldb/export_test.go b/database/ffldb/export.go similarity index 63% rename from database/ffldb/export_test.go rename to database/ffldb/export.go index 2d8e4d2a2b..0802167ee0 100644 --- a/database/ffldb/export_test.go +++ b/database/ffldb/export.go @@ -2,20 +2,17 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -/* -This test file is part of the ffldb package rather than than the ffldb_test -package so it can bridge access to the internals to properly test cases which -are either not possible or can't reliably be tested via the public interface. -The functions are only exported while the tests are being run. -*/ - package ffldb -import "github.com/btcsuite/btcd/database" +import ( + "github.com/btcsuite/btcd/database" +) // TstRunWithMaxBlockFileSize runs the passed function with the maximum allowed // file size for the database set to the provided value. The value will be set // back to the original value upon completion. +// +// Callers should only use this for testing. func TstRunWithMaxBlockFileSize(idb database.DB, size uint32, fn func()) { ffldb := idb.(*db) origSize := ffldb.store.maxBlockFileSize diff --git a/database/ffldb/interface_test.go b/database/ffldb/interface_test.go index af26faccab..b0f275c5de 100644 --- a/database/ffldb/interface_test.go +++ b/database/ffldb/interface_test.go @@ -25,11 +25,11 @@ import ( "testing" "time" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/database" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) var ( @@ -1212,7 +1212,7 @@ func testFetchBlockIOMissing(tc *testContext, tx database.Tx) bool { // testFetchBlockIO ensures all of the block retrieval API functions work as // expected for the provide set of blocks. 
The blocks must already be stored in -// the database, or at least stored into the the passed transaction. It also +// the database, or at least stored into the passed transaction. It also // tests several error conditions such as ensuring the expected errors are // returned when fetching blocks, headers, and regions that don't exist. func testFetchBlockIO(tc *testContext, tx database.Tx) bool { diff --git a/database/ffldb/whitebox_test.go b/database/ffldb/whitebox_test.go index f2eae8ce09..cc7c13d45f 100644 --- a/database/ffldb/whitebox_test.go +++ b/database/ffldb/whitebox_test.go @@ -17,10 +17,10 @@ import ( "path/filepath" "testing" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/database" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" "github.com/syndtr/goleveldb/leveldb" ldberrors "github.com/syndtr/goleveldb/leveldb/errors" ) diff --git a/database/interface.go b/database/interface.go index aa88cc3723..7efc7c55f6 100644 --- a/database/interface.go +++ b/database/interface.go @@ -8,8 +8,8 @@ package database import ( - "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg/chainhash" ) // Cursor represents a cursor over key/value pairs and nested buckets of a @@ -389,6 +389,26 @@ type Tx interface { // implementations. FetchBlockRegions(regions []BlockRegion) ([][]byte, error) + // PruneBlocks deletes the block files until it reaches the target size + // (specificed in bytes). + // + // The interface contract guarantees at least the following errors will + // be returned (other implementation-specific errors are possible): + // - ErrTxNotWritable if attempted against a read-only transaction + // - ErrTxClosed if the transaction has already been closed + // + // NOTE: The data returned by this function is only valid during a + // database transaction. 
Attempting to access it after a transaction + // has ended results in undefined behavior. This constraint prevents + // additional data copies and allows support for memory-mapped database + // implementations. + PruneBlocks(targetSize uint64) ([]chainhash.Hash, error) + + // BeenPruned returns if the block storage has ever been pruned. + // + // Implementation specific errors are possible. + BeenPruned() (bool, error) + // ****************************************************************** // Methods related to both atomic metadata storage and block storage. // ****************************************************************** diff --git a/database/internal/treap/immutable_test.go b/database/internal/treap/immutable_test.go index e8952c3846..e0a1cb4af6 100644 --- a/database/internal/treap/immutable_test.go +++ b/database/internal/treap/immutable_test.go @@ -344,7 +344,7 @@ func TestImmutableDuplicatePut(t *testing.T) { testTreap = testTreap.Put(key, key) expectedSize += nodeFieldsSize + uint64(len(key)+len(key)) - // Put a duplicate key with the the expected final value. + // Put a duplicate key with the expected final value. testTreap = testTreap.Put(key, expectedVal) // Ensure the key still exists and is the new value. diff --git a/database/internal/treap/treapiter.go b/database/internal/treap/treapiter.go index d6981aafd8..ae7ed853b8 100644 --- a/database/internal/treap/treapiter.go +++ b/database/internal/treap/treapiter.go @@ -318,13 +318,14 @@ func (iter *Iterator) ForceReseek() { // unexpected keys and/or values. 
// // For example: -// iter := t.Iterator(nil, nil) -// for iter.Next() { -// if someCondition { -// t.Delete(iter.Key()) -// iter.ForceReseek() -// } -// } +// +// iter := t.Iterator(nil, nil) +// for iter.Next() { +// if someCondition { +// t.Delete(iter.Key()) +// iter.ForceReseek() +// } +// } func (t *Mutable) Iterator(startKey, limitKey []byte) *Iterator { iter := &Iterator{ t: t, diff --git a/doc.go b/doc.go index 70d0d9e45c..47e4e626b7 100644 --- a/doc.go +++ b/doc.go @@ -18,143 +18,145 @@ on Windows. The -C (--configfile) flag, as shown below, can be used to override this location. Usage: - btcd [OPTIONS] + + btcd [OPTIONS] Application Options: - --addcheckpoint= Add a custom checkpoint. Format: - ':' - -a, --addpeer= Add a peer to connect with at startup - --addrindex Maintain a full address-based transaction index - which makes the searchrawtransactions RPC - available - --banduration= How long to ban misbehaving peers. Valid time - units are {s, m, h}. Minimum 1 second (default: - 24h0m0s) - --banthreshold= Maximum allowed ban score before disconnecting - and banning misbehaving peers. (default: 100) - --blockmaxsize= Maximum block size in bytes to be used when - creating a block (default: 750000) - --blockminsize= Mininum block size in bytes to be used when - creating a block - --blockmaxweight= Maximum block weight to be used when creating a - block (default: 3000000) - --blockminweight= Mininum block weight to be used when creating a - block - --blockprioritysize= Size in bytes for high-priority/low-fee - transactions when creating a block (default: - 50000) - --blocksonly Do not accept transactions from remote peers. 
- -C, --configfile= Path to configuration file - --connect= Connect only to the specified peers at startup - --cpuprofile= Write CPU profile to the specified file - -b, --datadir= Directory to store data - --dbtype= Database backend to use for the Block Chain - (default: ffldb) - -d, --debuglevel= Logging level for all subsystems {trace, debug, - info, warn, error, critical} -- You may also - specify - =,=,... to - set the log level for individual subsystems -- - Use show to list available subsystems (default: - info) - --dropaddrindex Deletes the address-based transaction index from - the database on start up and then exits. - --dropcfindex Deletes the index used for committed filtering - (CF) support from the database on start up and - then exits. - --droptxindex Deletes the hash-based transaction index from the - database on start up and then exits. - --externalip= Add an ip to the list of local addresses we claim - to listen on to peers - --generate Generate (mine) bitcoins using the CPU - --limitfreerelay= Limit relay of transactions with no transaction - fee to the given amount in thousands of bytes per - minute (default: 15) - --listen= Add an interface/port to listen for connections - (default all interfaces port: 8333, testnet: - 18333, signet: 38333) - --logdir= Directory to log output - --maxorphantx= Max number of orphan transactions to keep in - memory (default: 100) - --maxpeers= Max number of inbound and outbound peers - (default: 125) - --miningaddr= Add the specified payment address to the list of - addresses to use for generated blocks -- At least - one address is required if the generate option is - set - --minrelaytxfee= The minimum transaction fee in BTC/kB to be - considered a non-zero fee. (default: 1e-05) - --nobanning Disable banning of misbehaving peers - --nocfilters Disable committed filtering (CF) support - --nocheckpoints Disable built-in checkpoints. Don't do this - unless you know what you're doing. 
- --nodnsseed Disable DNS seeding for peers - --nolisten Disable listening for incoming connections -- - NOTE: Listening is automatically disabled if the - --connect or --proxy options are used without - also specifying listen interfaces via --listen - --noonion Disable connecting to tor hidden services - --nopeerbloomfilters Disable bloom filtering support - --norelaypriority Do not require free or low-fee transactions to - have high priority for relaying - --norpc Disable built-in RPC server -- NOTE: The RPC - server is disabled by default if no - rpcuser/rpcpass or rpclimituser/rpclimitpass is - specified - --notls Disable TLS for the RPC server -- NOTE: This is - only allowed if the RPC server is bound to - localhost - --onion= Connect to tor hidden services via SOCKS5 proxy - (eg. 127.0.0.1:9050) - --onionpass= Password for onion proxy server - --onionuser= Username for onion proxy server - --profile= Enable HTTP profiling on given port -- NOTE port - must be between 1024 and 65536 - --proxy= Connect via SOCKS5 proxy (eg. 127.0.0.1:9050) - --proxypass= Password for proxy server - --proxyuser= Username for proxy server - --regtest Use the regression test network - --rejectnonstd Reject non-standard transactions regardless of - the default settings for the active network. - --relaynonstd Relay non-standard transactions regardless of the - default settings for the active network. 
- --rpccert= File containing the certificate file - --rpckey= File containing the certificate key - --rpclimitpass= Password for limited RPC connections - --rpclimituser= Username for limited RPC connections - --rpclisten= Add an interface/port to listen for RPC - connections (default port: 8334, testnet: 18334) - --rpcmaxclients= Max number of RPC clients for standard - connections (default: 10) - --rpcmaxconcurrentreqs= Max number of concurrent RPC requests that may be - processed concurrently (default: 20) - --rpcmaxwebsockets= Max number of RPC websocket connections (default: - 25) - --rpcquirks Mirror some JSON-RPC quirks of Bitcoin Core -- - NOTE: Discouraged unless interoperability issues - need to be worked around - -P, --rpcpass= Password for RPC connections - -u, --rpcuser= Username for RPC connections - --sigcachemaxsize= The maximum number of entries in the signature - verification cache (default: 100000) - --simnet Use the simulation test network - --testnet Use the test network - --torisolation Enable Tor stream isolation by randomizing user - credentials for each connection. - --trickleinterval= Minimum time between attempts to send new - inventory to a connected peer (default: 10s) - --txindex Maintain a full hash-based transaction index - which makes all transactions available via the - getrawtransaction RPC - --uacomment= Comment to add to the user agent -- See BIP 14 - for more information. - --upnp Use UPnP to map our listening port outside of NAT - -V, --version Display version information and exit - --whitelist= Add an IP network or IP that will not be banned. - (eg. 192.168.1.0/24 or ::1) + + --addcheckpoint= Add a custom checkpoint. Format: + ':' + -a, --addpeer= Add a peer to connect with at startup + --addrindex Maintain a full address-based transaction index + which makes the searchrawtransactions RPC + available + --banduration= How long to ban misbehaving peers. Valid time + units are {s, m, h}. 
Minimum 1 second (default: + 24h0m0s) + --banthreshold= Maximum allowed ban score before disconnecting + and banning misbehaving peers. (default: 100) + --blockmaxsize= Maximum block size in bytes to be used when + creating a block (default: 750000) + --blockminsize= Minimum block size in bytes to be used when + creating a block + --blockmaxweight= Maximum block weight to be used when creating a + block (default: 3000000) + --blockminweight= Minimum block weight to be used when creating a + block + --blockprioritysize= Size in bytes for high-priority/low-fee + transactions when creating a block (default: + 50000) + --blocksonly Do not accept transactions from remote peers. + -C, --configfile= Path to configuration file + --connect= Connect only to the specified peers at startup + --cpuprofile= Write CPU profile to the specified file + -b, --datadir= Directory to store data + --dbtype= Database backend to use for the Block Chain + (default: ffldb) + -d, --debuglevel= Logging level for all subsystems {trace, debug, + info, warn, error, critical} -- You may also + specify + =,=,... to + set the log level for individual subsystems -- + Use show to list available subsystems (default: + info) + --dropaddrindex Deletes the address-based transaction index from + the database on start up and then exits. + --dropcfindex Deletes the index used for committed filtering + (CF) support from the database on start up and + then exits. + --droptxindex Deletes the hash-based transaction index from the + database on start up and then exits. 
+ --externalip= Add an ip to the list of local addresses we claim + to listen on to peers + --generate Generate (mine) bitcoins using the CPU + --limitfreerelay= Limit relay of transactions with no transaction + fee to the given amount in thousands of bytes per + minute (default: 15) + --listen= Add an interface/port to listen for connections + (default all interfaces port: 8333, testnet: + 18333, signet: 38333) + --logdir= Directory to log output + --maxorphantx= Max number of orphan transactions to keep in + memory (default: 100) + --maxpeers= Max number of inbound and outbound peers + (default: 125) + --miningaddr= Add the specified payment address to the list of + addresses to use for generated blocks -- At least + one address is required if the generate option is + set + --minrelaytxfee= The minimum transaction fee in BTC/kB to be + considered a non-zero fee. (default: 1e-05) + --nobanning Disable banning of misbehaving peers + --nocfilters Disable committed filtering (CF) support + --nocheckpoints Disable built-in checkpoints. Don't do this + unless you know what you're doing. + --nodnsseed Disable DNS seeding for peers + --nolisten Disable listening for incoming connections -- + NOTE: Listening is automatically disabled if the + --connect or --proxy options are used without + also specifying listen interfaces via --listen + --noonion Disable connecting to tor hidden services + --nopeerbloomfilters Disable bloom filtering support + --norelaypriority Do not require free or low-fee transactions to + have high priority for relaying + --norpc Disable built-in RPC server -- NOTE: The RPC + server is disabled by default if no + rpcuser/rpcpass or rpclimituser/rpclimitpass is + specified + --notls Disable TLS for the RPC server -- NOTE: This is + only allowed if the RPC server is bound to + localhost + --onion= Connect to tor hidden services via SOCKS5 proxy + (eg. 
127.0.0.1:9050) + --onionpass= Password for onion proxy server + --onionuser= Username for onion proxy server + --profile= Enable HTTP profiling on given port -- NOTE port + must be between 1024 and 65536 + --proxy= Connect via SOCKS5 proxy (eg. 127.0.0.1:9050) + --proxypass= Password for proxy server + --proxyuser= Username for proxy server + --regtest Use the regression test network + --rejectnonstd Reject non-standard transactions regardless of + the default settings for the active network. + --relaynonstd Relay non-standard transactions regardless of the + default settings for the active network. + --rpccert= File containing the certificate file + --rpckey= File containing the certificate key + --rpclimitpass= Password for limited RPC connections + --rpclimituser= Username for limited RPC connections + --rpclisten= Add an interface/port to listen for RPC + connections (default port: 8334, testnet: 18334) + --rpcmaxclients= Max number of RPC clients for standard + connections (default: 10) + --rpcmaxconcurrentreqs= Max number of concurrent RPC requests that may be + processed concurrently (default: 20) + --rpcmaxwebsockets= Max number of RPC websocket connections (default: + 25) + --rpcquirks Mirror some JSON-RPC quirks of Bitcoin Core -- + NOTE: Discouraged unless interoperability issues + need to be worked around + -P, --rpcpass= Password for RPC connections + -u, --rpcuser= Username for RPC connections + --sigcachemaxsize= The maximum number of entries in the signature + verification cache (default: 100000) + --simnet Use the simulation test network + --testnet Use the test network + --torisolation Enable Tor stream isolation by randomizing user + credentials for each connection. 
+ --trickleinterval= Minimum time between attempts to send new + inventory to a connected peer (default: 10s) + --txindex Maintain a full hash-based transaction index + which makes all transactions available via the + getrawtransaction RPC + --uacomment= Comment to add to the user agent -- See BIP 14 + for more information. + --upnp Use UPnP to map our listening port outside of NAT + -V, --version Display version information and exit + --whitelist= Add an IP network or IP that will not be banned. + (eg. 192.168.1.0/24 or ::1) Help Options: - -h, --help Show this help message + -h, --help Show this help message */ package main diff --git a/docs/code_contribution_guidelines.md b/docs/code_contribution_guidelines.md index c0a7eecc5f..da775878da 100644 --- a/docs/code_contribution_guidelines.md +++ b/docs/code_contribution_guidelines.md @@ -297,7 +297,7 @@ Rejoice as you will now be listed as a [contributor](https://github.com/btcsuite ## Contribution Checklist -- [  ] All changes are Go version 1.3 compliant +- [  ] All changes are Go version 1.17 compliant - [  ] The code being submitted is commented according to the [Code Documentation and Commenting](#CodeDocumentation) section - [  ] For new code: Code is accompanied by tests which exercise both diff --git a/docs/installation.md b/docs/installation.md index a74db56022..f6670bd50a 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -5,7 +5,7 @@ details on how to install on the supported operating systems. ## Requirements -[Go](http://golang.org) 1.16 or newer. +[Go](http://golang.org) 1.17 or newer. ## GPG Verification Key @@ -54,7 +54,7 @@ recommended that `GOPATH` is set to a directory in your home directory such as ```bash git clone https://github.com/btcsuite/btcd $GOPATH/src/github.com/btcsuite/btcd cd $GOPATH/src/github.com/btcsuite/btcd -GO111MODULE=on go install -v . ./cmd/... +go install -v . ./cmd/... ``` * btcd (and utilities) will now be installed in ```$GOPATH/bin```. 
If you did diff --git a/docs/json_rpc_api.md b/docs/json_rpc_api.md index d1b43ce8c4..2c7d455457 100644 --- a/docs/json_rpc_api.md +++ b/docs/json_rpc_api.md @@ -168,26 +168,27 @@ the method name for further details such as parameter and return information. |8|[getblockcount](#getblockcount)|Y|Returns the number of blocks in the longest block chain.| |9|[getblockhash](#getblockhash)|Y|Returns hash of the block in best block chain at the given height.| |10|[getblockheader](#getblockheader)|Y|Returns the block header of the block.| -|11|[getconnectioncount](#getconnectioncount)|N|Returns the number of active connections to other peers.| -|12|[getdifficulty](#getdifficulty)|Y|Returns the proof-of-work difficulty as a multiple of the minimum difficulty.| -|13|[getgenerate](#getgenerate)|N|Return if the server is set to generate coins (mine) or not.| -|14|[gethashespersec](#gethashespersec)|N|Returns a recent hashes per second performance measurement while generating coins (mining).| -|15|[getinfo](#getinfo)|Y|Returns a JSON object containing various state info.| -|16|[getmempoolinfo](#getmempoolinfo)|N|Returns a JSON object containing mempool-related information.| -|17|[getmininginfo](#getmininginfo)|N|Returns a JSON object containing mining-related information.| -|18|[getnettotals](#getnettotals)|Y|Returns a JSON object containing network traffic statistics.| -|19|[getnetworkhashps](#getnetworkhashps)|Y|Returns the estimated network hashes per second for the block heights provided by the parameters.| -|20|[getpeerinfo](#getpeerinfo)|N|Returns information about each connected network peer as an array of json objects.| -|21|[getrawmempool](#getrawmempool)|Y|Returns an array of hashes for all of the transactions currently in the memory pool.| -|22|[getrawtransaction](#getrawtransaction)|Y|Returns information about a transaction given its hash.| -|23|[help](#help)|Y|Returns a list of all commands or help for a specified command.| -|24|[ping](#ping)|N|Queues a ping to be 
sent to each connected peer.| -|25|[sendrawtransaction](#sendrawtransaction)|Y|Submits the serialized, hex-encoded transaction to the local peer and relays it to the network.
btcd does not yet implement the `allowhighfees` parameter, so it has no effect| -|26|[setgenerate](#setgenerate) |N|Set the server to generate coins (mine) or not.
NOTE: Since btcd does not have the wallet integrated to provide payment addresses, btcd must be configured via the `--miningaddr` option to provide which payment addresses to pay created blocks to for this RPC to function.| -|27|[stop](#stop)|N|Shutdown btcd.| -|28|[submitblock](#submitblock)|Y|Attempts to submit a new serialized, hex-encoded block to the network.| -|29|[validateaddress](#validateaddress)|Y|Verifies the given address is valid. NOTE: Since btcd does not have a wallet integrated, btcd will only return whether the address is valid or not.| -|30|[verifychain](#verifychain)|N|Verifies the block chain database.| +|11|[getchaintips](#getchaintips)|Y|Returns information about all known tips in the block tree, including the main chain as well as orphaned branches.| +|12|[getconnectioncount](#getconnectioncount)|N|Returns the number of active connections to other peers.| +|13|[getdifficulty](#getdifficulty)|Y|Returns the proof-of-work difficulty as a multiple of the minimum difficulty.| +|14|[getgenerate](#getgenerate)|N|Return if the server is set to generate coins (mine) or not.| +|15|[gethashespersec](#gethashespersec)|N|Returns a recent hashes per second performance measurement while generating coins (mining).| +|16|[getinfo](#getinfo)|Y|Returns a JSON object containing various state info.| +|17|[getmempoolinfo](#getmempoolinfo)|N|Returns a JSON object containing mempool-related information.| +|18|[getmininginfo](#getmininginfo)|N|Returns a JSON object containing mining-related information.| +|19|[getnettotals](#getnettotals)|Y|Returns a JSON object containing network traffic statistics.| +|20|[getnetworkhashps](#getnetworkhashps)|Y|Returns the estimated network hashes per second for the block heights provided by the parameters.| +|21|[getpeerinfo](#getpeerinfo)|N|Returns information about each connected network peer as an array of json objects.| +|22|[getrawmempool](#getrawmempool)|Y|Returns an array of hashes for all of the transactions currently in 
the memory pool.| +|23|[getrawtransaction](#getrawtransaction)|Y|Returns information about a transaction given its hash.| +|24|[help](#help)|Y|Returns a list of all commands or help for a specified command.| +|25|[ping](#ping)|N|Queues a ping to be sent to each connected peer.| +|26|[sendrawtransaction](#sendrawtransaction)|Y|Submits the serialized, hex-encoded transaction to the local peer and relays it to the network.
btcd does not yet implement the `allowhighfees` parameter, so it has no effect| +|27|[setgenerate](#setgenerate) |N|Set the server to generate coins (mine) or not.
NOTE: Since btcd does not have the wallet integrated to provide payment addresses, btcd must be configured via the `--miningaddr` option to provide which payment addresses to pay created blocks to for this RPC to function.| +|28|[stop](#stop)|N|Shutdown btcd.| +|29|[submitblock](#submitblock)|Y|Attempts to submit a new serialized, hex-encoded block to the network.| +|30|[validateaddress](#validateaddress)|Y|Verifies the given address is valid. NOTE: Since btcd does not have a wallet integrated, btcd will only return whether the address is valid or not.| +|31|[verifychain](#verifychain)|N|Verifies the block chain database.| @@ -319,6 +320,18 @@ the method name for further details such as parameter and return information. |Example Return (verbose=true)|`{`
  `"hash": "00000000009e2958c15ff9290d571bf9459e93b19765c6801ddeccadbb160a1e",`
  `"confirmations": 392076,`
  `"height": 100000,`
  `"version": 2,`
  `"merkleroot": "d574f343976d8e70d91cb278d21044dd8a396019e6db70755a0a50e4783dba38",`
  `"time": 1376123972,`
  `"nonce": 1005240617,`
  `"bits": "1c00f127",`
  `"difficulty": 271.75767393,`
  `"previousblockhash": "000000004956cc2edd1a8caa05eacfa3c69f4c490bfc9ace820257834115ab35",`
  `"nextblockhash": "0000000000629d100db387f37d0f37c51118f250fb0946310a8c37316cbc4028"`
`}`| [Return to Overview](#MethodOverview)
+*** +
+ +| | | +|---|---| +|Method|getchaintips| +|Parameters|None| +|Description|Returns information about all known tips in the block tree, including the main chain as well as orphaned branches| +|Returns|`(A json object array)`
`height`: `(numeric)` The height of the chain tip.
`hash`: `(string)` The block hash of the chain tip.
`branchlen`: `(numeric)` Returns zero for the main chain. Otherwise, this is the length of the branch connecting the tip to the main chain.
`status`: `(string)` Status of the chain. Returns "active" for the main chain.| +|Example Return|`[{"height": 1, "hash": "78b945a390c561cf8b9ccf0598be15d7d85c67022bf71083c0b0bd8042fc30d7", "branchlen": 1, "status": "valid-fork"}, {"height": 1, "hash": "584c830a4783c6331e59cb984686cfec14bccc596fe8bbd1660b90cda359b42a", "branchlen": 0, "status": "active"}]`| +[Return to Overview](#MethodOverview)
+ ***
diff --git a/docs/mining.md b/docs/mining.md index 29a3e89858..e6218bed84 100644 --- a/docs/mining.md +++ b/docs/mining.md @@ -21,9 +21,9 @@ certificate into the default system Certificate Authority list. ## Ubuntu -1. Copy rpc.cert to /usr/share/ca-certificates: `# cp /home/user/.btcd/rpc.cert /usr/share/ca-certificates/btcd.crt` -2. Add btcd.crt to /etc/ca-certificates.conf: `# echo btcd.crt >> /etc/ca-certificates.conf` -3. Update the CA certificate list: `# update-ca-certificates` +1. Copy rpc.cert to /usr/share/ca-certificates: `cp /home/user/.btcd/rpc.cert /usr/share/ca-certificates/btcd.crt` +2. Add btcd.crt to /etc/ca-certificates.conf: `echo btcd.crt >> /etc/ca-certificates.conf` +3. Update the CA certificate list: `update-ca-certificates` ## Set your mining software url to use https diff --git a/docs/update.md b/docs/update.md index 1fb847cf9f..3e411a628b 100644 --- a/docs/update.md +++ b/docs/update.md @@ -4,5 +4,5 @@ ```bash cd $GOPATH/src/github.com/btcsuite/btcd -git pull && GO111MODULE=on go install -v . ./cmd/... +git pull && go install -v . ./cmd/... 
``` diff --git a/docs/using_docker.md b/docs/using_docker.md index 0809abc1c8..6cd41b9d7c 100644 --- a/docs/using_docker.md +++ b/docs/using_docker.md @@ -93,7 +93,7 @@ services: btcd: container_name: btcd hostname: btcd - image: btcsuite/btcd:latest + build: https://github.com/btcsuite/btcd.git#master restart: unless-stopped volumes: - btcd-data:/root/.btcd @@ -115,7 +115,7 @@ services: btcd: container_name: btcd hostname: btcd - image: btcsuite/btcd:latest + build: https://github.com/btcsuite/btcd.git#master restart: unless-stopped volumes: - btcd-data:/root/.btcd @@ -142,7 +142,7 @@ services: btcd: container_name: btcd hostname: btcd - image: btcsuite/btcd:latest + build: https://github.com/btcsuite/btcd.git#master restart: unless-stopped volumes: - btcd-data:/root/.btcd diff --git a/go.mod b/go.mod index 2e3333acc1..425e6d7f2b 100644 --- a/go.mod +++ b/go.mod @@ -2,8 +2,8 @@ module github.com/btcsuite/btcd require ( github.com/btcsuite/btcd/btcec/v2 v2.1.3 - github.com/btcsuite/btcd/btcutil v1.1.0 - github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 + github.com/btcsuite/btcd/btcutil v1.1.5 + github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 @@ -11,6 +11,7 @@ require ( github.com/davecgh/go-spew v1.1.1 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 github.com/decred/dcrd/lru v1.0.0 + github.com/gorilla/websocket v1.5.0 github.com/jessevdk/go-flags v1.4.0 github.com/jrick/logrotate v1.0.0 github.com/stretchr/testify v1.7.0 @@ -28,8 +29,6 @@ require ( gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect ) -replace github.com/btcsuite/btcd/btcutil => ./btcutil - // The retract statements below fixes an accidental push of the tags of a btcd // fork. 
retract ( diff --git a/go.sum b/go.sum index 1e39ef3263..158e868092 100644 --- a/go.sum +++ b/go.sum @@ -1,19 +1,33 @@ github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/btcsuite/btcd v0.23.0/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= +github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= +github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= github.com/btcsuite/btcd/btcec/v2 v2.1.3 h1:xM/n3yIhHAhHy04z4i43C8p4ehixJZMsnrVJkgl+MTE= github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= +github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= +github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8= +github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= 
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -38,7 +52,10 @@ github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI= @@ -48,9 +65,12 @@ github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6 github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= @@ -61,9 +81,11 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -99,6 +121,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/integration/bip0009_test.go b/integration/bip0009_test.go index 67b15f3a5b..5b64480410 100644 --- a/integration/bip0009_test.go +++ b/integration/bip0009_test.go @@ -320,19 +320,20 @@ func 
testBIP0009(t *testing.T, forkKey string, deploymentID uint32) { // - Assert the chain height is 0 and the state is ThresholdDefined // - Generate 1 fewer blocks than needed to reach the first state transition // - Assert chain height is expected and state is still ThresholdDefined +// // - Generate 1 more block to reach the first state transition // - Assert chain height is expected and state moved to ThresholdStarted -// - Generate enough blocks to reach the next state transition window, but only -// signal support in 1 fewer than the required number to achieve -// ThresholdLockedIn +// - Generate enough blocks to reach the next state transition window, but only +// signal support in 1 fewer than the required number to achieve +// ThresholdLockedIn // - Assert chain height is expected and state is still ThresholdStarted -// - Generate enough blocks to reach the next state transition window with only -// the exact number of blocks required to achieve locked in status signalling -// support. +// - Generate enough blocks to reach the next state transition window with only +// the exact number of blocks required to achieve locked in status signalling +// support. 
// - Assert chain height is expected and state moved to ThresholdLockedIn -// - Generate 1 fewer blocks than needed to reach the next state transition +// - Generate 1 fewer blocks than needed to reach the next state transition // - Assert chain height is expected and state is still ThresholdLockedIn -// - Generate 1 more block to reach the next state transition +// - Generate 1 more block to reach the next state transition // - Assert chain height is expected and state moved to ThresholdActive func TestBIP0009(t *testing.T) { t.Parallel() @@ -348,11 +349,14 @@ func TestBIP0009(t *testing.T) { // Overview: // - Generate block 1 // - Assert bit is NOT set (ThresholdDefined) +// // - Generate enough blocks to reach first state transition // - Assert bit is NOT set for block prior to state transition // - Assert bit is set for block at state transition (ThresholdStarted) +// // - Generate enough blocks to reach second state transition // - Assert bit is set for block at state transition (ThresholdLockedIn) +// // - Generate enough blocks to reach third state transition // - Assert bit is set for block prior to state transition (ThresholdLockedIn) // - Assert bit is NOT set for block at state transition (ThresholdActive) diff --git a/integration/csv_fork_test.go b/integration/csv_fork_test.go index 45ab8ad1aa..6b03af95de 100644 --- a/integration/csv_fork_test.go +++ b/integration/csv_fork_test.go @@ -17,12 +17,12 @@ import ( "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/integration/rpctest" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) const ( @@ -95,17 +95,18 @@ func makeTestOutput(r *rpctest.Harness, t *testing.T, // them. 
// // Overview: -// - Pre soft-fork: -// - Transactions with non-final lock-times from the PoV of MTP should be -// rejected from the mempool. -// - Transactions within non-final MTP based lock-times should be accepted -// in valid blocks. // -// - Post soft-fork: -// - Transactions with non-final lock-times from the PoV of MTP should be -// rejected from the mempool and when found within otherwise valid blocks. -// - Transactions with final lock-times from the PoV of MTP should be -// accepted to the mempool and mined in future block. +// - Pre soft-fork: +// 1. Transactions with non-final lock-times from the PoV of MTP should be +// rejected from the mempool. +// 2. Transactions within non-final MTP based lock-times should be accepted +// in valid blocks. +// +// - Post soft-fork: +// 1. Transactions with non-final lock-times from the PoV of MTP should be +// rejected from the mempool and when found within otherwise valid blocks. +// 2. Transactions with final lock-times from the PoV of MTP should be +// accepted to the mempool and mined in future block. func TestBIP0113Activation(t *testing.T) { t.Parallel() @@ -391,13 +392,14 @@ func assertTxInBlock(r *rpctest.Harness, t *testing.T, blockHash *chainhash.Hash // 112 and BIP 68 rule-set after the activation of the CSV-package soft-fork. // // Overview: -// - Pre soft-fork: -// - A transaction spending a CSV output validly should be rejected from the -// mempool, but accepted in a valid generated block including the -// transaction. -// - Post soft-fork: -// - See the cases exercised within the table driven tests towards the end -// of this test. +// - Pre soft-fork: +// 1. A transaction spending a CSV output validly should be rejected from the +// mempool, but accepted in a valid generated block including the +// transaction. +// +// - Post soft-fork: +// 1. See the cases exercised within the table driven tests towards the end +// of this test. 
func TestBIP0068AndBIP0112Activation(t *testing.T) { t.Parallel() diff --git a/integration/getchaintips_test.go b/integration/getchaintips_test.go new file mode 100644 index 0000000000..1570ba740c --- /dev/null +++ b/integration/getchaintips_test.go @@ -0,0 +1,350 @@ +package integration + +import ( + "encoding/hex" + "fmt" + "testing" + + "github.com/btcsuite/btcd/btcjson" + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/integration/rpctest" + "github.com/stretchr/testify/require" +) + +func getBlockFromString(t *testing.T, hexStr string) *btcutil.Block { + t.Helper() + + serializedBlock, err := hex.DecodeString(hexStr) + if err != nil { + t.Fatalf("couldn't decode hex string of %s", hexStr) + } + + block, err := btcutil.NewBlockFromBytes(serializedBlock) + if err != nil { + t.Fatalf("couldn't make a new block from bytes. "+ + "Decoded hex string: %s", hexStr) + } + + return block +} + +// compareMultipleChainTips checks that all the expected chain tips are included in got chain tips and +// verifies that the got chain tip matches the expected chain tip. +func compareMultipleChainTips(t *testing.T, gotChainTips, expectedChainTips []*btcjson.GetChainTipsResult) error { + if len(gotChainTips) != len(expectedChainTips) { + return fmt.Errorf("Expected %d chaintips but got %d", len(expectedChainTips), len(gotChainTips)) + } + + gotChainTipsMap := make(map[string]btcjson.GetChainTipsResult) + for _, gotChainTip := range gotChainTips { + gotChainTipsMap[gotChainTip.Hash] = *gotChainTip + } + + for _, expectedChainTip := range expectedChainTips { + gotChainTip, found := gotChainTipsMap[expectedChainTip.Hash] + if !found { + return fmt.Errorf("Couldn't find expected chaintip with hash %s", expectedChainTip.Hash) + } + + require.Equal(t, gotChainTip, *expectedChainTip) + } + + return nil +} + +func TestGetChainTips(t *testing.T) { + // block1Hex is a block that builds on top of the regtest genesis block. 
+ // Has blockhash of "36c056247e8c0589f6307995e4e13acf2b2b79cad9ecd5a4eeab2131ed0ecde5". + block1Hex := "0000002006226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf18891" + + "0f71881025ae0d41ce8748b79ac40e5f3197af3bb83a594def7943aff0fce504c638ea6d63f" + + "fff7f2000000000010200000000010100000000000000000000000000000000000000000000" + + "00000000000000000000ffffffff025100ffffffff0200f2052a010000001600149b0f9d020" + + "8b3b425246e16830562a63bf1c701180000000000000000266a24aa21a9ede2f61c3f71d1de" + + "fd3fa999dfa36953755c690689799962b48bebd836974e8cf90120000000000000000000000" + + "000000000000000000000000000000000000000000000000000" + + // block2Hex is a block that builds on top of block1Hex. + // Has blockhash of "664b51334782a4ad16e8471b530dcd0027c75b8c25187b41dfc85ecd353295c6". + block2Hex := "00000020e5cd0eed3121abeea4d5ecd9ca792b2bcf3ae1e4957930f689058c7e2456c0" + + "362a78a11b875d31af2ea493aa5b6b623e0d481f11e69f7147ab974be9da087f3e24696f63f" + + "fff7f2001000000010200000000010100000000000000000000000000000000000000000000" + + "00000000000000000000ffffffff025200ffffffff0200f2052a0100000016001470fea1feb" + + "4969c1f237753ae29c0217c6637835c0000000000000000266a24aa21a9ede2f61c3f71d1de" + + "fd3fa999dfa36953755c690689799962b48bebd836974e8cf90120000000000000000000000" + + "000000000000000000000000000000000000000000000000000" + + // block3Hex is a block that builds on top of block2Hex. + // Has blockhash of "17a5c5cb90ecde5a46dd195d434eea46b653e35e4517070eade429db3ac83944". 
+ block3Hex := "00000020c6953235cd5ec8df417b18258c5bc72700cd0d531b47e816ada4824733514b" + + "66c3ad4d567a36c20df07ea0b7fce1e4b4ee5be3eaf0b946b0ae73f3a74d47f0cf99696f63f" + + "fff7f2000000000010200000000010100000000000000000000000000000000000000000000" + + "00000000000000000000ffffffff025300ffffffff0200f2052a010000001600140e835869b" + + "154f647d11376634b5e8c785e7d21060000000000000000266a24aa21a9ede2f61c3f71d1de" + + "fd3fa999dfa36953755c690689799962b48bebd836974e8cf90120000000000000000000000" + + "000000000000000000000000000000000000000000000000000" + + // block4Hex is a block that builds on top of block3Hex. + // Has blockhash of "7b357f3073c4397d6d069a32a09141c32560f3c62233ca138eb5e03c5991f45c". + block4Hex := "000000204439c83adb29e4ad0e0717455ee353b646ea4e435d19dd465adeec90cbc5a5" + + "17ab639a5dd622e90f5f9feffc1c7c28f47a2caf85c21d7dd52cd223a7164619e37a6a6f63f" + + "fff7f2004000000010200000000010100000000000000000000000000000000000000000000" + + "00000000000000000000ffffffff025400ffffffff0200f2052a01000000160014a157c74b4" + + "42a3e11b45cf5273f8c0c032c5a40ed0000000000000000266a24aa21a9ede2f61c3f71d1de" + + "fd3fa999dfa36953755c690689799962b48bebd836974e8cf90120000000000000000000000" + + "000000000000000000000000000000000000000000000000000" + + // block2aHex is a block that builds on top of block1Hex. + // Has blockhash of "5181a4e34cc23ed95c69749dedf4cc7ebd659243bc1683372f8940c8cd8f9b68". 
+ block2aHex := "00000020e5cd0eed3121abeea4d5ecd9ca792b2bcf3ae1e4957930f689058c7e2456c" + + "036f7d4ebe524260c9b6c2b5e3d105cad0b7ddfaeaa29971363574fc1921a3f2f7ad66b6f63" + + "ffff7f200100000001020000000001010000000000000000000000000000000000000000000" + + "000000000000000000000ffffffff025200ffffffff0200f2052a0100000016001466fca22d" + + "0e4679d119ea1e127c984746a1f7e66c0000000000000000266a24aa21a9ede2f61c3f71d1d" + + "efd3fa999dfa36953755c690689799962b48bebd836974e8cf9012000000000000000000000" + + "0000000000000000000000000000000000000000000000000000" + + // block3aHex is a block that builds on top of block2aHex. + // Has blockhash of "0b0216936d1a5c01362256d06a9c9a2b13768fa2f2748549a71008af36dd167f". + block3aHex := "00000020689b8fcdc840892f378316bc439265bd7eccf4ed9d74695cd93ec24ce3a48" + + "15161a430ce5cae955b1254b753bc95854d942947855d3ae59002de9773b7fe65fdf16b6f63" + + "ffff7f200100000001020000000001010000000000000000000000000000000000000000000" + + "000000000000000000000ffffffff025300ffffffff0200f2052a0100000016001471da0afb" + + "883c228b18af6bd0cabc471aebe8d1750000000000000000266a24aa21a9ede2f61c3f71d1d" + + "efd3fa999dfa36953755c690689799962b48bebd836974e8cf9012000000000000000000000" + + "0000000000000000000000000000000000000000000000000000" + + // block4aHex is a block that builds on top of block3aHex. + // Has blockhash of "65a00a026eaa83f6e7a7f4a920faa090f3f9d3565a56df2362db2ab2fa14ccec". 
+ block4aHex := "000000207f16dd36af0810a7498574f2a28f76132b9a9c6ad0562236015c1a6d93160" + + "20b951fa5ee5072d88d6aef9601999307dbd8d96dad067b80bfe04afe81c7a8c21beb706f63" + + "ffff7f200000000001020000000001010000000000000000000000000000000000000000000" + + "000000000000000000000ffffffff025400ffffffff0200f2052a01000000160014fd1f118c" + + "95a712b8adef11c3cc0643bcb6b709f10000000000000000266a24aa21a9ede2f61c3f71d1d" + + "efd3fa999dfa36953755c690689799962b48bebd836974e8cf9012000000000000000000000" + + "0000000000000000000000000000000000000000000000000000" + + // block5aHex is a block that builds on top of block4aHex. + // Has blockhash of "5c8814bc034a4c37fa5ccdc05e09b45a771bd7505d68092f21869a912737ee10". + block5aHex := "00000020eccc14fab22adb6223df565a56d3f9f390a0fa20a9f4a7e7f683aa6e020aa" + + "0656331bd4fcd3db611de7fbf72ef3dff0b85b244b5a983d5c0270e728214f67f9aaa766f63" + + "ffff7f200600000001020000000001010000000000000000000000000000000000000000000" + + "000000000000000000000ffffffff025500ffffffff0200f2052a0100000016001438335896" + + "ad1d087e3541436a5b293c0d23ad27e60000000000000000266a24aa21a9ede2f61c3f71d1d" + + "efd3fa999dfa36953755c690689799962b48bebd836974e8cf9012000000000000000000000" + + "0000000000000000000000000000000000000000000000000000" + + // block4bHex is a block that builds on top of block3aHex. + // Has blockhash of "130458e795cc46f2759195e92737426fb0ada2a07f98434551ffb7500b23c161". 
+ block4bHex := "000000207f16dd36af0810a7498574f2a28f76132b9a9c6ad0562236015c1a6d93160" + + "20b14f9ce93d0144c383fea72f408b06b268a1523a029b825a1edfa15b367f6db2cfd7d6f63" + + "ffff7f200200000001020000000001010000000000000000000000000000000000000000000" + + "000000000000000000000ffffffff025400ffffffff0200f2052a0100000016001405b5ba2d" + + "1e549c4c84a623de3575948d3ef8a27f0000000000000000266a24aa21a9ede2f61c3f71d1d" + + "efd3fa999dfa36953755c690689799962b48bebd836974e8cf9012000000000000000000000" + + "0000000000000000000000000000000000000000000000000000" + + // Set up regtest chain. + r, err := rpctest.New(&chaincfg.RegressionNetParams, nil, nil, "") + if err != nil { + t.Fatal("TestGetChainTips fail. Unable to create primary harness: ", err) + } + if err := r.SetUp(true, 0); err != nil { + t.Fatalf("TestGetChainTips fail. Unable to setup test chain: %v", err) + } + defer r.TearDown() + + // Immediately call getchaintips after setting up regtest. + gotChainTips, err := r.Client.GetChainTips() + if err != nil { + t.Fatal(err) + } + // We expect a single genesis block. + expectedChainTips := []*btcjson.GetChainTipsResult{ + { + Height: 0, + Hash: chaincfg.RegressionNetParams.GenesisHash.String(), + BranchLen: 0, + Status: "active", + }, + } + err = compareMultipleChainTips(t, gotChainTips, expectedChainTips) + if err != nil { + t.Fatalf("TestGetChainTips fail. Error: %v", err) + } + + // Submit 4 blocks. 
+ //
+ // Our chain view looks like so:
+ // (genesis block) -> 1 -> 2 -> 3 -> 4
+ blockStrings := []string{block1Hex, block2Hex, block3Hex, block4Hex}
+ for _, blockString := range blockStrings {
+ block := getBlockFromString(t, blockString)
+ err = r.Client.SubmitBlock(block, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ gotChainTips, err = r.Client.GetChainTips()
+ if err != nil {
+ t.Fatal(err)
+ }
+ expectedChainTips = []*btcjson.GetChainTipsResult{
+ {
+ Height: 4,
+ Hash: getBlockFromString(t, blockStrings[len(blockStrings)-1]).Hash().String(),
+ BranchLen: 0,
+ Status: "active",
+ },
+ }
+ err = compareMultipleChainTips(t, gotChainTips, expectedChainTips)
+ if err != nil {
+ t.Fatalf("TestGetChainTips fail. Error: %v", err)
+ }
+
+ // Submit 2 blocks that don't build on top of the current active tip.
+ //
+ // Our chain view looks like so:
+ // (genesis block) -> 1 -> 2 -> 3 -> 4 (active)
+ // \ -> 2a -> 3a (valid-fork)
+ blockStrings = []string{block2aHex, block3aHex}
+ for _, blockString := range blockStrings {
+ block := getBlockFromString(t, blockString)
+ err = r.Client.SubmitBlock(block, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ gotChainTips, err = r.Client.GetChainTips()
+ if err != nil {
+ t.Fatal(err)
+ }
+ expectedChainTips = []*btcjson.GetChainTipsResult{
+ {
+ Height: 4,
+ Hash: getBlockFromString(t, block4Hex).Hash().String(),
+ BranchLen: 0,
+ Status: "active",
+ },
+ {
+ Height: 3,
+ Hash: getBlockFromString(t, block3aHex).Hash().String(),
+ BranchLen: 2,
+ Status: "valid-fork",
+ },
+ }
+ err = compareMultipleChainTips(t, gotChainTips, expectedChainTips)
+ if err != nil {
+ t.Fatalf("TestGetChainTips fail. Error: %v", err)
+ }
+
+ // Submit a single block that doesn't build on top of the current active tip.
+ // + // Our chain view looks like so: + // (genesis block) -> 1 -> 2 -> 3 -> 4 (active) + // \ -> 2a -> 3a -> 4a (valid-fork) + block := getBlockFromString(t, block4aHex) + err = r.Client.SubmitBlock(block, nil) + if err != nil { + t.Fatal(err) + } + + gotChainTips, err = r.Client.GetChainTips() + if err != nil { + t.Fatal(err) + } + expectedChainTips = []*btcjson.GetChainTipsResult{ + { + Height: 4, + Hash: getBlockFromString(t, block4Hex).Hash().String(), + BranchLen: 0, + Status: "active", + }, + { + Height: 4, + Hash: getBlockFromString(t, block4aHex).Hash().String(), + BranchLen: 3, + Status: "valid-fork", + }, + } + err = compareMultipleChainTips(t, gotChainTips, expectedChainTips) + if err != nil { + t.Fatalf("TestGetChainTips fail. Error: %v", err) + } + + // Submit a single block that changes the active branch to 5a. + // + // Our chain view looks like so: + // (genesis block) -> 1 -> 2 -> 3 -> 4 (valid-fork) + // \ -> 2a -> 3a -> 4a -> 5a (active) + block = getBlockFromString(t, block5aHex) + err = r.Client.SubmitBlock(block, nil) + if err != nil { + t.Fatal(err) + } + gotChainTips, err = r.Client.GetChainTips() + if err != nil { + t.Fatal(err) + } + expectedChainTips = []*btcjson.GetChainTipsResult{ + { + Height: 4, + Hash: getBlockFromString(t, block4Hex).Hash().String(), + BranchLen: 3, + Status: "valid-fork", + }, + { + Height: 5, + Hash: getBlockFromString(t, block5aHex).Hash().String(), + BranchLen: 0, + Status: "active", + }, + } + err = compareMultipleChainTips(t, gotChainTips, expectedChainTips) + if err != nil { + t.Fatalf("TestGetChainTips fail. Error: %v", err) + } + + // Submit a single block that builds on top of 3a. 
+ // + // Our chain view looks like so: + // (genesis block) -> 1 -> 2 -> 3 -> 4 (valid-fork) + // \ -> 2a -> 3a -> 4a -> 5a (active) + // \ -> 4b (valid-fork) + block = getBlockFromString(t, block4bHex) + err = r.Client.SubmitBlock(block, nil) + if err != nil { + t.Fatal(err) + } + gotChainTips, err = r.Client.GetChainTips() + if err != nil { + t.Fatal(err) + } + expectedChainTips = []*btcjson.GetChainTipsResult{ + { + Height: 4, + Hash: getBlockFromString(t, block4Hex).Hash().String(), + BranchLen: 3, + Status: "valid-fork", + }, + { + Height: 5, + Hash: getBlockFromString(t, block5aHex).Hash().String(), + BranchLen: 0, + Status: "active", + }, + { + Height: 4, + Hash: getBlockFromString(t, block4bHex).Hash().String(), + BranchLen: 1, + Status: "valid-fork", + }, + } + + err = compareMultipleChainTips(t, gotChainTips, expectedChainTips) + if err != nil { + t.Fatalf("TestGetChainTips fail. Error: %v", err) + } +} diff --git a/integration/prune_test.go b/integration/prune_test.go new file mode 100644 index 0000000000..ac363cb8ca --- /dev/null +++ b/integration/prune_test.go @@ -0,0 +1,40 @@ +// Copyright (c) 2023 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +// This file is ignored during the regular tests due to the following build tag. +//go:build rpctest +// +build rpctest + +package integration + +import ( + "testing" + + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/integration/rpctest" + "github.com/stretchr/testify/require" +) + +func TestPrune(t *testing.T) { + t.Parallel() + + // Boilerplate code to make a pruned node. + btcdCfg := []string{"--prune=1536"} + r, err := rpctest.New(&chaincfg.SimNetParams, nil, btcdCfg, "") + require.NoError(t, err) + + if err := r.SetUp(false, 0); err != nil { + require.NoError(t, err) + } + t.Cleanup(func() { r.TearDown() }) + + // Check that the rpc call for block chain info comes back correctly. 
+ chainInfo, err := r.Client.GetBlockChainInfo() + require.NoError(t, err) + + if !chainInfo.Pruned { + t.Fatalf("expected the node to be pruned but the pruned "+ + "boolean was %v", chainInfo.Pruned) + } +} diff --git a/integration/rpctest/blockgen.go b/integration/rpctest/blockgen.go index a35c66e6ac..07371fb8a1 100644 --- a/integration/rpctest/blockgen.go +++ b/integration/rpctest/blockgen.go @@ -12,12 +12,12 @@ import ( "time" "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/mining" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) // solveBlock attempts to find a nonce which makes the passed block header hash @@ -197,12 +197,12 @@ func CreateBlock(prevBlock *btcutil.Block, inclusionTxs []*btcutil.Tx, _ = mining.AddWitnessCommitment(coinbaseTx, blockTxns) } - merkles := blockchain.BuildMerkleTreeStore(blockTxns, false) + merkleRoot := blockchain.CalcMerkleRoot(blockTxns, false) var block wire.MsgBlock block.Header = wire.BlockHeader{ Version: blockVersion, PrevBlock: *prevHash, - MerkleRoot: *merkles[len(merkles)-1], + MerkleRoot: merkleRoot, Timestamp: ts, Bits: net.PowLimitBits, } diff --git a/integration/rpctest/node.go b/integration/rpctest/node.go index be52a15404..8dddc75001 100644 --- a/integration/rpctest/node.go +++ b/integration/rpctest/node.go @@ -6,7 +6,6 @@ package rpctest import ( "fmt" - "io/ioutil" "log" "os" "os/exec" @@ -14,8 +13,8 @@ import ( "runtime" "time" - rpc "github.com/btcsuite/btcd/rpcclient" "github.com/btcsuite/btcd/btcutil" + rpc "github.com/btcsuite/btcd/rpcclient" ) // nodeConfig contains all the args, and data required to launch a btcd process @@ -31,7 +30,7 @@ type nodeConfig struct { profile string debugLevel string extra []string - prefix string + nodeDir string exe string endpoint string @@ -41,7 +40,7 @@ type nodeConfig struct { } // 
newConfig returns a newConfig with all default values. -func newConfig(prefix, certFile, keyFile string, extra []string, +func newConfig(nodeDir, certFile, keyFile string, extra []string, customExePath string) (*nodeConfig, error) { var btcdPath string @@ -61,7 +60,7 @@ func newConfig(prefix, certFile, keyFile string, extra []string, rpcUser: "user", rpcPass: "pass", extra: extra, - prefix: prefix, + nodeDir: nodeDir, exe: btcdPath, endpoint: "ws", certFile: certFile, @@ -77,17 +76,9 @@ func newConfig(prefix, certFile, keyFile string, extra []string, // temporary data, and log directories which must be cleaned up with a call to // cleanup(). func (n *nodeConfig) setDefaults() error { - datadir, err := ioutil.TempDir("", n.prefix+"-data") - if err != nil { - return err - } - n.dataDir = datadir - logdir, err := ioutil.TempDir("", n.prefix+"-logs") - if err != nil { - return err - } - n.logDir = logdir - cert, err := ioutil.ReadFile(n.certFile) + n.dataDir = filepath.Join(n.nodeDir, "data") + n.logDir = filepath.Join(n.nodeDir, "logs") + cert, err := os.ReadFile(n.certFile) if err != nil { return err } @@ -163,22 +154,7 @@ func (n *nodeConfig) rpcConnConfig() rpc.ConnConfig { // String returns the string representation of this nodeConfig. func (n *nodeConfig) String() string { - return n.prefix -} - -// cleanup removes the tmp data and log directories. 
-func (n *nodeConfig) cleanup() error { - dirs := []string{ - n.logDir, - n.dataDir, - } - var err error - for _, dir := range dirs { - if err = os.RemoveAll(dir); err != nil { - log.Printf("Cannot remove dir %s: %v", dir, err) - } - } - return err + return n.nodeDir } // node houses the necessary state required to configure, launch, and manage a @@ -213,8 +189,7 @@ func (n *node) start() error { return err } - pid, err := os.Create(filepath.Join(n.dataDir, - fmt.Sprintf("%s.pid", n.config))) + pid, err := os.Create(filepath.Join(n.dataDir, "btcd.pid")) if err != nil { return err } @@ -258,7 +233,10 @@ func (n *node) cleanup() error { } } - return n.config.cleanup() + // Since the node's main data directory is passed in to the node config, + // it isn't our responsibility to clean it up. So we're done after + // removing the pid file. + return nil } // shutdown terminates the running btcd process, and cleans up all @@ -283,11 +261,11 @@ func genCertPair(certFile, keyFile string) error { } // Write cert and key files. 
- if err = ioutil.WriteFile(certFile, cert, 0666); err != nil { + if err = os.WriteFile(certFile, cert, 0666); err != nil { return err } - if err = ioutil.WriteFile(keyFile, key, 0600); err != nil { - os.Remove(certFile) + if err = os.WriteFile(keyFile, key, 0600); err != nil { + _ = os.Remove(certFile) return err } diff --git a/integration/rpctest/rpc_harness.go b/integration/rpctest/rpc_harness.go index 2cb7e56d26..0b85232868 100644 --- a/integration/rpctest/rpc_harness.go +++ b/integration/rpctest/rpc_harness.go @@ -6,7 +6,6 @@ package rpctest import ( "fmt" - "io/ioutil" "net" "os" "path/filepath" @@ -152,8 +151,7 @@ func New(activeNet *chaincfg.Params, handlers *rpcclient.NotificationHandlers, return nil, err } - harnessID := strconv.Itoa(numTestInstances) - nodeTestData, err := ioutil.TempDir(testDir, "harness-"+harnessID) + nodeTestData, err := os.MkdirTemp(testDir, "rpc-node") if err != nil { return nil, err } @@ -173,7 +171,7 @@ func New(activeNet *chaincfg.Params, handlers *rpcclient.NotificationHandlers, extraArgs = append(extraArgs, miningAddr) config, err := newConfig( - "rpctest", certFile, keyFile, extraArgs, customExePath, + nodeTestData, certFile, keyFile, extraArgs, customExePath, ) if err != nil { return nil, err @@ -248,10 +246,10 @@ func (h *Harness) SetUp(createTestChain bool, numMatureOutputs uint32) error { // Start the btcd node itself. This spawns a new process which will be // managed if err := h.node.start(); err != nil { - return err + return fmt.Errorf("error starting node: %w", err) } if err := h.connectRPCClient(); err != nil { - return err + return fmt.Errorf("error connecting RPC client: %w", err) } h.wallet.Start() @@ -272,8 +270,8 @@ func (h *Harness) SetUp(createTestChain bool, numMatureOutputs uint32) error { // Create a test chain with the desired number of mature coinbase // outputs. 
if createTestChain && numMatureOutputs != 0 { - numToGenerate := (uint32(h.ActiveNet.CoinbaseMaturity) + - numMatureOutputs) + coinbaseMaturity := uint32(h.ActiveNet.CoinbaseMaturity) + numToGenerate := coinbaseMaturity + numMatureOutputs _, err := h.Client.Generate(numToGenerate) if err != nil { return err @@ -351,15 +349,18 @@ func (h *Harness) connectRPCClient() error { batchConf.HTTPPostMode = true for i := 0; i < h.MaxConnRetries; i++ { fail := false + timeout := time.Duration(i) * h.ConnectionRetryTimeout if client == nil { - if client, err = rpcclient.New(&rpcConf, h.handlers); err != nil { - time.Sleep(time.Duration(i) * h.ConnectionRetryTimeout) + client, err = rpcclient.New(&rpcConf, h.handlers) + if err != nil { + time.Sleep(timeout) fail = true } } if batchClient == nil { - if batchClient, err = rpcclient.NewBatch(&batchConf); err != nil { - time.Sleep(time.Duration(i) * h.ConnectionRetryTimeout) + batchClient, err = rpcclient.NewBatch(&batchConf) + if err != nil { + time.Sleep(timeout) fail = true } } @@ -369,7 +370,9 @@ func (h *Harness) connectRPCClient() error { } if client == nil || batchClient == nil { - return fmt.Errorf("connection timeout") + return fmt.Errorf("connection timeout, tried %d times with "+ + "timeout %v, last err: %w", h.MaxConnRetries, + h.ConnectionRetryTimeout, err) } h.Client = client @@ -558,6 +561,111 @@ func NextAvailablePort() int { panic("no ports available for listening") } +// NextAvailablePortForProcess returns the first port that is available for +// listening by a new node, using a lock file to make sure concurrent access for +// parallel tasks within the same process don't re-use the same port. It panics +// if no port is found and the maximum available TCP port is reached. 
+func NextAvailablePortForProcess(pid int) int { + lockFile := filepath.Join( + os.TempDir(), fmt.Sprintf("rpctest-port-pid-%d.lock", pid), + ) + timeout := time.After(time.Second) + + var ( + lockFileHandle *os.File + err error + ) + for { + // Attempt to acquire the lock file. If it already exists, wait + // for a bit and retry. + lockFileHandle, err = os.OpenFile( + lockFile, os.O_CREATE|os.O_EXCL, 0600, + ) + if err == nil { + // Lock acquired. + break + } + + // Wait for a bit and retry. + select { + case <-timeout: + panic("timeout waiting for lock file") + case <-time.After(10 * time.Millisecond): + } + } + + // Release the lock file when we're done. + defer func() { + // Always close file first, Windows won't allow us to remove it + // otherwise. + _ = lockFileHandle.Close() + err := os.Remove(lockFile) + if err != nil { + panic(fmt.Errorf("couldn't remove lock file: %w", err)) + } + }() + + portFile := filepath.Join( + os.TempDir(), fmt.Sprintf("rpctest-port-pid-%d", pid), + ) + port, err := os.ReadFile(portFile) + if err != nil { + if !os.IsNotExist(err) { + panic(fmt.Errorf("error reading port file: %w", err)) + } + port = []byte(strconv.Itoa(int(defaultNodePort))) + } + + lastPort, err := strconv.Atoi(string(port)) + if err != nil { + panic(fmt.Errorf("error parsing port: %w", err)) + } + + // We take the next one. + lastPort++ + for lastPort < 65535 { + // If there are no errors while attempting to listen on this + // port, close the socket and return it as available. While it + // could be the case that some other process picks up this port + // between the time the socket is closed and it's reopened in + // the harness node, in practice in CI servers this seems much + // less likely than simply some other process already being + // bound at the start of the tests. 
+ addr := fmt.Sprintf(ListenerFormat, lastPort) + l, err := net.Listen("tcp4", addr) + if err == nil { + err := l.Close() + if err == nil { + err := os.WriteFile( + portFile, + []byte(strconv.Itoa(lastPort)), 0600, + ) + if err != nil { + panic(fmt.Errorf("error updating "+ + "port file: %w", err)) + } + + return lastPort + } + } + lastPort++ + } + + // No ports available? Must be a mistake. + panic("no ports available for listening") +} + +// GenerateProcessUniqueListenerAddresses is a function that returns two +// listener addresses with unique ports per the given process id and should be +// used to overwrite rpctest's default generator which is prone to use colliding +// ports. +func GenerateProcessUniqueListenerAddresses(pid int) (string, string) { + port1 := NextAvailablePortForProcess(pid) + port2 := NextAvailablePortForProcess(pid) + return fmt.Sprintf(ListenerFormat, port1), + fmt.Sprintf(ListenerFormat, port2) +} + // baseDir is the directory path of the temp directory for all rpctest files. func baseDir() (string, error) { dirPath := filepath.Join(os.TempDir(), "btcd", "rpctest") diff --git a/integration/rpctest/rpc_harness_test.go b/integration/rpctest/rpc_harness_test.go index baadd35f5e..978f8d8290 100644 --- a/integration/rpctest/rpc_harness_test.go +++ b/integration/rpctest/rpc_harness_test.go @@ -14,11 +14,11 @@ import ( "testing" "time" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) func testSendOutputs(r *Harness, t *testing.T) { diff --git a/limits/limits_unix.go b/limits/limits_unix.go index 7ebf866789..7972b05e20 100644 --- a/limits/limits_unix.go +++ b/limits/limits_unix.go @@ -2,6 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. 
+//go:build !windows && !plan9 // +build !windows,!plan9 package limits diff --git a/mempool/doc.go b/mempool/doc.go index 3adad018ba..22fb2a06a0 100644 --- a/mempool/doc.go +++ b/mempool/doc.go @@ -31,40 +31,40 @@ proceed. Typically, this will involve things such as relaying the transactions to other peers on the network and notifying the mining process that new transactions are available. -Feature Overview +# Feature Overview The following is a quick overview of the major features. It is not intended to be an exhaustive list. - - Maintain a pool of fully validated transactions - - Reject non-fully-spent duplicate transactions - - Reject coinbase transactions - - Reject double spends (both from the chain and other transactions in pool) - - Reject invalid transactions according to the network consensus rules - - Full script execution and validation with signature cache support - - Individual transaction query support - - Orphan transaction support (transactions that spend from unknown outputs) - - Configurable limits (see transaction acceptance policy) - - Automatic addition of orphan transactions that are no longer orphans as new - transactions are added to the pool - - Individual orphan transaction query support - - Configurable transaction acceptance policy - - Option to accept or reject standard transactions - - Option to accept or reject transactions based on priority calculations - - Rate limiting of low-fee and free transactions - - Non-zero fee threshold - - Max signature operations per transaction - - Max orphan transaction size - - Max number of orphan transactions allowed - - Additional metadata tracking for each transaction - - Timestamp when the transaction was added to the pool - - Most recent block height when the transaction was added to the pool - - The fee the transaction pays - - The starting priority for the transaction - - Manual control of transaction removal - - Recursive removal of all dependent transactions + - Maintain a pool of fully 
validated transactions + 1. Reject non-fully-spent duplicate transactions + 2. Reject coinbase transactions + 3. Reject double spends (both from the chain and other transactions in pool) + 4. Reject invalid transactions according to the network consensus rules + 5. Full script execution and validation with signature cache support + 6. Individual transaction query support + - Orphan transaction support (transactions that spend from unknown outputs) + 1. Configurable limits (see transaction acceptance policy) + 2. Automatic addition of orphan transactions that are no longer orphans as new + transactions are added to the pool + 3. Individual orphan transaction query support + - Configurable transaction acceptance policy + 1. Option to accept or reject standard transactions + 2. Option to accept or reject transactions based on priority calculations + 3. Rate limiting of low-fee and free transactions + 4. Non-zero fee threshold + 5. Max signature operations per transaction + 6. Max orphan transaction size + 7. Max number of orphan transactions allowed + - Additional metadata tracking for each transaction + 1. Timestamp when the transaction was added to the pool + 2. Most recent block height when the transaction was added to the pool + 3. The fee the transaction pays + 4. The starting priority for the transaction + - Manual control of transaction removal + 1. Recursive removal of all dependent transactions -Errors +# Errors Errors returned by this package are either the raw errors provided by underlying calls or of type mempool.RuleError. 
Since there are two classes of rules diff --git a/mempool/estimatefee.go b/mempool/estimatefee.go index a71ce42f12..2d1794b797 100644 --- a/mempool/estimatefee.go +++ b/mempool/estimatefee.go @@ -16,9 +16,9 @@ import ( "strings" "sync" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/mining" - "github.com/btcsuite/btcd/btcutil" ) // TODO incorporate Alex Morcos' modifications to Gavin's initial model diff --git a/mempool/estimatefee_test.go b/mempool/estimatefee_test.go index c5ea85c635..c1e0906096 100644 --- a/mempool/estimatefee_test.go +++ b/mempool/estimatefee_test.go @@ -9,10 +9,10 @@ import ( "math/rand" "testing" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/mining" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) // newTestFeeEstimator creates a feeEstimator with some different parameters diff --git a/mining/cpuminer/cpuminer.go b/mining/cpuminer/cpuminer.go index 038e6645bb..2c07f2ee1f 100644 --- a/mining/cpuminer/cpuminer.go +++ b/mining/cpuminer/cpuminer.go @@ -13,11 +13,11 @@ import ( "time" "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/mining" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) const ( diff --git a/mining/mining.go b/mining/mining.go index 4ed61f3f32..7905dade76 100644 --- a/mining/mining.go +++ b/mining/mining.go @@ -11,11 +11,11 @@ import ( "time" "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) const ( @@ -42,7 +42,7 @@ type TxDesc struct { // Added is the time when the entry was added to the source pool. 
Added time.Time - // Height is the block height when the entry was added to the the source + // Height is the block height when the entry was added to the source // pool. Height int32 @@ -420,26 +420,26 @@ func NewBlkTmplGenerator(policy *Policy, params *chaincfg.Params, // // Given the above, a block generated by this function is of the following form: // -// ----------------------------------- -- -- -// | Coinbase Transaction | | | -// |-----------------------------------| | | -// | | | | ----- policy.BlockPrioritySize -// | High-priority Transactions | | | -// | | | | -// |-----------------------------------| | -- -// | | | -// | | | -// | | |--- policy.BlockMaxSize -// | Transactions prioritized by fee | | -// | until <= policy.TxMinFreeFee | | -// | | | -// | | | -// | | | -// |-----------------------------------| | -// | Low-fee/Non high-priority (free) | | -// | transactions (while block size | | -// | <= policy.BlockMinSize) | | -// ----------------------------------- -- +// ----------------------------------- -- -- +// | Coinbase Transaction | | | +// |-----------------------------------| | | +// | | | | ----- policy.BlockPrioritySize +// | High-priority Transactions | | | +// | | | | +// |-----------------------------------| | -- +// | | | +// | | | +// | | |--- policy.BlockMaxSize +// | Transactions prioritized by fee | | +// | until <= policy.TxMinFreeFee | | +// | | | +// | | | +// | | | +// |-----------------------------------| | +// | Low-fee/Non high-priority (free) | | +// | transactions (while block size | | +// | <= policy.BlockMinSize) | | +// ----------------------------------- -- func (g *BlkTmplGenerator) NewBlockTemplate(payToAddress btcutil.Address) (*BlockTemplate, error) { // Extend the most recently known best block. best := g.chain.BestSnapshot() @@ -520,7 +520,7 @@ mempoolLoop: continue } - // Fetch all of the utxos referenced by the this transaction. + // Fetch all of the utxos referenced by this transaction. 
// NOTE: This intentionally does not fetch inputs from the // mempool since a transaction which depends on other // transactions in the mempool must come after those @@ -823,12 +823,11 @@ mempoolLoop: } // Create a new block ready to be solved. - merkles := blockchain.BuildMerkleTreeStore(blockTxns, false) var msgBlock wire.MsgBlock msgBlock.Header = wire.BlockHeader{ Version: nextBlockVersion, PrevBlock: best.Hash, - MerkleRoot: *merkles[len(merkles)-1], + MerkleRoot: blockchain.CalcMerkleRoot(blockTxns, false), Timestamp: ts, Bits: reqDifficulty, } @@ -875,9 +874,7 @@ func AddWitnessCommitment(coinbaseTx *btcutil.Tx, // Next, obtain the merkle root of a tree which consists of the // wtxid of all transactions in the block. The coinbase // transaction will have a special wtxid of all zeroes. - witnessMerkleTree := blockchain.BuildMerkleTreeStore(blockTxns, - true) - witnessMerkleRoot := witnessMerkleTree[len(witnessMerkleTree)-1] + witnessMerkleRoot := blockchain.CalcMerkleRoot(blockTxns, true) // The preimage to the witness commitment is: // witnessRoot || coinbaseWitness @@ -953,8 +950,8 @@ func (g *BlkTmplGenerator) UpdateExtraNonce(msgBlock *wire.MsgBlock, blockHeight // Recalculate the merkle root with the updated extra nonce. 
block := btcutil.NewBlock(msgBlock) - merkles := blockchain.BuildMerkleTreeStore(block.Transactions(), false) - msgBlock.Header.MerkleRoot = *merkles[len(merkles)-1] + merkleRoot := blockchain.CalcMerkleRoot(block.Transactions(), false) + msgBlock.Header.MerkleRoot = merkleRoot return nil } diff --git a/mining/policy.go b/mining/policy.go index f8ce411602..6213c2b336 100644 --- a/mining/policy.go +++ b/mining/policy.go @@ -6,8 +6,8 @@ package mining import ( "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/wire" ) const ( diff --git a/mining/policy_test.go b/mining/policy_test.go index 0a362f5471..cc2fdfbfb2 100644 --- a/mining/policy_test.go +++ b/mining/policy_test.go @@ -9,9 +9,9 @@ import ( "testing" "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) // newHashFromStr converts the passed big-endian hex string into a diff --git a/netsync/blocklogger.go b/netsync/blocklogger.go index 788192ccb2..31a6a4c509 100644 --- a/netsync/blocklogger.go +++ b/netsync/blocklogger.go @@ -5,11 +5,13 @@ package netsync import ( + "fmt" "sync" "time" - "github.com/btcsuite/btclog" + "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btclog" ) // blockProgressLogger provides periodic logging for other services in order @@ -27,8 +29,9 @@ type blockProgressLogger struct { // newBlockProgressLogger returns a new block progress logger. 
// The progress message is templated as follows: -// {progressAction} {numProcessed} {blocks|block} in the last {timePeriod} -// ({numTxs}, height {lastBlockHeight}, {lastBlockTimeStamp}) +// +// {progressAction} {numProcessed} {blocks|block} in the last {timePeriod} +// ({numTxs}, height {lastBlockHeight}, {lastBlockTimeStamp}) func newBlockProgressLogger(progressMessage string, logger btclog.Logger) *blockProgressLogger { return &blockProgressLogger{ lastBlockLogTime: time.Now(), @@ -40,7 +43,7 @@ func newBlockProgressLogger(progressMessage string, logger btclog.Logger) *block // LogBlockHeight logs a new block height as an information message to show // progress to the user. In order to prevent spam, it limits logging to one // message every 10 seconds with duration and totals included. -func (b *blockProgressLogger) LogBlockHeight(block *btcutil.Block) { +func (b *blockProgressLogger) LogBlockHeight(block *btcutil.Block, chain *blockchain.BlockChain) { b.Lock() defer b.Unlock() @@ -66,9 +69,10 @@ func (b *blockProgressLogger) LogBlockHeight(block *btcutil.Block) { if b.receivedLogTx == 1 { txStr = "transaction" } - b.subsystemLogger.Infof("%s %d %s in the last %s (%d %s, height %d, %s)", + cacheSizeStr := fmt.Sprintf("~%d MiB", chain.CachedStateSize()/1024/1024) + b.subsystemLogger.Infof("%s %d %s in the last %s (%d %s, height %d, %s, %s cache)", b.progressAction, b.receivedLogBlocks, blockStr, tDuration, b.receivedLogTx, - txStr, block.Height(), block.MsgBlock().Header.Timestamp) + txStr, block.Height(), block.MsgBlock().Header.Timestamp, cacheSizeStr) b.receivedLogBlocks = 0 b.receivedLogTx = 0 diff --git a/netsync/interface.go b/netsync/interface.go index 2ce479bf2d..6a873bd888 100644 --- a/netsync/interface.go +++ b/netsync/interface.go @@ -6,12 +6,12 @@ package netsync import ( "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" 
"github.com/btcsuite/btcd/mempool" "github.com/btcsuite/btcd/peer" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) // PeerNotifier exposes methods to notify peers of status changes to diff --git a/netsync/manager.go b/netsync/manager.go index a297bb3f24..41ba70aa6a 100644 --- a/netsync/manager.go +++ b/netsync/manager.go @@ -13,13 +13,13 @@ import ( "time" "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/database" "github.com/btcsuite/btcd/mempool" peerpkg "github.com/btcsuite/btcd/peer" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) const ( @@ -397,20 +397,55 @@ func (sm *SyncManager) isSyncCandidate(peer *peerpkg.Peer) bool { if host != "127.0.0.1" && host != "localhost" { return false } - } else { - // The peer is not a candidate for sync if it's not a full - // node. Additionally, if the segwit soft-fork package has - // activated, then the peer must also be upgraded. - segwitActive, err := sm.chain.IsDeploymentActive(chaincfg.DeploymentSegwit) - if err != nil { - log.Errorf("Unable to query for segwit "+ - "soft-fork state: %v", err) - } - nodeServices := peer.Services() - if nodeServices&wire.SFNodeNetwork != wire.SFNodeNetwork || - (segwitActive && !peer.IsWitnessEnabled()) { + + // Candidate if all checks passed. + return true + } + + // If the segwit soft-fork package has activated, then the peer must + // also be upgraded. 
+ segwitActive, err := sm.chain.IsDeploymentActive( + chaincfg.DeploymentSegwit, + ) + if err != nil { + log.Errorf("Unable to query for segwit soft-fork state: %v", + err) + } + + if segwitActive && !peer.IsWitnessEnabled() { + return false + } + + var ( + nodeServices = peer.Services() + fullNode = nodeServices.HasFlag(wire.SFNodeNetwork) + prunedNode = nodeServices.HasFlag(wire.SFNodeNetworkLimited) + ) + + switch { + case fullNode: + // Node is a sync candidate if it has all the blocks. + + case prunedNode: + // Even if the peer is pruned, if they have the node network + // limited flag, they are able to serve 2 days worth of blocks + // from the current tip. Therefore, check if our chaintip is + // within that range. + bestHeight := sm.chain.BestSnapshot().Height + peerLastBlock := peer.LastBlock() + + // bestHeight+1 as we need the peer to serve us the next block, + // not the one we already have. + if bestHeight+1 <= + peerLastBlock-wire.NodeNetworkLimitedBlockThreshold { + return false } + + default: + // If the peer isn't an archival node, and it's not signaling + // NODE_NETWORK_LIMITED, we can't sync off of this node. + return false } // Candidate if all checks passed. @@ -428,7 +463,7 @@ func (sm *SyncManager) handleNewPeerMsg(peer *peerpkg.Peer) { log.Infof("New valid peer %s (%s)", peer, peer.UserAgent()) - // Initialize the peer state + // Initialize the peer state. isSyncCandidate := sm.isSyncCandidate(peer) sm.peerStates[peer] = &peerSyncState{ syncCandidate: isSyncCandidate, @@ -781,7 +816,7 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { // When the block is not an orphan, log information about it and // update the chain state. - sm.progressLogger.LogBlockHeight(bmsg.block) + sm.progressLogger.LogBlockHeight(bmsg.block, sm.chain) // Update this peer's latest block height, for future // potential sync node candidacy. 
@@ -805,8 +840,13 @@ func (sm *SyncManager) handleBlockMsg(bmsg *blockMsg) { } } - // Nothing more to do if we aren't in headers-first mode. + // If we are not in headers first mode, it's a good time to periodically + // flush the blockchain cache because we don't expect new blocks immediately. + // After that, there is nothing more to do. if !sm.headersFirstMode { + if err := sm.chain.FlushUtxoCache(blockchain.FlushPeriodic); err != nil { + log.Errorf("Error while flushing the blockchain cache: %v", err) + } return } @@ -1379,6 +1419,11 @@ out: } } + log.Debug("Block handler shutting down: flushing blockchain caches...") + if err := sm.chain.FlushUtxoCache(blockchain.FlushRequired); err != nil { + log.Errorf("Error while flushing blockchain caches: %v", err) + } + sm.wg.Done() log.Trace("Block handler done") } diff --git a/peer/doc.go b/peer/doc.go index 88fae8e850..d2c66ff3cd 100644 --- a/peer/doc.go +++ b/peer/doc.go @@ -6,7 +6,7 @@ Package peer provides a common base for creating and managing Bitcoin network peers. -Overview +# Overview This package builds upon the wire package, which provides the fundamental primitives necessary to speak the bitcoin wire protocol, in order to simplify @@ -16,41 +16,41 @@ Payment Verification (SPV) nodes, proxies, etc. 
A quick overview of the major features peer provides are as follows: - - Provides a basic concurrent safe bitcoin peer for handling bitcoin - communications via the peer-to-peer protocol - - Full duplex reading and writing of bitcoin protocol messages - - Automatic handling of the initial handshake process including protocol - version negotiation - - Asynchronous message queuing of outbound messages with optional channel for - notification when the message is actually sent - - Flexible peer configuration - - Caller is responsible for creating outgoing connections and listening for - incoming connections so they have flexibility to establish connections as - they see fit (proxies, etc) - - User agent name and version - - Bitcoin network - - Service support signalling (full nodes, bloom filters, etc) - - Maximum supported protocol version - - Ability to register callbacks for handling bitcoin protocol messages - - Inventory message batching and send trickling with known inventory detection - and avoidance - - Automatic periodic keep-alive pinging and pong responses - - Random nonce generation and self connection detection - - Proper handling of bloom filter related commands when the caller does not - specify the related flag to signal support - - Disconnects the peer when the protocol version is high enough - - Does not invoke the related callbacks for older protocol versions - - Snapshottable peer statistics such as the total number of bytes read and - written, the remote address, user agent, and negotiated protocol version - - Helper functions pushing addresses, getblocks, getheaders, and reject - messages - - These could all be sent manually via the standard message output function, - but the helpers provide additional nice functionality such as duplicate - filtering and address randomization - - Ability to wait for shutdown/disconnect - - Comprehensive test coverage - -Peer Configuration + - Provides a basic concurrent safe bitcoin peer for handling bitcoin + 
communications via the peer-to-peer protocol + - Full duplex reading and writing of bitcoin protocol messages + - Automatic handling of the initial handshake process including protocol + version negotiation + - Asynchronous message queuing of outbound messages with optional channel for + notification when the message is actually sent + - Flexible peer configuration + 1. Caller is responsible for creating outgoing connections and listening for + incoming connections so they have flexibility to establish connections as + they see fit (proxies, etc) + 2. User agent name and version + 3. Bitcoin network + 4. Service support signalling (full nodes, bloom filters, etc) + 5. Maximum supported protocol version + 6. Ability to register callbacks for handling bitcoin protocol messages + - Inventory message batching and send trickling with known inventory detection + and avoidance + - Automatic periodic keep-alive pinging and pong responses + - Random nonce generation and self connection detection + - Proper handling of bloom filter related commands when the caller does not + specify the related flag to signal support + 1. Disconnects the peer when the protocol version is high enough + 2. Does not invoke the related callbacks for older protocol versions + - Snapshottable peer statistics such as the total number of bytes read and + written, the remote address, user agent, and negotiated protocol version + - Helper functions pushing addresses, getblocks, getheaders, and reject + messages + 1. These could all be sent manually via the standard message output function, + but the helpers provide additional nice functionality such as duplicate + filtering and address randomization + - Ability to wait for shutdown/disconnect + - Comprehensive test coverage + +# Peer Configuration All peer configuration is handled with the Config struct. 
This allows the caller to specify things such as the user agent name and version, the bitcoin @@ -58,7 +58,7 @@ network to use, which services it supports, and callbacks to invoke when bitcoin messages are received. See the documentation for each field of the Config struct for more details. -Inbound and Outbound Peers +# Inbound and Outbound Peers A peer can either be inbound or outbound. The caller is responsible for establishing the connection to remote peers and listening for incoming peers. @@ -73,7 +73,7 @@ Disconnect to disconnect from the peer and clean up all resources. WaitForDisconnect can be used to block until peer disconnection and resource cleanup has completed. -Callbacks +# Callbacks In order to do anything useful with a peer, it is necessary to react to bitcoin messages. This is accomplished by creating an instance of the MessageListeners @@ -92,7 +92,7 @@ It is often useful to use closures which encapsulate state when specifying the callback handlers. This provides a clean method for accessing that state when callbacks are invoked. -Queuing Messages and Inventory +# Queuing Messages and Inventory The QueueMessage function provides the fundamental means to send messages to the remote peer. As the name implies, this employs a non-blocking queue. A done @@ -106,7 +106,7 @@ QueueInventory function. It employs batching and trickling along with intelligent known remote peer inventory detection and avoidance through the use of a most-recently used algorithm. -Message Sending Helper Functions +# Message Sending Helper Functions In addition to the bare QueueMessage function previously described, the PushAddrMsg, PushGetBlocksMsg, PushGetHeadersMsg, and PushRejectMsg functions @@ -128,13 +128,13 @@ appropriate reject message based on the provided parameters as well as optionally provides a flag to cause it to block until the message is actually sent. 
-Peer Statistics +# Peer Statistics A snapshot of the current peer statistics can be obtained with the StatsSnapshot function. This includes statistics such as the total number of bytes read and written, the remote address, user agent, and negotiated protocol version. -Logging +# Logging This package provides extensive logging capabilities through the UseLogger function which allows a btclog.Logger to be specified. For example, logging at @@ -142,7 +142,7 @@ the debug level provides summaries of every message sent and received, and logging at the trace level provides full dumps of parsed messages as well as the raw message bytes using a format similar to hexdump -C. -Bitcoin Improvement Proposals +# Bitcoin Improvement Proposals This package supports all BIPS supported by the wire package. (https://pkg.go.dev/github.com/btcsuite/btcd/wire#hdr-Bitcoin_Improvement_Proposals) diff --git a/peer/peer.go b/peer/peer.go index 6d34c5f822..aa66cea98f 100644 --- a/peer/peer.go +++ b/peer/peer.go @@ -2206,14 +2206,14 @@ func (p *Peer) waitToFinishNegotiation(pver uint32) error { // peer. The events should occur in the following order, otherwise an error is // returned: // -// 1. Remote peer sends their version. -// 2. We send our version. -// 3. We send sendaddrv2 if their version is >= 70016. -// 4. We send our verack. -// 5. Wait until sendaddrv2 or verack is received. Unknown messages are -// skipped as it could be wtxidrelay or a different message in the future -// that btcd does not implement but bitcoind does. -// 6. If remote peer sent sendaddrv2 above, wait until receipt of verack. +// 1. Remote peer sends their version. +// 2. We send our version. +// 3. We send sendaddrv2 if their version is >= 70016. +// 4. We send our verack. +// 5. Wait until sendaddrv2 or verack is received. Unknown messages are +// skipped as it could be wtxidrelay or a different message in the future +// that btcd does not implement but bitcoind does. +// 6. 
If remote peer sent sendaddrv2 above, wait until receipt of verack. func (p *Peer) negotiateInboundProtocol() error { if err := p.readRemoteVersionMsg(); err != nil { return err @@ -2245,13 +2245,13 @@ func (p *Peer) negotiateInboundProtocol() error { // peer. The events should occur in the following order, otherwise an error is // returned: // -// 1. We send our version. -// 2. Remote peer sends their version. -// 3. We send sendaddrv2 if their version is >= 70016. -// 4. We send our verack. -// 5. We wait to receive sendaddrv2 or verack, skipping unknown messages as -// in the inbound case. -// 6. If sendaddrv2 was received, wait for receipt of verack. +// 1. We send our version. +// 2. Remote peer sends their version. +// 3. We send sendaddrv2 if their version is >= 70016. +// 4. We send our verack. +// 5. We wait to receive sendaddrv2 or verack, skipping unknown messages as +// in the inbound case. +// 6. If sendaddrv2 was received, wait for receipt of verack. func (p *Peer) negotiateOutboundProtocol() error { if err := p.writeLocalVersionMsg(); err != nil { return err diff --git a/rpcadapters.go b/rpcadapters.go index 487574a81c..5a6800c532 100644 --- a/rpcadapters.go +++ b/rpcadapters.go @@ -8,12 +8,12 @@ import ( "sync/atomic" "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/mempool" "github.com/btcsuite/btcd/netsync" "github.com/btcsuite/btcd/peer" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) // rpcPeer provides a peer for use with the RPC server and implements the diff --git a/rpcclient/chain.go b/rpcclient/chain.go index a97543fd3c..b21665991f 100644 --- a/rpcclient/chain.go +++ b/rpcclient/chain.go @@ -685,6 +685,44 @@ func (c *Client) GetBlockHeaderVerbose(blockHash *chainhash.Hash) (*btcjson.GetB return c.GetBlockHeaderVerboseAsync(blockHash).Receive() } +// FutureGetChainTipsResult is a future promise to deliver the result of a 
+// GetChainTips RPC invocation (or an applicable error). +type FutureGetChainTipsResult chan *Response + +// Receive waits for the Response promised by the future and returns the +// data structure of all the chain tips the node is aware of. +func (r FutureGetChainTipsResult) Receive() ([]*btcjson.GetChainTipsResult, error) { + res, err := ReceiveFuture(r) + if err != nil { + return nil, err + } + + // Unmarshal result as a string. + var chainTips []*btcjson.GetChainTipsResult + err = json.Unmarshal(res, &chainTips) + if err != nil { + return nil, err + } + + return chainTips, nil +} + +// GetChainTipsAsync returns an instance of a type that can be used to get the +// result of the RPC at some future time by invoking the Receive function on the +// returned instance. +// +// See GetChainTips for the blocking version and more details. +func (c *Client) GetChainTipsAsync() FutureGetChainTipsResult { + cmd := btcjson.NewGetChainTipsCmd() + return c.SendCmd(cmd) +} + +// GetChainTips returns a slice of data structure with information about all the +// current chain tips that this node is aware of. +func (c *Client) GetChainTips() ([]*btcjson.GetChainTipsResult, error) { + return c.GetChainTipsAsync().Receive() +} + // FutureGetMempoolEntryResult is a future promise to deliver the result of a // GetMempoolEntryAsync RPC invocation (or an applicable error). 
type FutureGetMempoolEntryResult chan *Response diff --git a/rpcclient/chain_test.go b/rpcclient/chain_test.go index e32d547ce3..de8d3a740e 100644 --- a/rpcclient/chain_test.go +++ b/rpcclient/chain_test.go @@ -1,6 +1,17 @@ package rpcclient -import "testing" +import ( + "errors" + "github.com/gorilla/websocket" + "net/http" + "net/http/httptest" + "strings" + "sync" + "testing" + "time" +) + +var upgrader = websocket.Upgrader{} // TestUnmarshalGetBlockChainInfoResult ensures that the SoftForks and // UnifiedSoftForks fields of GetBlockChainInfoResult are properly unmarshaled @@ -90,3 +101,195 @@ func TestUnmarshalGetBlockChainInfoResultSoftForks(t *testing.T) { } } } + +func TestFutureGetBlockCountResultReceiveErrors(t *testing.T) { + responseChan := FutureGetBlockCountResult(make(chan *Response)) + response := Response{ + result: []byte{}, + err: errors.New("blah blah something bad happened"), + } + go func() { + responseChan <- &response + }() + + _, err := responseChan.Receive() + if err == nil || err.Error() != "blah blah something bad happened" { + t.Fatalf("unexpected error: %s", err.Error()) + } +} + +func TestFutureGetBlockCountResultReceiveMarshalsResponseCorrectly(t *testing.T) { + responseChan := FutureGetBlockCountResult(make(chan *Response)) + response := Response{ + result: []byte{0x36, 0x36}, + err: nil, + } + go func() { + responseChan <- &response + }() + + res, err := responseChan.Receive() + if err != nil { + t.Fatalf("unexpected error: %s", err.Error()) + } + + if res != 66 { + t.Fatalf("unexpected response: %d (0x%X)", res, res) + } +} + +func TestClientConnectedToWSServerRunner(t *testing.T) { + type TestTableItem struct { + Name string + TestCase func(t *testing.T) + } + + testTable := []TestTableItem{ + TestTableItem{ + Name: "TestGetChainTxStatsAsyncSuccessTx", + TestCase: func(t *testing.T) { + client, serverReceivedChannel, cleanup := makeClient(t) + defer cleanup() + client.GetChainTxStatsAsync() + + message := <-serverReceivedChannel + 
if message != "{\"jsonrpc\":\"1.0\",\"method\":\"getchaintxstats\",\"params\":[],\"id\":1}" { + t.Fatalf("received unexpected message: %s", message) + } + }, + }, + TestTableItem{ + Name: "TestGetChainTxStatsAsyncShutdownError", + TestCase: func(t *testing.T) { + client, _, cleanup := makeClient(t) + defer cleanup() + + // a bit of a hack here: since there are multiple places where we read + // from the shutdown channel, and it is not buffered, ensure that a shutdown + // message is sent every time it is read from, this will ensure that + // when client.GetChainTxStatsAsync() gets called, it hits the non-blocking + // read from the shutdown channel + go func() { + type shutdownMessage struct{} + for { + client.shutdown <- shutdownMessage{} + } + }() + + var response *Response = nil + + for response == nil { + respChan := client.GetChainTxStatsAsync() + select { + case response = <-respChan: + default: + } + } + + if response.err == nil || response.err.Error() != "the client has been shutdown" { + t.Fatalf("unexpected error: %s", response.err.Error()) + } + }, + }, + TestTableItem{ + Name: "TestGetBestBlockHashAsync", + TestCase: func(t *testing.T) { + client, serverReceivedChannel, cleanup := makeClient(t) + defer cleanup() + ch := client.GetBestBlockHashAsync() + + message := <-serverReceivedChannel + if message != "{\"jsonrpc\":\"1.0\",\"method\":\"getbestblockhash\",\"params\":[],\"id\":1}" { + t.Fatalf("received unexpected message: %s", message) + } + + expectedResponse := Response{} + + wg := sync.WaitGroup{} + + wg.Add(1) + go func() { + defer wg.Done() + for { + client.requestLock.Lock() + if client.requestList.Len() > 0 { + r := client.requestList.Back() + r.Value.(*jsonRequest).responseChan <- &expectedResponse + client.requestLock.Unlock() + return + } + client.requestLock.Unlock() + } + }() + + response := <-ch + + if &expectedResponse != response { + t.Fatalf("received unexepcted response") + } + + // ensure the goroutine created in this test exists, + 
// the test is ran with a timeout + wg.Wait() + }, + }, + } + + // since these tests rely on concurrency, ensure there is a resonable timeout + // that they should run within + for _, testCase := range testTable { + done := make(chan bool) + + go func() { + t.Run(testCase.Name, testCase.TestCase) + done <- true + }() + + select { + case <-done: + case <-time.After(5 * time.Second): + t.Fatalf("timeout exceeded for: %s", testCase.Name) + } + } +} + +func makeClient(t *testing.T) (*Client, chan string, func()) { + serverReceivedChannel := make(chan string) + s := httptest.NewServer(http.HandlerFunc(makeUpgradeOnConnect(serverReceivedChannel))) + url := strings.TrimPrefix(s.URL, "http://") + + config := ConnConfig{ + DisableTLS: true, + User: "username", + Pass: "password", + Host: url, + } + + client, err := New(&config, nil) + if err != nil { + t.Fatalf("error when creating new client %s", err.Error()) + } + return client, serverReceivedChannel, func() { + s.Close() + } +} + +func makeUpgradeOnConnect(ch chan string) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + c, err := upgrader.Upgrade(w, r, nil) + if err != nil { + return + } + defer c.Close() + for { + _, message, err := c.ReadMessage() + if err != nil { + break + } + + go func() { + ch <- string(message) + }() + } + } +} diff --git a/rpcclient/doc.go b/rpcclient/doc.go index b682ba10f2..8057dd240c 100644 --- a/rpcclient/doc.go +++ b/rpcclient/doc.go @@ -5,7 +5,7 @@ /* Package rpcclient implements a websocket-enabled Bitcoin JSON-RPC client. -Overview +# Overview This client provides a robust and easy to use client for interfacing with a Bitcoin RPC server that uses a btcd/bitcoin core compatible Bitcoin JSON-RPC @@ -24,7 +24,7 @@ btcd or btcwallet by default. However, configuration options are provided to fall back to HTTP POST and disable TLS to support talking with inferior bitcoin core style RPC servers. 
-Websockets vs HTTP POST +# Websockets vs HTTP POST In HTTP POST-based JSON-RPC, every request creates a new HTTP connection, issues the call, waits for the response, and closes the connection. This adds @@ -40,7 +40,7 @@ can be invoked without having to go through a connect/disconnect cycle for every call. In addition, the websocket interface provides other nice features such as the ability to register for asynchronous notifications of various events. -Synchronous vs Asynchronous API +# Synchronous vs Asynchronous API The client provides both a synchronous (blocking) and asynchronous API. @@ -57,7 +57,7 @@ the Receive method on the returned instance will either return the result immediately if it has already arrived, or block until it has. This is useful since it provides the caller with greater control over concurrency. -Notifications +# Notifications The first important part of notifications is to realize that they will only work when connected via websockets. This should intuitively make sense @@ -67,7 +67,7 @@ All notifications provided by btcd require registration to opt-in. For example, if you want to be notified when funds are received by a set of addresses, you register the addresses via the NotifyReceived (or NotifyReceivedAsync) function. -Notification Handlers +# Notification Handlers Notifications are exposed by the client through the use of callback handlers which are setup via a NotificationHandlers instance that is specified by the @@ -83,7 +83,7 @@ will cause a deadlock as more server responses won't be read until the callback returns, but the callback would be waiting for a response. Thus, any additional RPCs must be issued an a completely decoupled manner. -Automatic Reconnection +# Automatic Reconnection By default, when running in websockets mode, this client will automatically keep trying to reconnect to the RPC server should the connection be lost. There @@ -116,7 +116,7 @@ chain services will be available. 
Depending on your application, you might only need chain-related RPCs. In contrast, btcwallet provides pass through treatment for chain-related RPCs, so it supports them in addition to wallet-related RPCs. -Errors +# Errors There are 3 categories of errors that will be returned throughout this package: @@ -144,35 +144,35 @@ The third category of errors, that is errors returned by the server, can be detected by type asserting the error in a *btcjson.RPCError. For example, to detect if a command is unimplemented by the remote RPC server: - amount, err := client.GetBalance("") - if err != nil { - if jerr, ok := err.(*btcjson.RPCError); ok { - switch jerr.Code { - case btcjson.ErrRPCUnimplemented: - // Handle not implemented error + amount, err := client.GetBalance("") + if err != nil { + if jerr, ok := err.(*btcjson.RPCError); ok { + switch jerr.Code { + case btcjson.ErrRPCUnimplemented: + // Handle not implemented error - // Handle other specific errors you care about - } - } + // Handle other specific errors you care about + } + } - // Log or otherwise handle the error knowing it was not one returned - // from the remote RPC server. - } + // Log or otherwise handle the error knowing it was not one returned + // from the remote RPC server. 
+ } -Example Usage +# Example Usage The following full-blown client examples are in the examples directory: - - bitcoincorehttp - Connects to a bitcoin core RPC server using HTTP POST mode with TLS disabled - and gets the current block count - - btcdwebsockets - Connects to a btcd RPC server using TLS-secured websockets, registers for - block connected and block disconnected notifications, and gets the current - block count - - btcwalletwebsockets - Connects to a btcwallet RPC server using TLS-secured websockets, registers - for notifications about changes to account balances, and gets a list of - unspent transaction outputs (utxos) the wallet can sign + - bitcoincorehttp + Connects to a bitcoin core RPC server using HTTP POST mode with TLS disabled + and gets the current block count + - btcdwebsockets + Connects to a btcd RPC server using TLS-secured websockets, registers for + block connected and block disconnected notifications, and gets the current + block count + - btcwalletwebsockets + Connects to a btcwallet RPC server using TLS-secured websockets, registers + for notifications about changes to account balances, and gets a list of + unspent transaction outputs (utxos) the wallet can sign */ package rpcclient diff --git a/rpcclient/examples/btcdwebsockets/main.go b/rpcclient/examples/btcdwebsockets/main.go index 1f18b9aab4..e3f4c13e40 100644 --- a/rpcclient/examples/btcdwebsockets/main.go +++ b/rpcclient/examples/btcdwebsockets/main.go @@ -10,9 +10,9 @@ import ( "path/filepath" "time" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/rpcclient" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) func main() { diff --git a/rpcclient/examples/btcwalletwebsockets/main.go b/rpcclient/examples/btcwalletwebsockets/main.go index 7f177e0b1f..3cbd9a3667 100644 --- a/rpcclient/examples/btcwalletwebsockets/main.go +++ b/rpcclient/examples/btcwalletwebsockets/main.go @@ -10,8 +10,8 @@ import ( "path/filepath" "time" - 
"github.com/btcsuite/btcd/rpcclient" "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/rpcclient" "github.com/davecgh/go-spew/spew" ) diff --git a/rpcclient/extensions.go b/rpcclient/extensions.go index efb6c1c710..b7517cf26e 100644 --- a/rpcclient/extensions.go +++ b/rpcclient/extensions.go @@ -13,9 +13,9 @@ import ( "fmt" "github.com/btcsuite/btcd/btcjson" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) // FutureDebugLevelResult is a future promise to deliver the result of a @@ -56,7 +56,8 @@ func (c *Client) DebugLevelAsync(levelSpec string) FutureDebugLevelResult { // specification. // // The levelspec can be either a debug level or of the form: -// =,=,... +// +// =,=,... // // Additionally, the special keyword 'show' can be used to get a list of the // available subsystems. diff --git a/rpcclient/infrastructure.go b/rpcclient/infrastructure.go index 77d0d8b73f..a32511175b 100644 --- a/rpcclient/infrastructure.go +++ b/rpcclient/infrastructure.go @@ -733,10 +733,10 @@ out: // Reset the connection state and signal the reconnect // has happened. + c.mtx.Lock() c.wsConn = wsConn c.retryCount = 0 - c.mtx.Lock() c.disconnect = make(chan struct{}) c.disconnected = false c.mtx.Unlock() @@ -761,9 +761,7 @@ out: // handleSendPostMessage handles performing the passed HTTP request, reading the // result, unmarshalling it, and delivering the unmarshalled result to the // provided response channel. 
-func (c *Client) handleSendPostMessage(jReq *jsonRequest, - shutdown chan struct{}) { - +func (c *Client) handleSendPostMessage(jReq *jsonRequest) { protocol := "http" if !c.config.DisableTLS { protocol = "https" @@ -825,7 +823,7 @@ func (c *Client) handleSendPostMessage(jReq *jsonRequest, select { case <-time.After(backoff): - case <-shutdown: + case <-c.shutdown: return } } @@ -834,7 +832,7 @@ func (c *Client) handleSendPostMessage(jReq *jsonRequest, return } - // We still want to return an error if for any reason the respone + // We still want to return an error if for any reason the response // remains empty. if httpResponse == nil { jReq.responseChan <- &Response{ @@ -893,7 +891,7 @@ out: // is closed. select { case jReq := <-c.sendPostChan: - c.handleSendPostMessage(jReq, c.shutdown) + c.handleSendPostMessage(jReq) case <-c.shutdown: break out @@ -917,7 +915,6 @@ cleanup: } c.wg.Done() log.Tracef("RPC client send handler done for %s", c.config.Host) - } // sendPostRequest sends the passed HTTP request to the RPC server using the @@ -931,9 +928,13 @@ func (c *Client) sendPostRequest(jReq *jsonRequest) { default: } - log.Tracef("Sending command [%s] with id %d", jReq.method, jReq.id) + select { + case c.sendPostChan <- jReq: + log.Tracef("Sent command [%s] with id %d", jReq.method, jReq.id) - c.sendPostChan <- jReq + case <-c.shutdown: + return + } } // newFutureError returns a new future result channel that already has the diff --git a/rpcclient/mining.go b/rpcclient/mining.go index 680a63b6d5..9de2f27ed6 100644 --- a/rpcclient/mining.go +++ b/rpcclient/mining.go @@ -10,8 +10,8 @@ import ( "errors" "github.com/btcsuite/btcd/btcjson" - "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg/chainhash" ) // FutureGenerateResult is a future promise to deliver the result of a diff --git a/rpcclient/notify.go b/rpcclient/notify.go index 225af281cf..1c2814c313 100644 --- a/rpcclient/notify.go +++ 
b/rpcclient/notify.go @@ -14,9 +14,9 @@ import ( "time" "github.com/btcsuite/btcd/btcjson" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) var ( diff --git a/rpcclient/rawtransactions.go b/rpcclient/rawtransactions.go index 3643f2b0ca..1df6195220 100644 --- a/rpcclient/rawtransactions.go +++ b/rpcclient/rawtransactions.go @@ -10,9 +10,9 @@ import ( "encoding/json" "github.com/btcsuite/btcd/btcjson" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" ) const ( diff --git a/rpcclient/wallet.go b/rpcclient/wallet.go index 64f1e40afa..7b7e7212c9 100644 --- a/rpcclient/wallet.go +++ b/rpcclient/wallet.go @@ -1016,10 +1016,10 @@ func (c *Client) CreateWalletAsync(name string, opts ...CreateWalletOpt) FutureC // // Optional parameters can be specified using functional-options pattern. The // following functions are available: -// * WithCreateWalletDisablePrivateKeys -// * WithCreateWalletBlank -// * WithCreateWalletPassphrase -// * WithCreateWalletAvoidReuse +// - WithCreateWalletDisablePrivateKeys +// - WithCreateWalletBlank +// - WithCreateWalletPassphrase +// - WithCreateWalletAvoidReuse func (c *Client) CreateWallet(name string, opts ...CreateWalletOpt) (*btcjson.CreateWalletResult, error) { return c.CreateWalletAsync(name, opts...).Receive() } @@ -2661,7 +2661,7 @@ func (c *Client) WalletCreateFundedPsbt( type FutureWalletProcessPsbtResult chan *Response // Receive waits for the Response promised by the future and returns an updated -// PSBT with signed inputs from the wallet and a boolen indicating if the the +// PSBT with signed inputs from the wallet and a boolean indicating if the // transaction has a complete set of signatures.
func (r FutureWalletProcessPsbtResult) Receive() (*btcjson.WalletProcessPsbtResult, error) { res, err := ReceiveFuture(r) diff --git a/rpcserver.go b/rpcserver.go index b917263df5..94980c463f 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -146,6 +146,7 @@ var rpcHandlersBeforeInit = map[string]commandHandler{ "getblockhash": handleGetBlockHash, "getblockheader": handleGetBlockHeader, "getblocktemplate": handleGetBlockTemplate, + "getchaintips": handleGetChainTips, "getcfilter": handleGetCFilter, "getcfilterheader": handleGetCFilterHeader, "getconnectioncount": handleGetConnectionCount, @@ -231,7 +232,6 @@ var rpcAskWallet = map[string]struct{}{ // Commands that are currently unimplemented, but should ultimately be. var rpcUnimplemented = map[string]struct{}{ "estimatepriority": {}, - "getchaintips": {}, "getmempoolentry": {}, "getnetworkinfo": {}, "getwork": {}, @@ -266,6 +266,7 @@ var rpcLimited = map[string]struct{}{ "getblockcount": {}, "getblockhash": {}, "getblockheader": {}, + "getchaintips": {}, "getcfilter": {}, "getcfilterheader": {}, "getcurrentnet": {}, @@ -738,6 +739,13 @@ func createVoutList(mtx *wire.MsgTx, chainParams *chaincfg.Params, filterAddrMap vout.ScriptPubKey.Type = scriptClass.String() vout.ScriptPubKey.ReqSigs = int32(reqSigs) + // Address is defined when there's a single well-defined + // receiver address. To spend the output a signature for this, + // and only this, address is required. + if len(encodedAddrs) == 1 && reqSigs <= 1 { + vout.ScriptPubKey.Address = encodedAddrs[0] + } + voutList = append(voutList, vout) } @@ -857,6 +865,13 @@ func handleDecodeScript(s *rpcServer, cmd interface{}, closeChan <-chan struct{} if scriptClass != txscript.ScriptHashTy { reply.P2sh = p2sh.EncodeAddress() } + + // Address is defined when there's a single well-defined + // receiver address. To spend the output a signature for this, + // and only this, address is required. 
+ if len(addresses) == 1 && reqSigs <= 1 { + reply.Address = addresses[0] + } return reply, nil } @@ -1200,7 +1215,7 @@ func handleGetBlockChainInfo(s *rpcServer, cmd interface{}, closeChan <-chan str BestBlockHash: chainSnapshot.Hash.String(), Difficulty: getDifficultyRatio(chainSnapshot.Bits, params), MedianTime: chainSnapshot.MedianTime.Unix(), - Pruned: false, + Pruned: cfg.Prune != 0, SoftForks: &btcjson.SoftForks{ Bip9SoftForks: make(map[string]*btcjson.Bip9SoftForkDescription), }, @@ -1651,8 +1666,8 @@ func (state *gbtWorkState) updateBlockTemplate(s *rpcServer, useCoinbaseValue bo // Update the merkle root. block := btcutil.NewBlock(template.Block) - merkles := blockchain.BuildMerkleTreeStore(block.Transactions(), false) - template.Block.Header.MerkleRoot = *merkles[len(merkles)-1] + merkleRoot := blockchain.CalcMerkleRoot(block.Transactions(), false) + template.Block.Header.MerkleRoot = merkleRoot } // Set locals for convenience. @@ -2192,6 +2207,28 @@ func handleGetBlockTemplate(s *rpcServer, cmd interface{}, closeChan <-chan stru } } +// handleGetChainTips implements the getchaintips command. +func handleGetChainTips(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { + chainTips := s.cfg.Chain.ChainTips() + + ret := make([]btcjson.GetChainTipsResult, 0, len(chainTips)) + for _, chainTip := range chainTips { + ret = append(ret, struct { + Height int32 "json:\"height\"" + Hash string "json:\"hash\"" + BranchLen int32 "json:\"branchlen\"" + Status string "json:\"status\"" + }{ + Height: chainTip.Height, + Hash: chainTip.BlockHash.String(), + BranchLen: chainTip.BranchLen, + Status: chainTip.Status.String(), + }) + } + + return ret, nil +} + // handleGetCFilter implements the getcfilter command. 
func handleGetCFilter(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { if s.cfg.CfIndex == nil { @@ -2725,6 +2762,7 @@ func handleGetTxOut(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i var value int64 var pkScript []byte var isCoinbase bool + var address string includeMempool := true if c.IncludeMempool != nil { includeMempool = *c.IncludeMempool @@ -2798,6 +2836,13 @@ func handleGetTxOut(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i addresses[i] = addr.EncodeAddress() } + // Address is defined when there's a single well-defined + // receiver address. To spend the output a signature for this, + // and only this, address is required. + if len(addresses) == 1 && reqSigs <= 1 { + address = addresses[0] + } + txOutReply := &btcjson.GetTxOutResult{ BestBlock: bestBlockHash, Confirmations: int64(confirmations), @@ -2807,10 +2852,12 @@ func handleGetTxOut(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i Hex: hex.EncodeToString(pkScript), ReqSigs: int32(reqSigs), Type: scriptClass.String(), + Address: address, Addresses: addresses, }, Coinbase: isCoinbase, } + return txOutReply, nil } diff --git a/rpcserverhelp.go b/rpcserverhelp.go index 16bbb62a2b..f1203de8d9 100644 --- a/rpcserverhelp.go +++ b/rpcserverhelp.go @@ -84,9 +84,10 @@ var helpDescsEnUS = map[string]string{ // ScriptPubKeyResult help. "scriptpubkeyresult-asm": "Disassembly of the script", "scriptpubkeyresult-hex": "Hex-encoded bytes of the script", - "scriptpubkeyresult-reqSigs": "The number of required signatures", + "scriptpubkeyresult-reqSigs": "(DEPRECATED) The number of required signatures", "scriptpubkeyresult-type": "The type of the script (e.g. 
'pubkeyhash')", - "scriptpubkeyresult-addresses": "The bitcoin addresses associated with this script", + "scriptpubkeyresult-address": "The bitcoin address associated with this script (only if a well-defined address exists)", + "scriptpubkeyresult-addresses": "(DEPRECATED) The bitcoin addresses associated with this script", // Vout help. "vout-value": "The amount in BTC", @@ -106,9 +107,10 @@ var helpDescsEnUS = map[string]string{ // DecodeScriptResult help. "decodescriptresult-asm": "Disassembly of the script", - "decodescriptresult-reqSigs": "The number of required signatures", + "decodescriptresult-reqSigs": "(DEPRECATED) The number of required signatures", "decodescriptresult-type": "The type of the script (e.g. 'pubkeyhash')", - "decodescriptresult-addresses": "The bitcoin addresses associated with this script", + "decodescriptresult-address": "The bitcoin address associated with this script (only if a well-defined address exists)", + "decodescriptresult-addresses": "(DEPRECATED) The bitcoin addresses associated with this script", "decodescriptresult-p2sh": "The script hash for use in pay-to-script-hash transactions (only present if the provided redeem script is not already a pay-to-script-hash script)", // DecodeScriptCmd help. @@ -347,6 +349,15 @@ var helpDescsEnUS = map[string]string{ "getblocktemplate--condition2": "mode=proposal, accepted", "getblocktemplate--result1": "An error string which represents why the proposal was rejected or nothing if accepted", + // GetChainTipsResult help. + "getchaintipsresult-chaintips": "The chaintips that this node is aware of", + "getchaintipsresult-height": "The height of the chain tip", + "getchaintipsresult-hash": "The block hash of the chain tip", + "getchaintipsresult-branchlen": "Returns zero for main chain. Otherwise is the length of branch connecting the tip to the main chain", + "getchaintipsresult-status": "Status of the chain. Returns \"active\" for the main chain", + // GetChainTipsCmd help. 
+ "getchaintips--synopsis": "Returns information about all known tips in the block tree, including the main chain as well as orphaned branches.", + // GetCFilterCmd help. "getcfilter--synopsis": "Returns a block's committed filter given its hash.", "getcfilter-filtertype": "The type of filter to return (0=regular)", @@ -728,6 +739,7 @@ var rpcResultTypes = map[string][]interface{}{ "getblockheader": {(*string)(nil), (*btcjson.GetBlockHeaderVerboseResult)(nil)}, "getblocktemplate": {(*btcjson.GetBlockTemplateResult)(nil), (*string)(nil), nil}, "getblockchaininfo": {(*btcjson.GetBlockChainInfoResult)(nil)}, + "getchaintips": {(*[]btcjson.GetChainTipsResult)(nil)}, "getcfilter": {(*string)(nil)}, "getcfilterheader": {(*string)(nil)}, "getconnectioncount": {(*int32)(nil)}, diff --git a/rpcwebsocket.go b/rpcwebsocket.go index 4d140b4825..aedbcf90b6 100644 --- a/rpcwebsocket.go +++ b/rpcwebsocket.go @@ -22,12 +22,12 @@ import ( "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcjson" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/database" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/websocket" "golang.org/x/crypto/ripemd160" ) diff --git a/server.go b/server.go index 5ef2e31942..356326ab1c 100644 --- a/server.go +++ b/server.go @@ -44,8 +44,8 @@ import ( const ( // defaultServices describes the default services that are supported by // the server. - defaultServices = wire.SFNodeNetwork | wire.SFNodeBloom | - wire.SFNodeWitness | wire.SFNodeCF + defaultServices = wire.SFNodeNetwork | wire.SFNodeNetworkLimited | + wire.SFNodeBloom | wire.SFNodeWitness | wire.SFNodeCF // defaultRequiredServices describes the default services that are // required to be supported by outbound peers. 
@@ -2730,6 +2730,9 @@ func newServer(listenAddrs, agentBlacklist, agentWhitelist []string, if cfg.NoCFilters { services &^= wire.SFNodeCF } + if cfg.Prune != 0 { + services &^= wire.SFNodeNetwork + } amgr := addrmgr.New(cfg.DataDir, btcdLookup) @@ -2823,14 +2826,16 @@ func newServer(listenAddrs, agentBlacklist, agentWhitelist []string, // Create a new block chain instance with the appropriate configuration. var err error s.chain, err = blockchain.New(&blockchain.Config{ - DB: s.db, - Interrupt: interrupt, - ChainParams: s.chainParams, - Checkpoints: checkpoints, - TimeSource: s.timeSource, - SigCache: s.sigCache, - IndexManager: indexManager, - HashCache: s.hashCache, + DB: s.db, + Interrupt: interrupt, + ChainParams: s.chainParams, + Checkpoints: checkpoints, + TimeSource: s.timeSource, + SigCache: s.sigCache, + IndexManager: indexManager, + HashCache: s.hashCache, + Prune: cfg.Prune * 1024 * 1024, + UtxoCacheMaxSize: uint64(cfg.UtxoCacheMaxSizeMiB) * 1024 * 1024, }) if err != nil { return nil, err diff --git a/signalsigterm.go b/signalsigterm.go index 831655010e..63bdb9c01d 100644 --- a/signalsigterm.go +++ b/signalsigterm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build darwin dragonfly freebsd linux netbsd openbsd solaris package main diff --git a/txscript/doc.go b/txscript/doc.go index 7da521615a..d6eddd5a65 100644 --- a/txscript/doc.go +++ b/txscript/doc.go @@ -12,7 +12,7 @@ overview to provide information on how to use the package. This package provides data structures and functions to parse and execute bitcoin transaction scripts. -Script Overview +# Script Overview Bitcoin transaction scripts are written in a stack-base, FORTH-like language. @@ -25,12 +25,12 @@ to right and intentionally do not provide loops. 
The vast majority of Bitcoin scripts at the time of this writing are of several standard forms which consist of a spender providing a public key and a signature which proves the spender owns the associated private key. This information -is used to prove the the spender is authorized to perform the transaction. +is used to prove the spender is authorized to perform the transaction. One benefit of using a scripting language is added flexibility in specifying what conditions must be met in order to spend bitcoins. -Errors +# Errors Errors returned by this package are of type txscript.Error. This allows the caller to programmatically determine the specific error by examining the diff --git a/txscript/engine.go b/txscript/engine.go index 7dfd092eae..30206152b8 100644 --- a/txscript/engine.go +++ b/txscript/engine.go @@ -285,6 +285,33 @@ type Engine struct { witnessProgram []byte inputAmount int64 taprootCtx *taprootExecutionCtx + + // stepCallback is an optional function that will be called every time + // a step has been performed during script execution. + // + // NOTE: This is only meant to be used in debugging, and SHOULD NOT BE + // USED during regular operation. + stepCallback func(*StepInfo) error +} + +// StepInfo houses the current VM state information that is passed back to the +// stepCallback during script execution. +type StepInfo struct { + // ScriptIndex is the index of the script currently being executed by + // the Engine. + ScriptIndex int + + // OpcodeIndex is the index of the next opcode that will be executed. + // In case the execution has completed, the opcode index will be + // incremented beyond the number of the current script's opcodes. This + // indicates no new script is being executed, and execution is done. + OpcodeIndex int + + // Stack is the Engine's current content on the stack. + Stack [][]byte + + // AltStack is the Engine's current content on the alt stack.
+ AltStack [][]byte } // hasFlag returns whether the script engine instance has the passed flag set. @@ -1023,6 +1050,17 @@ func (vm *Engine) Step() (done bool, err error) { return false, nil } +// copyStack makes a deep copy of the provided slice. +func copyStack(stk [][]byte) [][]byte { + c := make([][]byte, len(stk)) + for i := range stk { + c[i] = make([]byte, len(stk[i])) + copy(c[i][:], stk[i][:]) + } + + return c +} + // Execute will execute all scripts in the script engine and return either nil // for successful validation or an error if one occurred. func (vm *Engine) Execute() (err error) { @@ -1033,6 +1071,22 @@ func (vm *Engine) Execute() (err error) { return nil } + // If the stepCallback is set, we start by making a call back with the + // initial engine state. + var stepInfo *StepInfo + if vm.stepCallback != nil { + stepInfo = &StepInfo{ + ScriptIndex: vm.scriptIdx, + OpcodeIndex: vm.opcodeIdx, + Stack: copyStack(vm.dstack.stk), + AltStack: copyStack(vm.astack.stk), + } + err := vm.stepCallback(stepInfo) + if err != nil { + return err + } + } + done := false for !done { log.Tracef("%v", newLogClosure(func() string { @@ -1060,6 +1114,31 @@ func (vm *Engine) Execute() (err error) { return dstr + astr })) + + if vm.stepCallback != nil { + scriptIdx := vm.scriptIdx + opcodeIdx := vm.opcodeIdx + + // In case the execution has completed, we keep the + // current script index while increasing the opcode + // index. This is to indicate that no new script is + // being executed. 
+ if done { + scriptIdx = stepInfo.ScriptIndex + opcodeIdx = stepInfo.OpcodeIndex + 1 + } + + stepInfo = &StepInfo{ + ScriptIndex: scriptIdx, + OpcodeIndex: opcodeIdx, + Stack: copyStack(vm.dstack.stk), + AltStack: copyStack(vm.astack.stk), + } + err := vm.stepCallback(stepInfo) + if err != nil { + return err + } + } } return vm.CheckErrorCondition(true) @@ -1549,3 +1628,22 @@ func NewEngine(scriptPubKey []byte, tx *wire.MsgTx, txIdx int, flags ScriptFlags return &vm, nil } + +// NewDebugEngine returns a new script engine with a script execution callback set. +// This is useful for debugging script execution. +func NewDebugEngine(scriptPubKey []byte, tx *wire.MsgTx, txIdx int, + flags ScriptFlags, sigCache *SigCache, hashCache *TxSigHashes, + inputAmount int64, prevOutFetcher PrevOutputFetcher, + stepCallback func(*StepInfo) error) (*Engine, error) { + + vm, err := NewEngine( + scriptPubKey, tx, txIdx, flags, sigCache, hashCache, + inputAmount, prevOutFetcher, + ) + if err != nil { + return nil, err + } + + vm.stepCallback = stepCallback + return vm, nil +} diff --git a/txscript/engine_debug_test.go b/txscript/engine_debug_test.go new file mode 100644 index 0000000000..5ebfe3f3cf --- /dev/null +++ b/txscript/engine_debug_test.go @@ -0,0 +1,178 @@ +// Copyright (c) 2013-2023 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package txscript + +import ( + "testing" + + "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec/v2/schnorr" + "github.com/btcsuite/btcd/wire" + "github.com/stretchr/testify/require" +) + +// TestDebugEngine checks that the step callback called during debug script +// execution contains the expected data. +func TestDebugEngine(t *testing.T) { + t.Parallel() + + // We'll generate a private key and a signature for the tx.
+ privKey, err := btcec.NewPrivateKey() + require.NoError(t, err) + + internalKey := privKey.PubKey() + + // We use a simple script that will utilize both the stack and alt + // stack in order to test the step callback, and wrap it in a taproot + // witness script. + builder := NewScriptBuilder() + builder.AddData([]byte{0xab}) + builder.AddOp(OP_TOALTSTACK) + builder.AddData(schnorr.SerializePubKey(internalKey)) + builder.AddOp(OP_CHECKSIG) + builder.AddOp(OP_VERIFY) + builder.AddOp(OP_1) + pkScript, err := builder.Script() + require.NoError(t, err) + + tapLeaf := NewBaseTapLeaf(pkScript) + tapScriptTree := AssembleTaprootScriptTree(tapLeaf) + + ctrlBlock := tapScriptTree.LeafMerkleProofs[0].ToControlBlock( + internalKey, + ) + + tapScriptRootHash := tapScriptTree.RootNode.TapHash() + outputKey := ComputeTaprootOutputKey( + internalKey, tapScriptRootHash[:], + ) + p2trScript, err := PayToTaprootScript(outputKey) + require.NoError(t, err) + + testTx := wire.NewMsgTx(2) + testTx.AddTxIn(&wire.TxIn{ + PreviousOutPoint: wire.OutPoint{ + Index: 1, + }, + }) + txOut := &wire.TxOut{ + Value: 1e8, PkScript: p2trScript, + } + testTx.AddTxOut(txOut) + + prevFetcher := NewCannedPrevOutputFetcher( + txOut.PkScript, txOut.Value, + ) + sigHashes := NewTxSigHashes(testTx, prevFetcher) + + sig, err := RawTxInTapscriptSignature( + testTx, sigHashes, 0, txOut.Value, + txOut.PkScript, tapLeaf, + SigHashDefault, privKey, + ) + require.NoError(t, err) + + // Now that we have the sig, we'll make a valid witness + // including the control block. + ctrlBlockBytes, err := ctrlBlock.ToBytes() + require.NoError(t, err) + txCopy := testTx.Copy() + txCopy.TxIn[0].Witness = wire.TxWitness{ + sig, pkScript, ctrlBlockBytes, + } + + expCallback := []StepInfo{ + // First callback is looking at the OP_1 witness version. 
+ { + ScriptIndex: 1, + OpcodeIndex: 0, + Stack: [][]byte{}, + AltStack: [][]byte{}, + }, + // The OP_1 witness version is pushed to stack, + { + ScriptIndex: 1, + OpcodeIndex: 1, + Stack: [][]byte{{0x01}}, + AltStack: [][]byte{}, + }, + // Then the taproot script is being executed, starting with + // only the signature on the stacks. + { + ScriptIndex: 2, + OpcodeIndex: 0, + Stack: [][]byte{sig}, + AltStack: [][]byte{}, + }, + // 0xab is pushed to the stack. + { + ScriptIndex: 2, + OpcodeIndex: 1, + Stack: [][]byte{sig, {0xab}}, + AltStack: [][]byte{}, + }, + // 0xab is moved to the alt stack. + { + ScriptIndex: 2, + OpcodeIndex: 2, + Stack: [][]byte{sig}, + AltStack: [][]byte{{0xab}}, + }, + // The public key is pushed to the stack. + { + ScriptIndex: 2, + OpcodeIndex: 3, + Stack: [][]byte{ + sig, + schnorr.SerializePubKey(internalKey), + }, + AltStack: [][]byte{{0xab}}, + }, + // OP_CHECKSIG is executed, resulting in 0x01 on the stack. + { + ScriptIndex: 2, + OpcodeIndex: 4, + Stack: [][]byte{ + {0x01}, + }, + AltStack: [][]byte{{0xab}}, + }, + // OP_VERIFY pops and checks the top stack element. + { + ScriptIndex: 2, + OpcodeIndex: 5, + Stack: [][]byte{}, + AltStack: [][]byte{{0xab}}, + }, + // A single OP_1 push completes the script execution (note that + // the alt stack is cleared when the script is "done"). + { + ScriptIndex: 2, + OpcodeIndex: 6, + Stack: [][]byte{{0x01}}, + AltStack: [][]byte{}, + }, + } + + stepIndex := 0 + callback := func(s *StepInfo) error { + require.Less( + t, stepIndex, len(expCallback), "unexpected callback", + ) + + require.Equal(t, &expCallback[stepIndex], s) + stepIndex++ + return nil + } + + // Run the debug engine. 
+ vm, err := NewDebugEngine( + txOut.PkScript, txCopy, 0, StandardVerifyFlags, + nil, sigHashes, txOut.Value, prevFetcher, + callback, + ) + require.NoError(t, err) + require.NoError(t, vm.Execute()) +} diff --git a/txscript/error.go b/txscript/error.go index 072778a268..1f046b9612 100644 --- a/txscript/error.go +++ b/txscript/error.go @@ -506,10 +506,10 @@ func (e ErrorCode) String() string { // Error identifies a script-related error. It is used to indicate three // classes of errors: -// 1) Script execution failures due to violating one of the many requirements -// imposed by the script engine or evaluating to false -// 2) Improper API usage by callers -// 3) Internal consistency check failures +// 1. Script execution failures due to violating one of the many requirements +// imposed by the script engine or evaluating to false +// 2. Improper API usage by callers +// 3. Internal consistency check failures // // The caller can use type assertions on the returned errors to access the // ErrorCode field to ascertain the specific reason for the error. As an diff --git a/txscript/opcode.go b/txscript/opcode.go index 6e0434423e..4918b991c5 100644 --- a/txscript/opcode.go +++ b/txscript/opcode.go @@ -1073,7 +1073,7 @@ func opcodeCheckLockTimeVerify(op *opcode, data []byte, vm *Engine) error { if err != nil { return err } - lockTime, err := makeScriptNum(so, vm.dstack.verifyMinimalData, 5) + lockTime, err := MakeScriptNum(so, vm.dstack.verifyMinimalData, 5) if err != nil { return err } @@ -1147,7 +1147,7 @@ func opcodeCheckSequenceVerify(op *opcode, data []byte, vm *Engine) error { if err != nil { return err } - stackSequence, err := makeScriptNum(so, vm.dstack.verifyMinimalData, 5) + stackSequence, err := MakeScriptNum(so, vm.dstack.verifyMinimalData, 5) if err != nil { return err } @@ -1171,7 +1171,7 @@ func opcodeCheckSequenceVerify(op *opcode, data []byte, vm *Engine) error { // Transaction version numbers not high enough to trigger CSV rules must // fail. 
- if vm.tx.Version < 2 { + if uint32(vm.tx.Version) < 2 { str := fmt.Sprintf("invalid transaction version: %d", vm.tx.Version) return scriptError(ErrUnsatisfiedLockTime, str) diff --git a/txscript/script.go b/txscript/script.go index 602c1fced1..18723067ee 100644 --- a/txscript/script.go +++ b/txscript/script.go @@ -40,13 +40,13 @@ const ( MaxScriptElementSize = 520 // Max bytes pushable to the stack. ) -// isSmallInt returns whether or not the opcode is considered a small integer, +// IsSmallInt returns whether or not the opcode is considered a small integer, // which is an OP_0, or OP_1 through OP_16. // // NOTE: This function is only valid for version 0 opcodes. Since the function // does not accept a script version, the results are undefined for other script // versions. -func isSmallInt(op byte) bool { +func IsSmallInt(op byte) bool { return op == OP_0 || (op >= OP_1 && op <= OP_16) } @@ -74,19 +74,19 @@ func IsPayToScriptHash(script []byte) bool { return isScriptHashScript(script) } -// IsPayToWitnessScriptHash returns true if the is in the standard +// IsPayToWitnessScriptHash returns true if the script is in the standard // pay-to-witness-script-hash (P2WSH) format, false otherwise. func IsPayToWitnessScriptHash(script []byte) bool { return isWitnessScriptHashScript(script) } -// IsPayToWitnessPubKeyHash returns true if the is in the standard +// IsPayToWitnessPubKeyHash returns true if the script is in the standard // pay-to-witness-pubkey-hash (P2WKH) format, false otherwise. func IsPayToWitnessPubKeyHash(script []byte) bool { return isWitnessPubKeyHashScript(script) } -// IsPayToTaproot returns true if if the passed script is a standard +// IsPayToTaproot returns true if the passed script is a standard // pay-to-taproot (PTTR) scripts, and false otherwise. 
func IsPayToTaproot(script []byte) bool { return isWitnessTaprootScript(script) @@ -294,9 +294,9 @@ func removeOpcodeByData(script []byte, dataToRemove []byte) []byte { return result } -// asSmallInt returns the passed opcode, which must be true according to -// isSmallInt(), as an integer. -func asSmallInt(op byte) int { +// AsSmallInt returns the passed opcode, which must be true according to +// IsSmallInt(), as an integer. +func AsSmallInt(op byte) int { if op == OP_0 { return 0 } @@ -342,7 +342,7 @@ func countSigOpsV0(script []byte, precise bool) int { // operations in new script versions should move to // aggregated schemes such as Schnorr instead. if precise && prevOp >= OP_1 && prevOp <= OP_16 { - numSigOps += asSmallInt(prevOp) + numSigOps += AsSmallInt(prevOp) } else { numSigOps += MaxPubKeysPerMultiSig } @@ -504,7 +504,7 @@ func checkScriptParses(scriptVersion uint16, script []byte) error { } // IsUnspendable returns whether the passed public key script is unspendable, or -// guaranteed to fail at execution. This allows inputs to be pruned instantly +// guaranteed to fail at execution. This allows outputs to be pruned instantly // when entering the UTXO set. // // NOTE: This function is only valid for version 0 scripts. Since the function diff --git a/txscript/scriptbuilder.go b/txscript/scriptbuilder.go index 7984dd9661..fa2bc073f0 100644 --- a/txscript/scriptbuilder.go +++ b/txscript/scriptbuilder.go @@ -13,11 +13,41 @@ const ( // defaultScriptAlloc is the default size used for the backing array // for a script being built by the ScriptBuilder. The array will // dynamically grow as needed, but this figure is intended to provide - // enough space for vast majority of scripts without needing to grow the - // backing array multiple times. + // enough space for the vast majority of scripts without needing to grow + // the backing array multiple times. 
Can be overwritten with the + // WithScriptAllocSize functional option where expected script sizes are + // known. defaultScriptAlloc = 500 ) +// scriptBuilderConfig is a configuration struct that can be used to modify the +// initialization of a ScriptBuilder. +type scriptBuilderConfig struct { + // allocSize specifies the initial size of the backing array for the + // script builder. + allocSize int +} + +// defaultScriptBuilderConfig returns a new scriptBuilderConfig with the +// default values set. +func defaultScriptBuilderConfig() *scriptBuilderConfig { + return &scriptBuilderConfig{ + allocSize: defaultScriptAlloc, + } +} + +// ScriptBuilderOpt is a functional option type which is used to modify the +// initialization of a ScriptBuilder. +type ScriptBuilderOpt func(*scriptBuilderConfig) + +// WithScriptAllocSize specifies the initial size of the backing array for the +// script builder. +func WithScriptAllocSize(size int) ScriptBuilderOpt { + return func(cfg *scriptBuilderConfig) { + cfg.allocSize = size + } +} + // ErrScriptNotCanonical identifies a non-canonical script. The caller can use // a type assertion to detect this error type. type ErrScriptNotCanonical string @@ -37,16 +67,17 @@ func (e ErrScriptNotCanonical) Error() string { // For example, the following would build a 2-of-3 multisig script for usage in // a pay-to-script-hash (although in this situation MultiSigScript() would be a // better choice to generate the script): -// builder := txscript.NewScriptBuilder() -// builder.AddOp(txscript.OP_2).AddData(pubKey1).AddData(pubKey2) -// builder.AddData(pubKey3).AddOp(txscript.OP_3) -// builder.AddOp(txscript.OP_CHECKMULTISIG) -// script, err := builder.Script() -// if err != nil { -// // Handle the error. 
-// return -// } -// fmt.Printf("Final multi-sig script: %x\n", script) +// +// builder := txscript.NewScriptBuilder() +// builder.AddOp(txscript.OP_2).AddData(pubKey1).AddData(pubKey2) +// builder.AddData(pubKey3).AddOp(txscript.OP_3) +// builder.AddOp(txscript.OP_CHECKMULTISIG) +// script, err := builder.Script() +// if err != nil { +// // Handle the error. +// return +// } +// fmt.Printf("Final multi-sig script: %x\n", script) type ScriptBuilder struct { script []byte err error @@ -267,8 +298,13 @@ func (b *ScriptBuilder) Script() ([]byte, error) { // NewScriptBuilder returns a new instance of a script builder. See // ScriptBuilder for details. -func NewScriptBuilder() *ScriptBuilder { +func NewScriptBuilder(opts ...ScriptBuilderOpt) *ScriptBuilder { + cfg := defaultScriptBuilderConfig() + for _, opt := range opts { + opt(cfg) + } + return &ScriptBuilder{ - script: make([]byte, 0, defaultScriptAlloc), + script: make([]byte, 0, cfg.allocSize), } } diff --git a/txscript/scriptbuilder_test.go b/txscript/scriptbuilder_test.go index 89f2b861ab..baf9526a26 100644 --- a/txscript/scriptbuilder_test.go +++ b/txscript/scriptbuilder_test.go @@ -7,8 +7,38 @@ package txscript import ( "bytes" "testing" + + "github.com/stretchr/testify/require" ) +// TestScriptBuilderAlloc tests that the pre-allocation for a script via the +// NewScriptBuilder function works as expected. +func TestScriptBuilderAlloc(t *testing.T) { + // Using the default value, we should get a script with a capacity of + // 500 bytes, which is quite large for most scripts. + defaultBuilder := NewScriptBuilder() + require.EqualValues(t, defaultScriptAlloc, cap(defaultBuilder.script)) + + const allocSize = 23 + builder := NewScriptBuilder(WithScriptAllocSize(allocSize)) + + // The initial capacity of the script should be set to the explicit + // value. 
+ require.EqualValues(t, allocSize, cap(builder.script)) + + builder.AddOp(OP_HASH160) + builder.AddData(make([]byte, 20)) + builder.AddOp(OP_EQUAL) + script, err := builder.Script() + require.NoError(t, err) + + require.Len(t, script, allocSize) + + // The capacity shouldn't have changed, as the script should've fit just + // fine. + require.EqualValues(t, allocSize, cap(builder.script)) +} + // TestScriptBuilderAddOp tests that pushing opcodes to a script via the // ScriptBuilder API works as expected. func TestScriptBuilderAddOp(t *testing.T) { diff --git a/txscript/scriptnum.go b/txscript/scriptnum.go index 81f2636121..550b01e1ff 100644 --- a/txscript/scriptnum.go +++ b/txscript/scriptnum.go @@ -51,7 +51,7 @@ const ( // method to get the serialized representation (including values that overflow). // // Then, whenever data is interpreted as an integer, it is converted to this -// type by using the makeScriptNum function which will return an error if the +// type by using the MakeScriptNum function which will return an error if the // number is out of range or not minimally encoded depending on parameters. // Since all numeric opcodes involve pulling data from the stack and // interpreting it as an integer, it provides the required behavior. @@ -89,18 +89,19 @@ func checkMinimalDataEncoding(v []byte) error { // Bytes returns the number serialized as a little endian with a sign bit. 
// // Example encodings: -// 127 -> [0x7f] -// -127 -> [0xff] -// 128 -> [0x80 0x00] -// -128 -> [0x80 0x80] -// 129 -> [0x81 0x00] -// -129 -> [0x81 0x80] -// 256 -> [0x00 0x01] -// -256 -> [0x00 0x81] -// 32767 -> [0xff 0x7f] -// -32767 -> [0xff 0xff] -// 32768 -> [0x00 0x80 0x00] -// -32768 -> [0x00 0x80 0x80] +// +// 127 -> [0x7f] +// -127 -> [0xff] +// 128 -> [0x80 0x00] +// -128 -> [0x80 0x80] +// 129 -> [0x81 0x00] +// -129 -> [0x81 0x80] +// 256 -> [0x00 0x01] +// -256 -> [0x00 0x81] +// 32767 -> [0xff 0x7f] +// -32767 -> [0xff 0xff] +// 32768 -> [0x00 0x80 0x00] +// -32768 -> [0x00 0x80 0x80] func (n scriptNum) Bytes() []byte { // Zero encodes as an empty byte slice. if n == 0 { @@ -151,7 +152,7 @@ func (n scriptNum) Bytes() []byte { // provide this behavior. // // In practice, for most opcodes, the number should never be out of range since -// it will have been created with makeScriptNum using the defaultScriptLen +// it will have been created with MakeScriptNum using the defaultScriptLen // value, which rejects them. In case something in the future ends up calling // this function against the result of some arithmetic, which IS allowed to be // out of range before being reinterpreted as an integer, this will provide the @@ -168,7 +169,7 @@ func (n scriptNum) Int32() int32 { return int32(n) } -// makeScriptNum interprets the passed serialized bytes as an encoded integer +// MakeScriptNum interprets the passed serialized bytes as an encoded integer // and returns the result as a script number. // // Since the consensus rules dictate that serialized bytes interpreted as ints @@ -194,9 +195,9 @@ func (n scriptNum) Int32() int32 { // overflows. // // See the Bytes function documentation for example encodings. 
-func makeScriptNum(v []byte, requireMinimal bool, scriptNumLen int) (scriptNum, error) { +func MakeScriptNum(v []byte, requireMinimal bool, scriptNumLen int) (scriptNum, error) { // Interpreting data requires that it is not larger than - // the the passed scriptNumLen value. + // the passed scriptNumLen value. if len(v) > scriptNumLen { str := fmt.Sprintf("numeric value encoded as %x is %d bytes "+ "which exceeds the max allowed of %d", v, len(v), diff --git a/txscript/scriptnum_test.go b/txscript/scriptnum_test.go index 668f912f6f..9aba3a7062 100644 --- a/txscript/scriptnum_test.go +++ b/txscript/scriptnum_test.go @@ -195,15 +195,15 @@ func TestMakeScriptNum(t *testing.T) { for _, test := range tests { // Ensure the error code is of the expected type and the error // code matches the value specified in the test instance. - gotNum, err := makeScriptNum(test.serialized, test.minimalEncoding, + gotNum, err := MakeScriptNum(test.serialized, test.minimalEncoding, test.numLen) if e := tstCheckScriptError(err, test.err); e != nil { - t.Errorf("makeScriptNum(%#x): %v", test.serialized, e) + t.Errorf("MakeScriptNum(%#x): %v", test.serialized, e) continue } if gotNum != test.num { - t.Errorf("makeScriptNum(%#x): did not get expected "+ + t.Errorf("MakeScriptNum(%#x): did not get expected "+ "number - got %d, want %d", test.serialized, gotNum, test.num) continue diff --git a/txscript/sighash.go b/txscript/sighash.go index eaae070d5c..16c3c19c18 100644 --- a/txscript/sighash.go +++ b/txscript/sighash.go @@ -169,10 +169,18 @@ func calcSignatureHash(sigScript []byte, hashType SigHashType, tx *wire.MsgTx, i // The final hash is the double sha256 of both the serialized modified // transaction and the hash type (encoded as a 4-byte little-endian // value) appended. 
- wbuf := bytes.NewBuffer(make([]byte, 0, txCopy.SerializeSizeStripped()+4)) - txCopy.SerializeNoWitness(wbuf) - binary.Write(wbuf, binary.LittleEndian, hashType) - return chainhash.DoubleHashB(wbuf.Bytes()) + sigHashBytes := chainhash.DoubleHashRaw(func(w io.Writer) error { + if err := txCopy.SerializeNoWitness(w); err != nil { + return err + } + err := binary.Write(w, binary.LittleEndian, hashType) + if err != nil { + return err + } + return nil + }) + + return sigHashBytes[:] } // calcWitnessSignatureHashRaw computes the sighash digest of a transaction's @@ -197,99 +205,107 @@ func calcWitnessSignatureHashRaw(subScript []byte, sigHashes *TxSigHashes, return nil, fmt.Errorf("idx %d but %d txins", idx, len(tx.TxIn)) } - // We'll utilize this buffer throughout to incrementally calculate - // the signature hash for this transaction. - var sigHash bytes.Buffer - - // First write out, then encode the transaction's version number. - var bVersion [4]byte - binary.LittleEndian.PutUint32(bVersion[:], uint32(tx.Version)) - sigHash.Write(bVersion[:]) - - // Next write out the possibly pre-calculated hashes for the sequence - // numbers of all inputs, and the hashes of the previous outs for all - // outputs. - var zeroHash chainhash.Hash - - // If anyone can pay isn't active, then we can use the cached - // hashPrevOuts, otherwise we just write zeroes for the prev outs. - if hashType&SigHashAnyOneCanPay == 0 { - sigHash.Write(sigHashes.HashPrevOutsV0[:]) - } else { - sigHash.Write(zeroHash[:]) - } + sigHashBytes := chainhash.DoubleHashRaw(func(w io.Writer) error { + var scratch [8]byte - // If the sighash isn't anyone can pay, single, or none, the use the - // cached hash sequences, otherwise write all zeroes for the - // hashSequence. 
- if hashType&SigHashAnyOneCanPay == 0 && - hashType&sigHashMask != SigHashSingle && - hashType&sigHashMask != SigHashNone { - sigHash.Write(sigHashes.HashSequenceV0[:]) - } else { - sigHash.Write(zeroHash[:]) - } + // First write out, then encode the transaction's version + // number. + binary.LittleEndian.PutUint32(scratch[:], uint32(tx.Version)) + w.Write(scratch[:4]) - txIn := tx.TxIn[idx] - - // Next, write the outpoint being spent. - sigHash.Write(txIn.PreviousOutPoint.Hash[:]) - var bIndex [4]byte - binary.LittleEndian.PutUint32(bIndex[:], txIn.PreviousOutPoint.Index) - sigHash.Write(bIndex[:]) - - if isWitnessPubKeyHashScript(subScript) { - // The script code for a p2wkh is a length prefix varint for - // the next 25 bytes, followed by a re-creation of the original - // p2pkh pk script. - sigHash.Write([]byte{0x19}) - sigHash.Write([]byte{OP_DUP}) - sigHash.Write([]byte{OP_HASH160}) - sigHash.Write([]byte{OP_DATA_20}) - sigHash.Write(extractWitnessPubKeyHash(subScript)) - sigHash.Write([]byte{OP_EQUALVERIFY}) - sigHash.Write([]byte{OP_CHECKSIG}) - } else { - // For p2wsh outputs, and future outputs, the script code is - // the original script, with all code separators removed, - // serialized with a var int length prefix. - wire.WriteVarBytes(&sigHash, 0, subScript) - } + // Next write out the possibly pre-calculated hashes for the + // sequence numbers of all inputs, and the hashes of the + // previous outs for all outputs. + var zeroHash chainhash.Hash - // Next, add the input amount, and sequence number of the input being - // signed. - var bAmount [8]byte - binary.LittleEndian.PutUint64(bAmount[:], uint64(amt)) - sigHash.Write(bAmount[:]) - var bSequence [4]byte - binary.LittleEndian.PutUint32(bSequence[:], txIn.Sequence) - sigHash.Write(bSequence[:]) - - // If the current signature mode isn't single, or none, then we can - // re-use the pre-generated hashoutputs sighash fragment. 
Otherwise, - // we'll serialize and add only the target output index to the signature - // pre-image. - if hashType&sigHashMask != SigHashSingle && - hashType&sigHashMask != SigHashNone { - sigHash.Write(sigHashes.HashOutputsV0[:]) - } else if hashType&sigHashMask == SigHashSingle && idx < len(tx.TxOut) { - var b bytes.Buffer - wire.WriteTxOut(&b, 0, 0, tx.TxOut[idx]) - sigHash.Write(chainhash.DoubleHashB(b.Bytes())) - } else { - sigHash.Write(zeroHash[:]) - } + // If anyone can pay isn't active, then we can use the cached + // hashPrevOuts, otherwise we just write zeroes for the prev + // outs. + if hashType&SigHashAnyOneCanPay == 0 { + w.Write(sigHashes.HashPrevOutsV0[:]) + } else { + w.Write(zeroHash[:]) + } + + // If the sighash isn't anyone can pay, single, or none, the + // use the cached hash sequences, otherwise write all zeroes + // for the hashSequence. + if hashType&SigHashAnyOneCanPay == 0 && + hashType&sigHashMask != SigHashSingle && + hashType&sigHashMask != SigHashNone { + + w.Write(sigHashes.HashSequenceV0[:]) + } else { + w.Write(zeroHash[:]) + } - // Finally, write out the transaction's locktime, and the sig hash - // type. - var bLockTime [4]byte - binary.LittleEndian.PutUint32(bLockTime[:], tx.LockTime) - sigHash.Write(bLockTime[:]) - var bHashType [4]byte - binary.LittleEndian.PutUint32(bHashType[:], uint32(hashType)) - sigHash.Write(bHashType[:]) + txIn := tx.TxIn[idx] + + // Next, write the outpoint being spent. + w.Write(txIn.PreviousOutPoint.Hash[:]) + var bIndex [4]byte + binary.LittleEndian.PutUint32( + bIndex[:], txIn.PreviousOutPoint.Index, + ) + w.Write(bIndex[:]) + + if isWitnessPubKeyHashScript(subScript) { + // The script code for a p2wkh is a length prefix + // varint for the next 25 bytes, followed by a + // re-creation of the original p2pkh pk script. 
+ w.Write([]byte{0x19}) + w.Write([]byte{OP_DUP}) + w.Write([]byte{OP_HASH160}) + w.Write([]byte{OP_DATA_20}) + w.Write(extractWitnessPubKeyHash(subScript)) + w.Write([]byte{OP_EQUALVERIFY}) + w.Write([]byte{OP_CHECKSIG}) + } else { + // For p2wsh outputs, and future outputs, the script + // code is the original script, with all code + // separators removed, serialized with a var int length + // prefix. + wire.WriteVarBytes(w, 0, subScript) + } + + // Next, add the input amount, and sequence number of the input + // being signed. + binary.LittleEndian.PutUint64(scratch[:], uint64(amt)) + w.Write(scratch[:]) + binary.LittleEndian.PutUint32(scratch[:], txIn.Sequence) + w.Write(scratch[:4]) + + // If the current signature mode isn't single, or none, then we + // can re-use the pre-generated hashoutputs sighash fragment. + // Otherwise, we'll serialize and add only the target output + // index to the signature pre-image. + if hashType&sigHashMask != SigHashSingle && + hashType&sigHashMask != SigHashNone { + + w.Write(sigHashes.HashOutputsV0[:]) + } else if hashType&sigHashMask == SigHashSingle && + idx < len(tx.TxOut) { + + h := chainhash.DoubleHashRaw(func(tw io.Writer) error { + wire.WriteTxOut(tw, 0, 0, tx.TxOut[idx]) + return nil + }) + w.Write(h[:]) + } else { + w.Write(zeroHash[:]) + } + + // Finally, write out the transaction's locktime, and the sig + // hash type. 
+ binary.LittleEndian.PutUint32(scratch[:], tx.LockTime) + w.Write(scratch[:4]) + binary.LittleEndian.PutUint32(scratch[:], uint32(hashType)) + w.Write(scratch[:4]) + + return nil + }) - return chainhash.DoubleHashB(sigHash.Bytes()), nil + return sigHashBytes[:], nil } // CalcWitnessSigHash computes the sighash digest for the specified input of @@ -348,7 +364,7 @@ type taprootSigHashOptions struct { codeSepPos uint32 } -// writeDigestExtensions writes out the sighah mesage extensiosn defined by the +// writeDigestExtensions writes out the sighash message extension defined by the // current active sigHashExtFlags. func (t *taprootSigHashOptions) writeDigestExtensions(w io.Writer) error { switch t.extFlag { @@ -588,7 +604,7 @@ func calcTaprootSignatureHashRaw(sigHashes *TxSigHashes, hType SigHashType, // CalcTaprootSignatureHash computes the sighash digest of a transaction's // taproot-spending input using the new sighash digest algorithm described in -// BIP 341. As the new digest algoriths may require the digest to commit to the +// BIP 341. As the new digest algorithms may require the digest to commit to the // entire prev output, a PrevOutputFetcher argument is required to obtain the // needed information. The TxSigHashes pre-computed sighash midstate MUST be // specified. diff --git a/txscript/sign.go b/txscript/sign.go index fc89312f20..0a11e1b197 100644 --- a/txscript/sign.go +++ b/txscript/sign.go @@ -82,7 +82,7 @@ func RawTxInTaprootSignature(tx *wire.MsgTx, sigHashes *TxSigHashes, idx int, // Before we sign the sighash, we'll need to apply the taptweak to the // private key based on the tapScriptRootHash. - privKeyTweak := TweakTaprootPrivKey(key, tapScriptRootHash) + privKeyTweak := TweakTaprootPrivKey(*key, tapScriptRootHash) // With the sighash constructed, we can sign it with the specified // private key. 
@@ -95,7 +95,7 @@ func RawTxInTaprootSignature(tx *wire.MsgTx, sigHashes *TxSigHashes, idx int, // If this is sighash default, then we can just return the signature // directly. - if hashType&SigHashDefault == SigHashDefault { + if hashType == SigHashDefault { return sig, nil } diff --git a/txscript/sign_test.go b/txscript/sign_test.go index ae10ba17d8..b3cf5119d3 100644 --- a/txscript/sign_test.go +++ b/txscript/sign_test.go @@ -10,10 +10,12 @@ import ( "testing" "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec/v2/schnorr" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" + "github.com/stretchr/testify/require" ) type addressToKey struct { @@ -1692,3 +1694,205 @@ nexttest: } } } + +// TestRawTxInTaprootSignature tests that the RawTxInTaprootSignature function +// generates valid signatures for all relevant sighash types. +func TestRawTxInTaprootSignature(t *testing.T) { + t.Parallel() + + privKey, err := btcec.NewPrivateKey() + require.NoError(t, err) + + pubKey := ComputeTaprootKeyNoScript(privKey.PubKey()) + + pkScript, err := PayToTaprootScript(pubKey) + require.NoError(t, err) + + // We'll reuse this simple transaction for the tests below. It ends up + // spending from a bip86 P2TR output. 
+	testTx := wire.NewMsgTx(2)
+	testTx.AddTxIn(&wire.TxIn{
+		PreviousOutPoint: wire.OutPoint{
+			Index: 1,
+		},
+	})
+	txOut := &wire.TxOut{
+		Value: 1e8, PkScript: pkScript,
+	}
+	testTx.AddTxOut(txOut)
+
+	tests := []struct {
+		sigHashType SigHashType
+	}{
+		{
+			sigHashType: SigHashDefault,
+		},
+		{
+			sigHashType: SigHashAll,
+		},
+		{
+			sigHashType: SigHashNone,
+		},
+		{
+			sigHashType: SigHashSingle,
+		},
+		{
+			sigHashType: SigHashSingle | SigHashAnyOneCanPay,
+		},
+		{
+			sigHashType: SigHashNone | SigHashAnyOneCanPay,
+		},
+		{
+			sigHashType: SigHashAll | SigHashAnyOneCanPay,
+		},
+	}
+	for _, test := range tests {
+		name := fmt.Sprintf("sighash=%v", test.sigHashType)
+		t.Run(name, func(t *testing.T) {
+			prevFetcher := NewCannedPrevOutputFetcher(
+				txOut.PkScript, txOut.Value,
+			)
+			sigHashes := NewTxSigHashes(testTx, prevFetcher)
+
+			sig, err := RawTxInTaprootSignature(
+				testTx, sigHashes, 0, txOut.Value, txOut.PkScript,
+				nil, test.sigHashType, privKey,
+			)
+			require.NoError(t, err)
+
+			// If this isn't sighash default, then a sighash should be
+			// applied. Otherwise, it should be a normal sig.
+			expectedLen := schnorr.SignatureSize
+			if test.sigHashType != SigHashDefault {
+				expectedLen += 1
+			}
+			require.Len(t, sig, expectedLen)
+
+			// Finally, ensure that the signature produced is valid.
+			txCopy := testTx.Copy()
+			txCopy.TxIn[0].Witness = wire.TxWitness{sig}
+			vm, err := NewEngine(
+				txOut.PkScript, txCopy, 0, StandardVerifyFlags,
+				nil, sigHashes, txOut.Value, prevFetcher,
+			)
+			require.NoError(t, err)
+
+			require.NoError(t, vm.Execute())
+		})
+	}
+}
+
+// TestRawTxInTapscriptSignature tests that we're able to produce valid schnorr
+// signatures for a simple tapscript spend, for various sighash types.
+func TestRawTxInTapscriptSignature(t *testing.T) { + t.Parallel() + + privKey, err := btcec.NewPrivateKey() + require.NoError(t, err) + + internalKey := privKey.PubKey() + + // Our script will be a simple OP_CHECKSIG as the sole leaf of a + // tapscript tree. We'll also re-use the internal key as the key in the + // leaf. + builder := NewScriptBuilder() + builder.AddData(schnorr.SerializePubKey(internalKey)) + builder.AddOp(OP_CHECKSIG) + pkScript, err := builder.Script() + require.NoError(t, err) + + tapLeaf := NewBaseTapLeaf(pkScript) + tapScriptTree := AssembleTaprootScriptTree(tapLeaf) + + ctrlBlock := tapScriptTree.LeafMerkleProofs[0].ToControlBlock( + internalKey, + ) + + tapScriptRootHash := tapScriptTree.RootNode.TapHash() + outputKey := ComputeTaprootOutputKey( + internalKey, tapScriptRootHash[:], + ) + p2trScript, err := PayToTaprootScript(outputKey) + require.NoError(t, err) + + // We'll reuse this simple transaction for the tests below. It ends up + // spending from a bip86 P2TR output. 
+ testTx := wire.NewMsgTx(2) + testTx.AddTxIn(&wire.TxIn{ + PreviousOutPoint: wire.OutPoint{ + Index: 1, + }, + }) + txOut := &wire.TxOut{ + Value: 1e8, PkScript: p2trScript, + } + testTx.AddTxOut(txOut) + + tests := []struct { + sigHashType SigHashType + }{ + { + sigHashType: SigHashDefault, + }, + { + sigHashType: SigHashAll, + }, + { + sigHashType: SigHashNone, + }, + { + sigHashType: SigHashSingle, + }, + { + sigHashType: SigHashSingle | SigHashAnyOneCanPay, + }, + { + sigHashType: SigHashNone | SigHashAnyOneCanPay, + }, + { + sigHashType: SigHashAll | SigHashAnyOneCanPay, + }, + } + for _, test := range tests { + name := fmt.Sprintf("sighash=%v", test.sigHashType) + t.Run(name, func(t *testing.T) { + prevFetcher := NewCannedPrevOutputFetcher( + txOut.PkScript, txOut.Value, + ) + sigHashes := NewTxSigHashes(testTx, prevFetcher) + + sig, err := RawTxInTapscriptSignature( + testTx, sigHashes, 0, txOut.Value, + txOut.PkScript, tapLeaf, test.sigHashType, + privKey, + ) + require.NoError(t, err) + + // If this isn't sighash default, then a sighash should + // be applied. Otherwise, it should be a normal sig. + expectedLen := schnorr.SignatureSize + if test.sigHashType != SigHashDefault { + expectedLen += 1 + } + require.Len(t, sig, expectedLen) + + // Now that we have the sig, we'll make a valid witness + // including the control block. + ctrlBlockBytes, err := ctrlBlock.ToBytes() + require.NoError(t, err) + txCopy := testTx.Copy() + txCopy.TxIn[0].Witness = wire.TxWitness{ + sig, pkScript, ctrlBlockBytes, + } + + // Finally, ensure that the signature produced is valid. 
+ vm, err := NewEngine( + txOut.PkScript, txCopy, 0, StandardVerifyFlags, + nil, sigHashes, txOut.Value, prevFetcher, + ) + require.NoError(t, err) + + require.NoError(t, vm.Execute()) + }) + } +} diff --git a/txscript/stack.go b/txscript/stack.go index 923047d93e..900a030b2d 100644 --- a/txscript/stack.go +++ b/txscript/stack.go @@ -86,7 +86,7 @@ func (s *stack) PopInt() (scriptNum, error) { return 0, err } - return makeScriptNum(so, s.verifyMinimalData, maxScriptNumLen) + return MakeScriptNum(so, s.verifyMinimalData, maxScriptNumLen) } // PopBool pops the value off the top of the stack, converts it into a bool, and @@ -123,7 +123,7 @@ func (s *stack) PeekInt(idx int32) (scriptNum, error) { return 0, err } - return makeScriptNum(so, s.verifyMinimalData, maxScriptNumLen) + return MakeScriptNum(so, s.verifyMinimalData, maxScriptNumLen) } // PeekBool returns the Nth item on the stack as a bool without removing it. diff --git a/txscript/standard.go b/txscript/standard.go index aa7a7970d7..5ef2ad167f 100644 --- a/txscript/standard.go +++ b/txscript/standard.go @@ -238,10 +238,10 @@ func extractMultisigScriptDetails(scriptVersion uint16, script []byte, extractPu // The first opcode must be a small integer specifying the number of // signatures required. tokenizer := MakeScriptTokenizer(scriptVersion, script) - if !tokenizer.Next() || !isSmallInt(tokenizer.Opcode()) { + if !tokenizer.Next() || !IsSmallInt(tokenizer.Opcode()) { return multiSigDetails{} } - requiredSigs := asSmallInt(tokenizer.Opcode()) + requiredSigs := AsSmallInt(tokenizer.Opcode()) // The next series of opcodes must either push public keys or be a small // integer specifying the number of public keys. 
@@ -251,7 +251,7 @@ func extractMultisigScriptDetails(scriptVersion uint16, script []byte, extractPu pubKeys = make([][]byte, 0, MaxPubKeysPerMultiSig) } for tokenizer.Next() { - if isSmallInt(tokenizer.Opcode()) { + if IsSmallInt(tokenizer.Opcode()) { break } @@ -271,7 +271,7 @@ func extractMultisigScriptDetails(scriptVersion uint16, script []byte, extractPu // The next opcode must be a small integer specifying the number of public // keys required. op := tokenizer.Opcode() - if !isSmallInt(op) || asSmallInt(op) != numPubKeys { + if !IsSmallInt(op) || AsSmallInt(op) != numPubKeys { return multiSigDetails{} } @@ -422,11 +422,11 @@ func extractWitnessProgramInfo(script []byte) (int, []byte, bool) { // The first opcode must be a small int. if !tokenizer.Next() || - !isSmallInt(tokenizer.Opcode()) { + !IsSmallInt(tokenizer.Opcode()) { return 0, nil, false } - version := asSmallInt(tokenizer.Opcode()) + version := AsSmallInt(tokenizer.Opcode()) // The second opcode must be a canonical data push, the length of the // data push is bounded to 40 by the initial check on overall script @@ -520,7 +520,7 @@ func isNullDataScript(scriptVersion uint16, script []byte) bool { // OP_RETURN followed by data push up to MaxDataCarrierSize bytes. tokenizer := MakeScriptTokenizer(scriptVersion, script[1:]) return tokenizer.Next() && tokenizer.Done() && - (isSmallInt(tokenizer.Opcode()) || tokenizer.Opcode() <= OP_PUSHDATA4) && + (IsSmallInt(tokenizer.Opcode()) || tokenizer.Opcode() <= OP_PUSHDATA4) && len(tokenizer.Data()) <= MaxDataCarrierSize } @@ -627,7 +627,7 @@ func expectedInputs(script []byte, class ScriptClass) int { // the original bitcoind bug where OP_CHECKMULTISIG pops an // additional item from the stack, add an extra expected input // for the extra push that is required to compensate. 
- return asSmallInt(script[0]) + 1 + return AsSmallInt(script[0]) + 1 case NullDataTy: fallthrough @@ -1119,14 +1119,14 @@ func ExtractAtomicSwapDataPushes(version uint16, pkScript []byte) (*AtomicSwapDa if tplEntry.expectCanonicalInt { switch { case data != nil: - val, err := makeScriptNum(data, true, tplEntry.maxIntBytes) + val, err := MakeScriptNum(data, true, tplEntry.maxIntBytes) if err != nil { return nil, err } tplEntry.extractedInt = int64(val) - case isSmallInt(op): - tplEntry.extractedInt = int64(asSmallInt(op)) + case IsSmallInt(op): + tplEntry.extractedInt = int64(AsSmallInt(op)) // Not an atomic swap script if the opcode does not push an int. default: diff --git a/txscript/taproot.go b/txscript/taproot.go index 2e452f92d7..003eb19ae3 100644 --- a/txscript/taproot.go +++ b/txscript/taproot.go @@ -296,12 +296,12 @@ func ComputeTaprootKeyNoScript(internalKey *btcec.PublicKey) *btcec.PublicKey { // but on the private key instead. The final key is derived as: privKey + // h_tapTweak(internalKey || merkleRoot) % N, where N is the order of the // secp256k1 curve, and merkleRoot is the root hash of the tapscript tree. -func TweakTaprootPrivKey(privKey *btcec.PrivateKey, +func TweakTaprootPrivKey(privKey btcec.PrivateKey, scriptRoot []byte) *btcec.PrivateKey { // If the corresponding public key has an odd y coordinate, then we'll // negate the private key as specified in BIP 341. - privKeyScalar := &privKey.Key + privKeyScalar := privKey.Key pubKeyBytes := privKey.PubKey().SerializeCompressed() if pubKeyBytes[0] == secp.PubKeyFormatCompressedOdd { privKeyScalar.Negate() @@ -774,3 +774,11 @@ func AssembleTaprootScriptTree(leaves ...TapLeaf) *IndexedTapScriptTree { return scriptTree } + +// PayToTaprootScript creates a pk script for a pay-to-taproot output key. +func PayToTaprootScript(taprootKey *btcec.PublicKey) ([]byte, error) { + return NewScriptBuilder(). + AddOp(OP_1). + AddData(schnorr.SerializePubKey(taprootKey)). 
+		Script()
+}
diff --git a/txscript/taproot_test.go b/txscript/taproot_test.go
index 178405b526..01b3780e9c 100644
--- a/txscript/taproot_test.go
+++ b/txscript/taproot_test.go
@@ -166,8 +166,8 @@ func TestControlBlockParsing(t *testing.T) {
 // key, then generating a public key from that. This test a quickcheck test to
 // assert the following invariant:
 //
-//   * taproot_tweak_pubkey(pubkey_gen(seckey), h)[1] ==
-//     pubkey_gen(taproot_tweak_seckey(seckey, h))
+//   - taproot_tweak_pubkey(pubkey_gen(seckey), h)[1] ==
+//     pubkey_gen(taproot_tweak_seckey(seckey, h))
 func TestTaprootScriptSpendTweak(t *testing.T) {
 	t.Parallel()
 
@@ -186,7 +186,7 @@ func TestTaprootScriptSpendTweak(t *testing.T) {
 		tweakedPub := ComputeTaprootOutputKey(privKey.PubKey(), x[:])
 
 		// Now we'll generate the corresponding tweaked private key.
-		tweakedPriv := TweakTaprootPrivKey(privKey, x[:])
+		tweakedPriv := TweakTaprootPrivKey(*privKey, x[:])
 
 		// The public key for this private key should be the same as
 		// the tweaked public key we generate above.
@@ -204,6 +204,42 @@ func TestTaprootScriptSpendTweak(t *testing.T) {
 
 }
 
+// TestTaprootTweakNoMutation tests that the underlying private key passed into
+// TweakTaprootPrivKey is never mutated.
+func TestTaprootTweakNoMutation(t *testing.T) {
+	t.Parallel()
+
+	// Assert that given a random tweak, and a random private key, that if
+	// we tweak the private key it remains unaffected.
+	f := func(privBytes, tweak [32]byte) bool {
+		privKey, _ := btcec.PrivKeyFromBytes(privBytes[:])
+
+		// Now we'll generate the corresponding tweaked private key.
+		tweakedPriv := TweakTaprootPrivKey(*privKey, tweak[:])
+
+		// The tweaked private key and the original private key should
+		// NOT be the same.
+		if *privKey == *tweakedPriv {
+			t.Logf("private key was mutated")
+			return false
+		}
+
+		// We should be able to re-derive the private key from raw
+		// bytes and have that match up again.
+ privKeyCopy, _ := btcec.PrivKeyFromBytes(privBytes[:]) + if *privKey != *privKeyCopy { + t.Logf("private doesn't match") + return false + } + + return true + } + + if err := quick.Check(f, nil); err != nil { + t.Fatalf("private key modified: %v", err) + } +} + // TestTaprootConstructKeyPath tests the key spend only taproot construction. func TestTaprootConstructKeyPath(t *testing.T) { checkPath := func(branch uint32, expectedAddresses []string) { diff --git a/version.go b/version.go index 19af5b8bd0..d7835910f8 100644 --- a/version.go +++ b/version.go @@ -17,8 +17,8 @@ const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqr // versioning 2.0.0 spec (http://semver.org/). const ( appMajor uint = 0 - appMinor uint = 23 - appPatch uint = 3 + appMinor uint = 24 + appPatch uint = 0 // appPreRelease MUST only contain characters from semanticAlphabet // per the semantic versioning spec. diff --git a/wire/bench_test.go b/wire/bench_test.go index 5176c962e8..d19dd775f2 100644 --- a/wire/bench_test.go +++ b/wire/bench_test.go @@ -8,6 +8,7 @@ import ( "bytes" "compress/bzip2" "fmt" + "io" "io/ioutil" "net" "os" @@ -63,38 +64,48 @@ var genesisCoinbaseTx = MsgTx{ // BenchmarkWriteVarInt1 performs a benchmark on how long it takes to write // a single byte variable length integer. func BenchmarkWriteVarInt1(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { - WriteVarInt(ioutil.Discard, 0, 1) + WriteVarInt(io.Discard, 0, 1) } } // BenchmarkWriteVarInt3 performs a benchmark on how long it takes to write // a three byte variable length integer. func BenchmarkWriteVarInt3(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { - WriteVarInt(ioutil.Discard, 0, 65535) + WriteVarInt(io.Discard, 0, 65535) } } // BenchmarkWriteVarInt5 performs a benchmark on how long it takes to write // a five byte variable length integer. 
func BenchmarkWriteVarInt5(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { - WriteVarInt(ioutil.Discard, 0, 4294967295) + WriteVarInt(io.Discard, 0, 4294967295) } } // BenchmarkWriteVarInt9 performs a benchmark on how long it takes to write // a nine byte variable length integer. func BenchmarkWriteVarInt9(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { - WriteVarInt(ioutil.Discard, 0, 18446744073709551615) + WriteVarInt(io.Discard, 0, 18446744073709551615) } } // BenchmarkReadVarInt1 performs a benchmark on how long it takes to read // a single byte variable length integer. func BenchmarkReadVarInt1(b *testing.B) { + b.ReportAllocs() + buf := []byte{0x01} r := bytes.NewReader(buf) for i := 0; i < b.N; i++ { @@ -106,6 +117,8 @@ func BenchmarkReadVarInt1(b *testing.B) { // BenchmarkReadVarInt3 performs a benchmark on how long it takes to read // a three byte variable length integer. func BenchmarkReadVarInt3(b *testing.B) { + b.ReportAllocs() + buf := []byte{0x0fd, 0xff, 0xff} r := bytes.NewReader(buf) for i := 0; i < b.N; i++ { @@ -117,6 +130,8 @@ func BenchmarkReadVarInt3(b *testing.B) { // BenchmarkReadVarInt5 performs a benchmark on how long it takes to read // a five byte variable length integer. func BenchmarkReadVarInt5(b *testing.B) { + b.ReportAllocs() + buf := []byte{0xfe, 0xff, 0xff, 0xff, 0xff} r := bytes.NewReader(buf) for i := 0; i < b.N; i++ { @@ -128,6 +143,8 @@ func BenchmarkReadVarInt5(b *testing.B) { // BenchmarkReadVarInt9 performs a benchmark on how long it takes to read // a nine byte variable length integer. func BenchmarkReadVarInt9(b *testing.B) { + b.ReportAllocs() + buf := []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} r := bytes.NewReader(buf) for i := 0; i < b.N; i++ { @@ -136,9 +153,119 @@ func BenchmarkReadVarInt9(b *testing.B) { } } +// BenchmarkWriteVarIntBuf1 performs a benchmark on how long it takes to write +// a single byte variable length integer. 
+func BenchmarkWriteVarIntBuf1(b *testing.B) { + b.ReportAllocs() + + buffer := binarySerializer.Borrow() + for i := 0; i < b.N; i++ { + WriteVarIntBuf(io.Discard, 0, 1, buffer) + } + binarySerializer.Return(buffer) +} + +// BenchmarkWriteVarIntBuf3 performs a benchmark on how long it takes to write +// a three byte variable length integer. +func BenchmarkWriteVarIntBuf3(b *testing.B) { + b.ReportAllocs() + + buffer := binarySerializer.Borrow() + for i := 0; i < b.N; i++ { + WriteVarIntBuf(io.Discard, 0, 65535, buffer) + } + binarySerializer.Return(buffer) +} + +// BenchmarkWriteVarIntBuf5 performs a benchmark on how long it takes to write +// a five byte variable length integer. +func BenchmarkWriteVarIntBuf5(b *testing.B) { + b.ReportAllocs() + + buffer := binarySerializer.Borrow() + for i := 0; i < b.N; i++ { + WriteVarIntBuf(io.Discard, 0, 4294967295, buffer) + } + binarySerializer.Return(buffer) +} + +// BenchmarkWriteVarIntBuf9 performs a benchmark on how long it takes to write +// a nine byte variable length integer. +func BenchmarkWriteVarIntBuf9(b *testing.B) { + b.ReportAllocs() + + buffer := binarySerializer.Borrow() + for i := 0; i < b.N; i++ { + WriteVarIntBuf(io.Discard, 0, 18446744073709551615, buffer) + } + binarySerializer.Return(buffer) +} + +// BenchmarkReadVarIntBuf1 performs a benchmark on how long it takes to read +// a single byte variable length integer. +func BenchmarkReadVarIntBuf1(b *testing.B) { + b.ReportAllocs() + + buffer := binarySerializer.Borrow() + buf := []byte{0x01} + r := bytes.NewReader(buf) + for i := 0; i < b.N; i++ { + r.Seek(0, 0) + ReadVarIntBuf(r, 0, buffer) + } + binarySerializer.Return(buffer) +} + +// BenchmarkReadVarIntBuf3 performs a benchmark on how long it takes to read +// a three byte variable length integer. 
+func BenchmarkReadVarIntBuf3(b *testing.B) { + b.ReportAllocs() + + buffer := binarySerializer.Borrow() + buf := []byte{0x0fd, 0xff, 0xff} + r := bytes.NewReader(buf) + for i := 0; i < b.N; i++ { + r.Seek(0, 0) + ReadVarIntBuf(r, 0, buffer) + } + binarySerializer.Return(buffer) +} + +// BenchmarkReadVarIntBuf5 performs a benchmark on how long it takes to read +// a five byte variable length integer. +func BenchmarkReadVarIntBuf5(b *testing.B) { + b.ReportAllocs() + + buffer := binarySerializer.Borrow() + buf := []byte{0xfe, 0xff, 0xff, 0xff, 0xff} + r := bytes.NewReader(buf) + for i := 0; i < b.N; i++ { + r.Seek(0, 0) + ReadVarIntBuf(r, 0, buffer) + } + binarySerializer.Return(buffer) +} + +// BenchmarkReadVarIntBuf9 performs a benchmark on how long it takes to read +// a nine byte variable length integer. +func BenchmarkReadVarIntBuf9(b *testing.B) { + b.ReportAllocs() + + buffer := binarySerializer.Borrow() + buf := []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + r := bytes.NewReader(buf) + for i := 0; i < b.N; i++ { + r.Seek(0, 0) + ReadVarIntBuf(r, 0, buffer) + } + binarySerializer.Return(buffer) +} + // BenchmarkReadVarStr4 performs a benchmark on how long it takes to read a // four byte variable length string. func BenchmarkReadVarStr4(b *testing.B) { + b.ReportAllocs() + buf := []byte{0x04, 't', 'e', 's', 't'} r := bytes.NewReader(buf) for i := 0; i < b.N; i++ { @@ -150,6 +277,8 @@ func BenchmarkReadVarStr4(b *testing.B) { // BenchmarkReadVarStr10 performs a benchmark on how long it takes to read a // ten byte variable length string. func BenchmarkReadVarStr10(b *testing.B) { + b.ReportAllocs() + buf := []byte{0x0a, 't', 'e', 's', 't', '0', '1', '2', '3', '4', '5'} r := bytes.NewReader(buf) for i := 0; i < b.N; i++ { @@ -161,22 +290,83 @@ func BenchmarkReadVarStr10(b *testing.B) { // BenchmarkWriteVarStr4 performs a benchmark on how long it takes to write a // four byte variable length string. 
func BenchmarkWriteVarStr4(b *testing.B) {
+	b.ReportAllocs()
+
 	for i := 0; i < b.N; i++ {
-		WriteVarString(ioutil.Discard, 0, "test")
+		WriteVarString(io.Discard, 0, "test")
 	}
 }
 
 // BenchmarkWriteVarStr10 performs a benchmark on how long it takes to write a
 // ten byte variable length string.
 func BenchmarkWriteVarStr10(b *testing.B) {
+	b.ReportAllocs()
+
+	for i := 0; i < b.N; i++ {
+		WriteVarString(io.Discard, 0, "test012345")
+	}
+}
+
+// BenchmarkReadVarStrBuf4 performs a benchmark on how long it takes to read a
+// four byte variable length string.
+func BenchmarkReadVarStrBuf4(b *testing.B) {
+	b.ReportAllocs()
+
+	buffer := binarySerializer.Borrow()
+	buf := []byte{0x04, 't', 'e', 's', 't'}
+	r := bytes.NewReader(buf)
 	for i := 0; i < b.N; i++ {
-		WriteVarString(ioutil.Discard, 0, "test012345")
+		r.Seek(0, 0)
+		readVarStringBuf(r, 0, buffer)
 	}
+	binarySerializer.Return(buffer)
+}
+
+// BenchmarkReadVarStrBuf10 performs a benchmark on how long it takes to read a
+// ten byte variable length string.
+func BenchmarkReadVarStrBuf10(b *testing.B) {
+	b.ReportAllocs()
+
+	buffer := binarySerializer.Borrow()
+	buf := []byte{0x0a, 't', 'e', 's', 't', '0', '1', '2', '3', '4', '5'}
+	r := bytes.NewReader(buf)
+	for i := 0; i < b.N; i++ {
+		r.Seek(0, 0)
+		readVarStringBuf(r, 0, buffer)
+	}
+	binarySerializer.Return(buffer)
+}
+
+// BenchmarkWriteVarStrBuf4 performs a benchmark on how long it takes to write a
+// four byte variable length string.
+func BenchmarkWriteVarStrBuf4(b *testing.B) {
+	b.ReportAllocs()
+
+	buf := binarySerializer.Borrow()
+	for i := 0; i < b.N; i++ {
+		writeVarStringBuf(io.Discard, 0, "test", buf)
+	}
+	binarySerializer.Return(buf)
+}
+
+// BenchmarkWriteVarStrBuf10 performs a benchmark on how long it takes to write
+// a ten byte variable length string.
+func BenchmarkWriteVarStrBuf10(b *testing.B) { + b.ReportAllocs() + + buf := binarySerializer.Borrow() + for i := 0; i < b.N; i++ { + writeVarStringBuf(io.Discard, 0, "test012345", buf) + } + binarySerializer.Return(buf) } // BenchmarkReadOutPoint performs a benchmark on how long it takes to read a // transaction output point. func BenchmarkReadOutPoint(b *testing.B) { + b.ReportAllocs() + + buffer := binarySerializer.Borrow() buf := []byte{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -188,25 +378,46 @@ func BenchmarkReadOutPoint(b *testing.B) { var op OutPoint for i := 0; i < b.N; i++ { r.Seek(0, 0) - readOutPoint(r, 0, 0, &op) + readOutPointBuf(r, 0, 0, &op, buffer) } + binarySerializer.Return(buffer) } // BenchmarkWriteOutPoint performs a benchmark on how long it takes to write a // transaction output point. func BenchmarkWriteOutPoint(b *testing.B) { + b.ReportAllocs() + op := &OutPoint{ Hash: chainhash.Hash{}, Index: 0, } for i := 0; i < b.N; i++ { - WriteOutPoint(ioutil.Discard, 0, 0, op) + WriteOutPoint(io.Discard, 0, 0, op) } } +// BenchmarkWriteOutPointBuf performs a benchmark on how long it takes to write a +// transaction output point. +func BenchmarkWriteOutPointBuf(b *testing.B) { + b.ReportAllocs() + + buf := binarySerializer.Borrow() + op := &OutPoint{ + Hash: chainhash.Hash{}, + Index: 0, + } + for i := 0; i < b.N; i++ { + writeOutPointBuf(io.Discard, 0, 0, op, buf) + } + binarySerializer.Return(buf) +} + // BenchmarkReadTxOut performs a benchmark on how long it takes to read a // transaction output. 
func BenchmarkReadTxOut(b *testing.B) { + b.ReportAllocs() + buf := []byte{ 0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount 0x43, // Varint for length of pk script @@ -227,22 +438,74 @@ func BenchmarkReadTxOut(b *testing.B) { for i := 0; i < b.N; i++ { r.Seek(0, 0) ReadTxOut(r, 0, 0, &txOut) - scriptPool.Return(txOut.PkScript) } } +// BenchmarkReadTxOutBuf performs a benchmark on how long it takes to read a +// transaction output. +func BenchmarkReadTxOutBuf(b *testing.B) { + b.ReportAllocs() + + scriptBuffer := scriptPool.Borrow() + sbuf := scriptBuffer[:] + buffer := binarySerializer.Borrow() + buf := []byte{ + 0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount + 0x43, // Varint for length of pk script + 0x41, // OP_DATA_65 + 0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c, + 0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16, + 0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c, + 0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c, + 0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4, + 0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6, + 0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e, + 0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58, + 0xee, // 65-byte signature + 0xac, // OP_CHECKSIG + } + r := bytes.NewReader(buf) + var txOut TxOut + for i := 0; i < b.N; i++ { + r.Seek(0, 0) + readTxOutBuf(r, 0, 0, &txOut, buffer, sbuf) + } + binarySerializer.Return(buffer) + scriptPool.Return(scriptBuffer) +} + // BenchmarkWriteTxOut performs a benchmark on how long it takes to write // a transaction output. func BenchmarkWriteTxOut(b *testing.B) { + b.ReportAllocs() + txOut := blockOne.Transactions[0].TxOut[0] for i := 0; i < b.N; i++ { - WriteTxOut(ioutil.Discard, 0, 0, txOut) + WriteTxOut(io.Discard, 0, 0, txOut) } } +// BenchmarkWriteTxOutBuf performs a benchmark on how long it takes to write +// a transaction output. 
+func BenchmarkWriteTxOutBuf(b *testing.B) { + b.ReportAllocs() + + buf := binarySerializer.Borrow() + txOut := blockOne.Transactions[0].TxOut[0] + for i := 0; i < b.N; i++ { + WriteTxOutBuf(io.Discard, 0, 0, txOut, buf) + } + binarySerializer.Return(buf) +} + // BenchmarkReadTxIn performs a benchmark on how long it takes to read a // transaction input. func BenchmarkReadTxIn(b *testing.B) { + b.ReportAllocs() + + scriptBuffer := scriptPool.Borrow() + sbuf := scriptBuffer[:] + buffer := binarySerializer.Borrow() buf := []byte{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -257,18 +520,23 @@ func BenchmarkReadTxIn(b *testing.B) { var txIn TxIn for i := 0; i < b.N; i++ { r.Seek(0, 0) - readTxIn(r, 0, 0, &txIn) - scriptPool.Return(txIn.SignatureScript) + readTxInBuf(r, 0, 0, &txIn, buffer, sbuf) } + binarySerializer.Return(buffer) + scriptPool.Return(scriptBuffer) } // BenchmarkWriteTxIn performs a benchmark on how long it takes to write // a transaction input. func BenchmarkWriteTxIn(b *testing.B) { + b.ReportAllocs() + + buf := binarySerializer.Borrow() txIn := blockOne.Transactions[0].TxIn[0] for i := 0; i < b.N; i++ { - writeTxIn(ioutil.Discard, 0, 0, txIn) + writeTxInBuf(io.Discard, 0, 0, txIn, buf) } + binarySerializer.Return(buf) } // BenchmarkDeserializeTx performs a benchmark on how long it takes to @@ -302,6 +570,9 @@ func BenchmarkDeserializeTxSmall(b *testing.B) { 0x00, 0x00, 0x00, 0x00, // Lock time } + b.ReportAllocs() + b.ResetTimer() + r := bytes.NewReader(buf) var tx MsgTx for i := 0; i < b.N; i++ { @@ -313,6 +584,7 @@ func BenchmarkDeserializeTxSmall(b *testing.B) { // BenchmarkDeserializeTxLarge performs a benchmark on how long it takes to // deserialize a very large transaction. func BenchmarkDeserializeTxLarge(b *testing.B) { + // tx bb41a757f405890fb0f5856228e23b715702d714d59bf2b1feb70d8b2b4e3e08 // from the main block chain. 
fi, err := os.Open("testdata/megatx.bin.bz2") @@ -325,6 +597,9 @@ func BenchmarkDeserializeTxLarge(b *testing.B) { b.Fatalf("Failed to read transaction data: %v", err) } + b.ReportAllocs() + b.ResetTimer() + r := bytes.NewReader(buf) var tx MsgTx for i := 0; i < b.N; i++ { @@ -333,19 +608,132 @@ func BenchmarkDeserializeTxLarge(b *testing.B) { } } +func BenchmarkDeserializeBlock(b *testing.B) { + buf, err := os.ReadFile( + "testdata/block-00000000000000000021868c2cefc52a480d173c849412fe81c4e5ab806f94ab.blk", + ) + if err != nil { + b.Fatalf("Failed to read block data: %v", err) + } + + b.ReportAllocs() + b.ResetTimer() + + r := bytes.NewReader(buf) + var block MsgBlock + for i := 0; i < b.N; i++ { + r.Seek(0, 0) + block.Deserialize(r) + } +} + +func BenchmarkSerializeBlock(b *testing.B) { + buf, err := os.ReadFile( + "testdata/block-00000000000000000021868c2cefc52a480d173c849412fe81c4e5ab806f94ab.blk", + ) + if err != nil { + b.Fatalf("Failed to read block data: %v", err) + } + + var block MsgBlock + err = block.Deserialize(bytes.NewReader(buf)) + if err != nil { + panic(err.Error()) + } + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + block.Serialize(io.Discard) + } +} + // BenchmarkSerializeTx performs a benchmark on how long it takes to serialize // a transaction. func BenchmarkSerializeTx(b *testing.B) { + b.ReportAllocs() + tx := blockOne.Transactions[0] for i := 0; i < b.N; i++ { - tx.Serialize(ioutil.Discard) + tx.Serialize(io.Discard) + + } +} + +// BenchmarkSerializeTxSmall performs a benchmark on how long it takes to +// serialize a transaction. 
+func BenchmarkSerializeTxSmall(b *testing.B) {
+	buf := []byte{
+		0x01, 0x00, 0x00, 0x00, // Version
+		0x01, // Varint for number of input transactions
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash
+		0xff, 0xff, 0xff, 0xff, // Previous output index
+		0x07, // Varint for length of signature script
+		0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, // Signature script
+		0xff, 0xff, 0xff, 0xff, // Sequence
+		0x01, // Varint for number of output transactions
+		0x00, 0xf2, 0x05, 0x2a, 0x01, 0x00, 0x00, 0x00, // Transaction amount
+		0x43, // Varint for length of pk script
+		0x41, // OP_DATA_65
+		0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c,
+		0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16,
+		0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c,
+		0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c,
+		0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4,
+		0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6,
+		0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e,
+		0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58,
+		0xee, // 65-byte signature
+		0xac, // OP_CHECKSIG
+		0x00, 0x00, 0x00, 0x00, // Lock time
+	}
+
+	var tx MsgTx
+	tx.Deserialize(bytes.NewReader(buf))
+
+	b.ReportAllocs()
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		tx.Serialize(io.Discard)
+	}
+}
+// BenchmarkSerializeTxLarge performs a benchmark on how long it takes to
+// serialize a transaction.
+func BenchmarkSerializeTxLarge(b *testing.B) {
+	// tx bb41a757f405890fb0f5856228e23b715702d714d59bf2b1feb70d8b2b4e3e08
+	// from the main block chain.
+ fi, err := os.Open("testdata/megatx.bin.bz2") + if err != nil { + b.Fatalf("Failed to read transaction data: %v", err) + } + defer fi.Close() + buf, err := ioutil.ReadAll(bzip2.NewReader(fi)) + if err != nil { + b.Fatalf("Failed to read transaction data: %v", err) + } + + var tx MsgTx + tx.Deserialize(bytes.NewReader(buf)) + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + tx.Serialize(io.Discard) } } // BenchmarkReadBlockHeader performs a benchmark on how long it takes to // deserialize a block header. func BenchmarkReadBlockHeader(b *testing.B) { + b.ReportAllocs() + buf := []byte{ 0x01, 0x00, 0x00, 0x00, // Version 1 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, @@ -369,18 +757,65 @@ func BenchmarkReadBlockHeader(b *testing.B) { } } +// BenchmarkReadBlockHeaderBuf performs a benchmark on how long it takes to +// deserialize a block header. +func BenchmarkReadBlockHeaderBuf(b *testing.B) { + b.ReportAllocs() + + buffer := binarySerializer.Borrow() + buf := []byte{ + 0x01, 0x00, 0x00, 0x00, // Version 1 + 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, + 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, + 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, + 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock + 0x3b, 0xa3, 0xed, 0xfd, 0x7a, 0x7b, 0x12, 0xb2, + 0x7a, 0xc7, 0x2c, 0x3e, 0x67, 0x76, 0x8f, 0x61, + 0x7f, 0xc8, 0x1b, 0xc3, 0x88, 0x8a, 0x51, 0x32, + 0x3a, 0x9f, 0xb8, 0xaa, 0x4b, 0x1e, 0x5e, 0x4a, // MerkleRoot + 0x29, 0xab, 0x5f, 0x49, // Timestamp + 0xff, 0xff, 0x00, 0x1d, // Bits + 0xf3, 0xe0, 0x01, 0x00, // Nonce + 0x00, // TxnCount Varint + } + r := bytes.NewReader(buf) + var header BlockHeader + for i := 0; i < b.N; i++ { + r.Seek(0, 0) + readBlockHeaderBuf(r, 0, &header, buffer) + } + binarySerializer.Return(buffer) +} + // BenchmarkWriteBlockHeader performs a benchmark on how long it takes to // serialize a block header. 
func BenchmarkWriteBlockHeader(b *testing.B) { + b.ReportAllocs() + + header := blockOne.Header + for i := 0; i < b.N; i++ { + writeBlockHeader(io.Discard, 0, &header) + } +} + +// BenchmarkWriteBlockHeaderBuf performs a benchmark on how long it takes to +// serialize a block header. +func BenchmarkWriteBlockHeaderBuf(b *testing.B) { + b.ReportAllocs() + + buf := binarySerializer.Borrow() header := blockOne.Header for i := 0; i < b.N; i++ { - writeBlockHeader(ioutil.Discard, 0, &header) + writeBlockHeaderBuf(io.Discard, 0, &header, buf) } + binarySerializer.Return(buf) } // BenchmarkDecodeGetHeaders performs a benchmark on how long it takes to // decode a getheaders message with the maximum number of block locator hashes. func BenchmarkDecodeGetHeaders(b *testing.B) { + b.ReportAllocs() + // Create a message with the maximum number of block locators. pver := ProtocolVersion var m MsgGetHeaders @@ -411,6 +846,8 @@ func BenchmarkDecodeGetHeaders(b *testing.B) { // BenchmarkDecodeHeaders performs a benchmark on how long it takes to // decode a headers message with the maximum number of headers. func BenchmarkDecodeHeaders(b *testing.B) { + b.ReportAllocs() + // Create a message with the maximum number of headers. pver := ProtocolVersion var m MsgHeaders @@ -441,6 +878,8 @@ func BenchmarkDecodeHeaders(b *testing.B) { // BenchmarkDecodeGetBlocks performs a benchmark on how long it takes to // decode a getblocks message with the maximum number of block locator hashes. func BenchmarkDecodeGetBlocks(b *testing.B) { + b.ReportAllocs() + // Create a message with the maximum number of block locators. pver := ProtocolVersion var m MsgGetBlocks @@ -471,6 +910,8 @@ func BenchmarkDecodeGetBlocks(b *testing.B) { // BenchmarkDecodeAddr performs a benchmark on how long it takes to decode an // addr message with the maximum number of addresses. func BenchmarkDecodeAddr(b *testing.B) { + b.ReportAllocs() + // Create a message with the maximum number of addresses. 
pver := ProtocolVersion ip := net.ParseIP("127.0.0.1") @@ -516,6 +957,9 @@ func BenchmarkDecodeInv(b *testing.B) { } buf := bb.Bytes() + b.ReportAllocs() + b.ResetTimer() + r := bytes.NewReader(buf) var msg MsgInv b.ResetTimer() @@ -528,6 +972,8 @@ func BenchmarkDecodeInv(b *testing.B) { // BenchmarkDecodeNotFound performs a benchmark on how long it takes to decode // a notfound message with the maximum number of entries. func BenchmarkDecodeNotFound(b *testing.B) { + b.ReportAllocs() + // Create a message with the maximum number of entries. pver := ProtocolVersion var m MsgNotFound @@ -558,6 +1004,8 @@ func BenchmarkDecodeNotFound(b *testing.B) { // BenchmarkDecodeMerkleBlock performs a benchmark on how long it takes to // decode a reasonably sized merkleblock message. func BenchmarkDecodeMerkleBlock(b *testing.B) { + b.ReportAllocs() + // Create a message with random data. pver := ProtocolVersion var m MsgMerkleBlock @@ -596,6 +1044,8 @@ func BenchmarkDecodeMerkleBlock(b *testing.B) { // BenchmarkTxHash performs a benchmark on how long it takes to hash a // transaction. func BenchmarkTxHash(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { genesisCoinbaseTx.TxHash() } @@ -604,6 +1054,8 @@ func BenchmarkTxHash(b *testing.B) { // BenchmarkDoubleHashB performs a benchmark on how long it takes to perform a // double hash returning a byte slice. func BenchmarkDoubleHashB(b *testing.B) { + b.ReportAllocs() + var buf bytes.Buffer if err := genesisCoinbaseTx.Serialize(&buf); err != nil { b.Errorf("Serialize: unexpected error: %v", err) @@ -620,6 +1072,8 @@ func BenchmarkDoubleHashB(b *testing.B) { // BenchmarkDoubleHashH performs a benchmark on how long it takes to perform // a double hash returning a chainhash.Hash. 
func BenchmarkDoubleHashH(b *testing.B) { + b.ReportAllocs() + var buf bytes.Buffer if err := genesisCoinbaseTx.Serialize(&buf); err != nil { b.Errorf("Serialize: unexpected error: %v", err) diff --git a/wire/blockheader.go b/wire/blockheader.go index 9c9c2237e6..b5bce44e5b 100644 --- a/wire/blockheader.go +++ b/wire/blockheader.go @@ -5,7 +5,6 @@ package wire import ( - "bytes" "io" "time" @@ -46,14 +45,9 @@ const blockHeaderLen = 80 // BlockHash computes the block identifier hash for the given block header. func (h *BlockHeader) BlockHash() chainhash.Hash { - // Encode the header and double sha256 everything prior to the number of - // transactions. Ignore the error returns since there is no way the - // encode could fail except being out of memory which would cause a - // run-time panic. - buf := bytes.NewBuffer(make([]byte, 0, MaxBlockHeaderPayload)) - _ = writeBlockHeader(buf, 0, h) - - return chainhash.DoubleHashH(buf.Bytes()) + return chainhash.DoubleHashRaw(func(w io.Writer) error { + return writeBlockHeader(w, 0, h) + }) } // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. @@ -113,16 +107,109 @@ func NewBlockHeader(version int32, prevHash, merkleRootHash *chainhash.Hash, // readBlockHeader reads a bitcoin block header from r. See Deserialize for // decoding block headers stored to disk, such as in a database, as opposed to // decoding from the wire. +// +// DEPRECATED: Use readBlockHeaderBuf instead. func readBlockHeader(r io.Reader, pver uint32, bh *BlockHeader) error { - return readElements(r, &bh.Version, &bh.PrevBlock, &bh.MerkleRoot, - (*uint32Time)(&bh.Timestamp), &bh.Bits, &bh.Nonce) + buf := binarySerializer.Borrow() + err := readBlockHeaderBuf(r, pver, bh, buf) + binarySerializer.Return(buf) + return err +} + +// readBlockHeaderBuf reads a bitcoin block header from r. See Deserialize for +// decoding block headers stored to disk, such as in a database, as opposed to +// decoding from the wire. 
+// +// If b is non-nil, the provided buffer will be used for serializing small +// values. Otherwise a buffer will be drawn from the binarySerializer's pool +// and return when the method finishes. +// +// NOTE: b MUST either be nil or at least an 8-byte slice. +func readBlockHeaderBuf(r io.Reader, pver uint32, bh *BlockHeader, + buf []byte) error { + + if _, err := io.ReadFull(r, buf[:4]); err != nil { + return err + } + bh.Version = int32(littleEndian.Uint32(buf[:4])) + + if _, err := io.ReadFull(r, bh.PrevBlock[:]); err != nil { + return err + } + + if _, err := io.ReadFull(r, bh.MerkleRoot[:]); err != nil { + return err + } + + if _, err := io.ReadFull(r, buf[:4]); err != nil { + return err + } + bh.Timestamp = time.Unix(int64(littleEndian.Uint32(buf[:4])), 0) + + if _, err := io.ReadFull(r, buf[:4]); err != nil { + return err + } + bh.Bits = littleEndian.Uint32(buf[:4]) + + if _, err := io.ReadFull(r, buf[:4]); err != nil { + return err + } + bh.Nonce = littleEndian.Uint32(buf[:4]) + + return nil } // writeBlockHeader writes a bitcoin block header to w. See Serialize for // encoding block headers to be stored to disk, such as in a database, as // opposed to encoding for the wire. +// +// DEPRECATED: Use writeBlockHeaderBuf instead. func writeBlockHeader(w io.Writer, pver uint32, bh *BlockHeader) error { - sec := uint32(bh.Timestamp.Unix()) - return writeElements(w, bh.Version, &bh.PrevBlock, &bh.MerkleRoot, - sec, bh.Bits, bh.Nonce) + buf := binarySerializer.Borrow() + err := writeBlockHeaderBuf(w, pver, bh, buf) + binarySerializer.Return(buf) + return err +} + +// writeBlockHeaderBuf writes a bitcoin block header to w. See Serialize for +// encoding block headers to be stored to disk, such as in a database, as +// opposed to encoding for the wire. +// +// If b is non-nil, the provided buffer will be used for serializing small +// values. Otherwise a buffer will be drawn from the binarySerializer's pool +// and return when the method finishes. 
+// +// NOTE: b MUST either be nil or at least an 8-byte slice. +func writeBlockHeaderBuf(w io.Writer, pver uint32, bh *BlockHeader, + buf []byte) error { + + littleEndian.PutUint32(buf[:4], uint32(bh.Version)) + if _, err := w.Write(buf[:4]); err != nil { + return err + } + + if _, err := w.Write(bh.PrevBlock[:]); err != nil { + return err + } + + if _, err := w.Write(bh.MerkleRoot[:]); err != nil { + return err + } + + littleEndian.PutUint32(buf[:4], uint32(bh.Timestamp.Unix())) + if _, err := w.Write(buf[:4]); err != nil { + return err + } + + littleEndian.PutUint32(buf[:4], bh.Bits) + if _, err := w.Write(buf[:4]); err != nil { + return err + } + + littleEndian.PutUint32(buf[:4], bh.Nonce) + if _, err := w.Write(buf[:4]); err != nil { + return err + } + + return nil } diff --git a/wire/common.go b/wire/common.go index 42c1797b32..d3a82c46c0 100644 --- a/wire/common.go +++ b/wire/common.go @@ -73,12 +73,13 @@ func (l binaryFreeList) Return(buf []byte) { // free list and returns it as a uint8. func (l binaryFreeList) Uint8(r io.Reader) (uint8, error) { buf := l.Borrow()[:1] + defer l.Return(buf) + if _, err := io.ReadFull(r, buf); err != nil { - l.Return(buf) return 0, err } rv := buf[0] - l.Return(buf) + return rv, nil } @@ -87,12 +88,13 @@ func (l binaryFreeList) Uint8(r io.Reader) (uint8, error) { // the resulting uint16. func (l binaryFreeList) Uint16(r io.Reader, byteOrder binary.ByteOrder) (uint16, error) { buf := l.Borrow()[:2] + defer l.Return(buf) + if _, err := io.ReadFull(r, buf); err != nil { - l.Return(buf) return 0, err } rv := byteOrder.Uint16(buf) - l.Return(buf) + return rv, nil } @@ -101,12 +103,13 @@ func (l binaryFreeList) Uint16(r io.Reader, byteOrder binary.ByteOrder) (uint16, // the resulting uint32. 
func (l binaryFreeList) Uint32(r io.Reader, byteOrder binary.ByteOrder) (uint32, error) { buf := l.Borrow()[:4] + defer l.Return(buf) + if _, err := io.ReadFull(r, buf); err != nil { - l.Return(buf) return 0, err } rv := byteOrder.Uint32(buf) - l.Return(buf) + return rv, nil } @@ -115,12 +118,13 @@ func (l binaryFreeList) Uint32(r io.Reader, byteOrder binary.ByteOrder) (uint32, // the resulting uint64. func (l binaryFreeList) Uint64(r io.Reader, byteOrder binary.ByteOrder) (uint64, error) { buf := l.Borrow()[:8] + defer l.Return(buf) + if _, err := io.ReadFull(r, buf); err != nil { - l.Return(buf) return 0, err } rv := byteOrder.Uint64(buf) - l.Return(buf) + return rv, nil } @@ -128,9 +132,11 @@ func (l binaryFreeList) Uint64(r io.Reader, byteOrder binary.ByteOrder) (uint64, // writes the resulting byte to the given writer. func (l binaryFreeList) PutUint8(w io.Writer, val uint8) error { buf := l.Borrow()[:1] + defer l.Return(buf) + buf[0] = val _, err := w.Write(buf) - l.Return(buf) + return err } @@ -139,9 +145,11 @@ func (l binaryFreeList) PutUint8(w io.Writer, val uint8) error { // writer. func (l binaryFreeList) PutUint16(w io.Writer, byteOrder binary.ByteOrder, val uint16) error { buf := l.Borrow()[:2] + defer l.Return(buf) + byteOrder.PutUint16(buf, val) _, err := w.Write(buf) - l.Return(buf) + return err } @@ -150,9 +158,11 @@ func (l binaryFreeList) PutUint16(w io.Writer, byteOrder binary.ByteOrder, val u // writer. func (l binaryFreeList) PutUint32(w io.Writer, byteOrder binary.ByteOrder, val uint32) error { buf := l.Borrow()[:4] + defer l.Return(buf) + byteOrder.PutUint32(buf, val) _, err := w.Write(buf) - l.Return(buf) + return err } @@ -161,9 +171,11 @@ func (l binaryFreeList) PutUint32(w io.Writer, byteOrder binary.ByteOrder, val u // writer. 
func (l binaryFreeList) PutUint64(w io.Writer, byteOrder binary.ByteOrder, val uint64) error { buf := l.Borrow()[:8] + defer l.Return(buf) + byteOrder.PutUint64(buf, val) _, err := w.Write(buf) - l.Return(buf) + return err } @@ -474,19 +486,30 @@ func writeElements(w io.Writer, elements ...interface{}) error { // ReadVarInt reads a variable length integer from r and returns it as a uint64. func ReadVarInt(r io.Reader, pver uint32) (uint64, error) { - discriminant, err := binarySerializer.Uint8(r) - if err != nil { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + n, err := ReadVarIntBuf(r, pver, buf) + return n, err +} + +// ReadVarIntBuf reads a variable length integer from r using a preallocated +// scratch buffer and returns it as a uint64. +// +// NOTE: buf MUST at least an 8-byte slice. +func ReadVarIntBuf(r io.Reader, pver uint32, buf []byte) (uint64, error) { + if _, err := io.ReadFull(r, buf[:1]); err != nil { return 0, err } + discriminant := buf[0] var rv uint64 switch discriminant { case 0xff: - sv, err := binarySerializer.Uint64(r, littleEndian) - if err != nil { + if _, err := io.ReadFull(r, buf); err != nil { return 0, err } - rv = sv + rv = littleEndian.Uint64(buf) // The encoding is not canonical if the value could have been // encoded using fewer bytes. @@ -497,11 +520,10 @@ func ReadVarInt(r io.Reader, pver uint32) (uint64, error) { } case 0xfe: - sv, err := binarySerializer.Uint32(r, littleEndian) - if err != nil { + if _, err := io.ReadFull(r, buf[:4]); err != nil { return 0, err } - rv = uint64(sv) + rv = uint64(littleEndian.Uint32(buf[:4])) // The encoding is not canonical if the value could have been // encoded using fewer bytes. 
@@ -512,11 +534,10 @@ func ReadVarInt(r io.Reader, pver uint32) (uint64, error) { } case 0xfd: - sv, err := binarySerializer.Uint16(r, littleEndian) - if err != nil { + if _, err := io.ReadFull(r, buf[:2]); err != nil { return 0, err } - rv = uint64(sv) + rv = uint64(littleEndian.Uint16(buf[:2])) // The encoding is not canonical if the value could have been // encoded using fewer bytes. @@ -536,31 +557,46 @@ func ReadVarInt(r io.Reader, pver uint32) (uint64, error) { // WriteVarInt serializes val to w using a variable number of bytes depending // on its value. func WriteVarInt(w io.Writer, pver uint32, val uint64) error { - if val < 0xfd { - return binarySerializer.PutUint8(w, uint8(val)) - } + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) - if val <= math.MaxUint16 { - err := binarySerializer.PutUint8(w, 0xfd) - if err != nil { - return err - } - return binarySerializer.PutUint16(w, littleEndian, uint16(val)) - } + err := WriteVarIntBuf(w, pver, val, buf) + return err +} - if val <= math.MaxUint32 { - err := binarySerializer.PutUint8(w, 0xfe) - if err != nil { +// WriteVarIntBuf serializes val to w using a variable number of bytes depending +// on its value using a preallocated scratch buffer. +// +// NOTE: buf MUST at least an 8-byte slice. 
+func WriteVarIntBuf(w io.Writer, pver uint32, val uint64, buf []byte) error { + switch { + case val < 0xfd: + buf[0] = uint8(val) + _, err := w.Write(buf[:1]) + return err + + case val <= math.MaxUint16: + buf[0] = 0xfd + littleEndian.PutUint16(buf[1:3], uint16(val)) + _, err := w.Write(buf[:3]) + return err + + case val <= math.MaxUint32: + buf[0] = 0xfe + littleEndian.PutUint32(buf[1:5], uint32(val)) + _, err := w.Write(buf[:5]) + return err + + default: + buf[0] = 0xff + if _, err := w.Write(buf[:1]); err != nil { return err } - return binarySerializer.PutUint32(w, littleEndian, uint32(val)) - } - err := binarySerializer.PutUint8(w, 0xff) - if err != nil { + littleEndian.PutUint64(buf, val) + _, err := w.Write(buf) return err } - return binarySerializer.PutUint64(w, littleEndian, val) } // VarIntSerializeSize returns the number of bytes it would take to serialize @@ -593,7 +629,27 @@ func VarIntSerializeSize(val uint64) int { // maximum block payload size since it helps protect against memory exhaustion // attacks and forced panics through malformed messages. func ReadVarString(r io.Reader, pver uint32) (string, error) { - count, err := ReadVarInt(r, pver) + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + str, err := readVarStringBuf(r, pver, buf) + return str, err +} + +// readVarStringBuf reads a variable length string from r and returns it as a Go +// string. A variable length string is encoded as a variable length integer +// containing the length of the string followed by the bytes that represent the +// string itself. An error is returned if the length is greater than the +// maximum block payload size since it helps protect against memory exhaustion +// attacks and forced panics through malformed messages. +// +// If b is non-nil, the provided buffer will be used for serializing small +// values. Otherwise a buffer will be drawn from the binarySerializer's pool +// and return when the method finishes. 
+// +// NOTE: b MUST either be nil or at least an 8-byte slice. +func readVarStringBuf(r io.Reader, pver uint32, buf []byte) (string, error) { + count, err := ReadVarIntBuf(r, pver, buf) if err != nil { return "", err } @@ -607,22 +663,40 @@ func ReadVarString(r io.Reader, pver uint32) (string, error) { return "", messageError("ReadVarString", str) } - buf := make([]byte, count) - _, err = io.ReadFull(r, buf) + str := make([]byte, count) + _, err = io.ReadFull(r, str) if err != nil { return "", err } - return string(buf), nil + return string(str), nil } // WriteVarString serializes str to w as a variable length integer containing // the length of the string followed by the bytes that represent the string // itself. func WriteVarString(w io.Writer, pver uint32, str string) error { - err := WriteVarInt(w, pver, uint64(len(str))) + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + err := writeVarStringBuf(w, pver, str, buf) + return err +} + +// writeVarStringBuf serializes str to w as a variable length integer containing +// the length of the string followed by the bytes that represent the string +// itself. +// +// If b is non-nil, the provided buffer will be used for serializing small +// values. Otherwise a buffer will be drawn from the binarySerializer's pool +// and return when the method finishes. +// +// NOTE: b MUST either be nil or at least an 8-byte slice. 
+func writeVarStringBuf(w io.Writer, pver uint32, str string, buf []byte) error { + err := WriteVarIntBuf(w, pver, uint64(len(str)), buf) if err != nil { return err } + _, err = w.Write([]byte(str)) return err } @@ -637,7 +711,26 @@ func WriteVarString(w io.Writer, pver uint32, str string) error { func ReadVarBytes(r io.Reader, pver uint32, maxAllowed uint32, fieldName string) ([]byte, error) { - count, err := ReadVarInt(r, pver) + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + b, err := ReadVarBytesBuf(r, pver, buf, maxAllowed, fieldName) + return b, err +} + +// ReadVarBytesBuf reads a variable length byte array. A byte array is encoded +// as a varInt containing the length of the array followed by the bytes +// themselves. An error is returned if the length is greater than the +// passed maxAllowed parameter which helps protect against memory exhaustion +// attacks and forced panics through malformed messages. The fieldName +// parameter is only used for the error message so it provides more context in +// the error. If b is non-nil, the provided buffer will be used for serializing +// small values. Otherwise a buffer will be drawn from the binarySerializer's +// pool and return when the method finishes. +func ReadVarBytesBuf(r io.Reader, pver uint32, buf []byte, maxAllowed uint32, + fieldName string) ([]byte, error) { + + count, err := ReadVarIntBuf(r, pver, buf) if err != nil { return nil, err } @@ -651,19 +744,33 @@ func ReadVarBytes(r io.Reader, pver uint32, maxAllowed uint32, return nil, messageError("ReadVarBytes", str) } - b := make([]byte, count) - _, err = io.ReadFull(r, b) + bytes := make([]byte, count) + _, err = io.ReadFull(r, bytes) if err != nil { return nil, err } - return b, nil + return bytes, nil } // WriteVarBytes serializes a variable length byte array to w as a varInt // containing the number of bytes, followed by the bytes themselves. 
func WriteVarBytes(w io.Writer, pver uint32, bytes []byte) error { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + err := WriteVarBytesBuf(w, pver, bytes, buf) + return err +} + +// WriteVarBytesBuf serializes a variable length byte array to w as a varInt +// containing the number of bytes, followed by the bytes themselves. If b is +// non-nil, the provided buffer will be used for serializing small values. +// Otherwise a buffer will be drawn from the binarySerializer's pool and return +// when the method finishes. +func WriteVarBytesBuf(w io.Writer, pver uint32, bytes, buf []byte) error { slen := uint64(len(bytes)) - err := WriteVarInt(w, pver, slen) + + err := WriteVarIntBuf(w, pver, slen, buf) if err != nil { return err } diff --git a/wire/doc.go b/wire/doc.go index b8b8c56fff..5e03ff20a1 100644 --- a/wire/doc.go +++ b/wire/doc.go @@ -14,7 +14,7 @@ supported bitcoin messages to and from the wire. This package does not deal with the specifics of message handling such as what to do when a message is received. This provides the caller with a high level of flexibility. -Bitcoin Message Overview +# Bitcoin Message Overview The bitcoin protocol consists of exchanging messages between peers. Each message is preceded by a header which identifies information about it such as @@ -30,7 +30,7 @@ messages, all of the details of marshalling and unmarshalling to and from the wire using bitcoin encoding are handled so the caller doesn't have to concern themselves with the specifics. -Message Interaction +# Message Interaction The following provides a quick summary of how the bitcoin messages are intended to interact with one another. As stated above, these interactions are not @@ -62,13 +62,13 @@ interactions in no particular order. in BIP0031. The BIP0031Version constant can be used to detect a recent enough protocol version for this purpose (version > BIP0031Version). 
-Common Parameters +# Common Parameters There are several common parameters that arise when using this package to read and write bitcoin messages. The following sections provide a quick overview of these parameters so the next sections can build on them. -Protocol Version +# Protocol Version The protocol version should be negotiated with the remote peer at a higher level than this package via the version (MsgVersion) message exchange, however, @@ -77,7 +77,7 @@ latest protocol version this package supports and is typically the value to use for all outbound connections before a potentially lower protocol version is negotiated. -Bitcoin Network +# Bitcoin Network The bitcoin network is a magic number which is used to identify the start of a message and which bitcoin network the message applies to. This package provides @@ -88,7 +88,7 @@ the following constants: wire.TestNet3 (Test network version 3) wire.SimNet (Simulation test network) -Determining Message Type +# Determining Message Type As discussed in the bitcoin message overview section, this package reads and writes bitcoin messages using a generic interface named Message. In @@ -106,7 +106,7 @@ switch or type assertion. An example of a type switch follows: fmt.Printf("Number of tx in block: %v", msg.Header.TxnCount) } -Reading Messages +# Reading Messages In order to unmarshall bitcoin messages from the wire, use the ReadMessage function. It accepts any io.Reader, but typically this will be a net.Conn to @@ -121,7 +121,7 @@ a remote node running a bitcoin peer. Example syntax is: // Log and handle the error } -Writing Messages +# Writing Messages In order to marshall bitcoin messages to the wire, use the WriteMessage function. 
It accepts any io.Writer, but typically this will be a net.Conn to @@ -139,7 +139,7 @@ from a remote peer is: // Log and handle the error } -Errors +# Errors Errors returned by this package are either the raw errors provided by underlying calls to read/write from streams such as io.EOF, io.ErrUnexpectedEOF, and @@ -147,7 +147,7 @@ io.ErrShortWrite, or of type wire.MessageError. This allows the caller to differentiate between general IO errors and malformed messages through type assertions. -Bitcoin Improvement Proposals +# Bitcoin Improvement Proposals This package includes spec changes outlined by the following BIPs: diff --git a/wire/invvect.go b/wire/invvect.go index 1e706642b4..c0756a8f1c 100644 --- a/wire/invvect.go +++ b/wire/invvect.go @@ -74,13 +74,37 @@ func NewInvVect(typ InvType, hash *chainhash.Hash) *InvVect { } } -// readInvVect reads an encoded InvVect from r depending on the protocol +// readInvVectBuf reads an encoded InvVect from r depending on the protocol // version. -func readInvVect(r io.Reader, pver uint32, iv *InvVect) error { - return readElements(r, &iv.Type, &iv.Hash) +// +// If b is non-nil, the provided buffer will be used for serializing small +// values. Otherwise a buffer will be drawn from the binarySerializer's pool +// and return when the method finishes. +// +// NOTE: b MUST either be nil or at least an 8-byte slice. +func readInvVectBuf(r io.Reader, pver uint32, iv *InvVect, buf []byte) error { + if _, err := io.ReadFull(r, buf[:4]); err != nil { + return err + } + iv.Type = InvType(littleEndian.Uint32(buf[:4])) + + _, err := io.ReadFull(r, iv.Hash[:]) + return err } -// writeInvVect serializes an InvVect to w depending on the protocol version. -func writeInvVect(w io.Writer, pver uint32, iv *InvVect) error { - return writeElements(w, iv.Type, &iv.Hash) +// writeInvVectBuf serializes an InvVect to w depending on the protocol version. +// +// If b is non-nil, the provided buffer will be used for serializing small +// values. 
Otherwise a buffer will be drawn from the binarySerializer's pool +// and return when the method finishes. +// +// NOTE: b MUST either be nil or at least an 8-byte slice. +func writeInvVectBuf(w io.Writer, pver uint32, iv *InvVect, buf []byte) error { + littleEndian.PutUint32(buf[:4], uint32(iv.Type)) + if _, err := w.Write(buf[:4]); err != nil { + return err + } + + _, err := w.Write(iv.Hash[:]) + return err } diff --git a/wire/invvect_test.go b/wire/invvect_test.go index 1d02c09817..cf29d0a2ff 100644 --- a/wire/invvect_test.go +++ b/wire/invvect_test.go @@ -238,10 +238,11 @@ func TestInvVectWire(t *testing.T) { } t.Logf("Running %d tests", len(tests)) + var b [8]byte for i, test := range tests { // Encode to wire format. var buf bytes.Buffer - err := writeInvVect(&buf, test.pver, &test.in) + err := writeInvVectBuf(&buf, test.pver, &test.in, b[:]) if err != nil { t.Errorf("writeInvVect #%d error %v", i, err) continue @@ -255,7 +256,7 @@ func TestInvVectWire(t *testing.T) { // Decode the message from wire format. var iv InvVect rbuf := bytes.NewReader(test.buf) - err = readInvVect(rbuf, test.pver, &iv) + err = readInvVectBuf(rbuf, test.pver, &iv, b[:]) if err != nil { t.Errorf("readInvVect #%d error %v", i, err) continue diff --git a/wire/msgblock.go b/wire/msgblock.go index 4172949dc3..77585e3fb6 100644 --- a/wire/msgblock.go +++ b/wire/msgblock.go @@ -62,12 +62,15 @@ func (msg *MsgBlock) ClearTransactions() { // See Deserialize for decoding blocks stored to disk, such as in a database, as // opposed to decoding blocks from the wire. 
func (msg *MsgBlock) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error { - err := readBlockHeader(r, pver, &msg.Header) + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + err := readBlockHeaderBuf(r, pver, &msg.Header, buf) if err != nil { return err } - txCount, err := ReadVarInt(r, pver) + txCount, err := ReadVarIntBuf(r, pver, buf) if err != nil { return err } @@ -81,10 +84,13 @@ func (msg *MsgBlock) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) er return messageError("MsgBlock.BtcDecode", str) } + scriptBuf := scriptPool.Borrow() + defer scriptPool.Return(scriptBuf) + msg.Transactions = make([]*MsgTx, 0, txCount) for i := uint64(0); i < txCount; i++ { tx := MsgTx{} - err := tx.BtcDecode(r, pver, enc) + err := tx.btcDecode(r, pver, enc, buf, scriptBuf[:]) if err != nil { return err } @@ -129,15 +135,18 @@ func (msg *MsgBlock) DeserializeNoWitness(r io.Reader) error { func (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, error) { fullLen := r.Len() + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + // At the current time, there is no difference between the wire encoding // at protocol version 0 and the stable long-term storage format. As // a result, make use of existing wire protocol functions. - err := readBlockHeader(r, 0, &msg.Header) + err := readBlockHeaderBuf(r, 0, &msg.Header, buf) if err != nil { return nil, err } - txCount, err := ReadVarInt(r, 0) + txCount, err := ReadVarIntBuf(r, 0, buf) if err != nil { return nil, err } @@ -151,6 +160,9 @@ func (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, error) { return nil, messageError("MsgBlock.DeserializeTxLoc", str) } + scriptBuf := scriptPool.Borrow() + defer scriptPool.Return(scriptBuf) + // Deserialize each transaction while keeping track of its location // within the byte stream. 
msg.Transactions = make([]*MsgTx, 0, txCount) @@ -158,7 +170,7 @@ func (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, error) { for i := uint64(0); i < txCount; i++ { txLocs[i].TxStart = fullLen - r.Len() tx := MsgTx{} - err := tx.Deserialize(r) + err := tx.btcDecode(r, 0, WitnessEncoding, buf, scriptBuf[:]) if err != nil { return nil, err } @@ -174,18 +186,21 @@ func (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, error) { // See Serialize for encoding blocks to be stored to disk, such as in a // database, as opposed to encoding blocks for the wire. func (msg *MsgBlock) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error { - err := writeBlockHeader(w, pver, &msg.Header) + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + err := writeBlockHeaderBuf(w, pver, &msg.Header, buf) if err != nil { return err } - err = WriteVarInt(w, pver, uint64(len(msg.Transactions))) + err = WriteVarIntBuf(w, pver, uint64(len(msg.Transactions)), buf) if err != nil { return err } for _, tx := range msg.Transactions { - err = tx.BtcEncode(w, pver, enc) + err = tx.btcEncode(w, pver, enc, buf) if err != nil { return err } diff --git a/wire/msgcfcheckpt.go b/wire/msgcfcheckpt.go index fc3fd53295..397a3c137a 100644 --- a/wire/msgcfcheckpt.go +++ b/wire/msgcfcheckpt.go @@ -52,20 +52,22 @@ func (msg *MsgCFCheckpt) AddCFHeader(header *chainhash.Hash) error { // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. // This is part of the Message interface implementation. 
func (msg *MsgCFCheckpt) BtcDecode(r io.Reader, pver uint32, _ MessageEncoding) error { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + // Read filter type - err := readElement(r, &msg.FilterType) - if err != nil { + if _, err := io.ReadFull(r, buf[:1]); err != nil { return err } + msg.FilterType = FilterType(buf[0]) // Read stop hash - err = readElement(r, &msg.StopHash) - if err != nil { + if _, err := io.ReadFull(r, msg.StopHash[:]); err != nil { return err } // Read number of filter headers - count, err := ReadVarInt(r, pver) + count, err := ReadVarIntBuf(r, pver, buf) if err != nil { return err } @@ -80,7 +82,7 @@ func (msg *MsgCFCheckpt) BtcDecode(r io.Reader, pver uint32, _ MessageEncoding) msg.FilterHeaders = make([]*chainhash.Hash, count) for i := uint64(0); i < count; i++ { var cfh chainhash.Hash - err := readElement(r, &cfh) + _, err := io.ReadFull(r, cfh[:]) if err != nil { return err } @@ -93,27 +95,29 @@ func (msg *MsgCFCheckpt) BtcDecode(r io.Reader, pver uint32, _ MessageEncoding) // BtcEncode encodes the receiver to w using the bitcoin protocol encoding. // This is part of the Message interface implementation. 
func (msg *MsgCFCheckpt) BtcEncode(w io.Writer, pver uint32, _ MessageEncoding) error { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + // Write filter type - err := writeElement(w, msg.FilterType) - if err != nil { + buf[0] = byte(msg.FilterType) + if _, err := w.Write(buf[:1]); err != nil { return err } // Write stop hash - err = writeElement(w, msg.StopHash) - if err != nil { + if _, err := w.Write(msg.StopHash[:]); err != nil { return err } // Write length of FilterHeaders slice count := len(msg.FilterHeaders) - err = WriteVarInt(w, pver, uint64(count)) + err := WriteVarIntBuf(w, pver, uint64(count), buf) if err != nil { return err } for _, cfh := range msg.FilterHeaders { - err := writeElement(w, cfh) + _, err := w.Write(cfh[:]) if err != nil { return err } diff --git a/wire/msgcfheaders.go b/wire/msgcfheaders.go index 40d30f9b46..e1af2c324d 100644 --- a/wire/msgcfheaders.go +++ b/wire/msgcfheaders.go @@ -48,26 +48,27 @@ func (msg *MsgCFHeaders) AddCFHash(hash *chainhash.Hash) error { // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. // This is part of the Message interface implementation. 
func (msg *MsgCFHeaders) BtcDecode(r io.Reader, pver uint32, _ MessageEncoding) error { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + // Read filter type - err := readElement(r, &msg.FilterType) - if err != nil { + if _, err := io.ReadFull(r, buf[:1]); err != nil { return err } + msg.FilterType = FilterType(buf[0]) // Read stop hash - err = readElement(r, &msg.StopHash) - if err != nil { + if _, err := io.ReadFull(r, msg.StopHash[:]); err != nil { return err } // Read prev filter header - err = readElement(r, &msg.PrevFilterHeader) - if err != nil { + if _, err := io.ReadFull(r, msg.PrevFilterHeader[:]); err != nil { return err } // Read number of filter headers - count, err := ReadVarInt(r, pver) + count, err := ReadVarIntBuf(r, pver, buf) if err != nil { return err } @@ -85,7 +86,7 @@ func (msg *MsgCFHeaders) BtcDecode(r io.Reader, pver uint32, _ MessageEncoding) msg.FilterHashes = make([]*chainhash.Hash, 0, count) for i := uint64(0); i < count; i++ { var cfh chainhash.Hash - err := readElement(r, &cfh) + _, err := io.ReadFull(r, cfh[:]) if err != nil { return err } @@ -98,40 +99,40 @@ func (msg *MsgCFHeaders) BtcDecode(r io.Reader, pver uint32, _ MessageEncoding) // BtcEncode encodes the receiver to w using the bitcoin protocol encoding. // This is part of the Message interface implementation. 
func (msg *MsgCFHeaders) BtcEncode(w io.Writer, pver uint32, _ MessageEncoding) error {
+	count := len(msg.FilterHashes)
+	if count > MaxCFHeadersPerMsg {
+		str := fmt.Sprintf("too many committed filter headers for "+
+			"message [count %v, max %v]", count,
+			MaxCFHeadersPerMsg)
+		return messageError("MsgCFHeaders.BtcEncode", str)
+	}
+
+	buf := binarySerializer.Borrow()
+	defer binarySerializer.Return(buf)
+
 	// Write filter type
-	err := writeElement(w, msg.FilterType)
-	if err != nil {
+	buf[0] = byte(msg.FilterType)
+	if _, err := w.Write(buf[:1]); err != nil {
 		return err
 	}
 
 	// Write stop hash
-	err = writeElement(w, msg.StopHash)
-	if err != nil {
+	if _, err := w.Write(msg.StopHash[:]); err != nil {
 		return err
 	}
 
 	// Write prev filter header
-	err = writeElement(w, msg.PrevFilterHeader)
-	if err != nil {
+	if _, err := w.Write(msg.PrevFilterHeader[:]); err != nil {
 		return err
 	}
 
-	// Limit to max committed headers per message.
-	count := len(msg.FilterHashes)
-	if count > MaxCFHeadersPerMsg {
-		str := fmt.Sprintf("too many committed filter headers for "+
-			"message [count %v, max %v]", count,
-			MaxBlockHeadersPerMsg)
-		return messageError("MsgCFHeaders.BtcEncode", str)
-	}
-
-	err = WriteVarInt(w, pver, uint64(count))
+	err := WriteVarIntBuf(w, pver, uint64(count), buf)
 	if err != nil {
 		return err
 	}
 
 	for _, cfh := range msg.FilterHashes {
-		err := writeElement(w, cfh)
+		_, err := w.Write(cfh[:])
 		if err != nil {
 			return err
 		}
diff --git a/wire/msgcfilter.go b/wire/msgcfilter.go
index 097590b2ce..d7cf16378a 100644
--- a/wire/msgcfilter.go
+++ b/wire/msgcfilter.go
@@ -38,19 +38,22 @@ type MsgCFilter struct {
 // This is part of the Message interface implementation.
func (msg *MsgCFilter) BtcDecode(r io.Reader, pver uint32, _ MessageEncoding) error { // Read filter type - err := readElement(r, &msg.FilterType) - if err != nil { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + if _, err := io.ReadFull(r, buf[:1]); err != nil { return err } + msg.FilterType = FilterType(buf[0]) // Read the hash of the filter's block - err = readElement(r, &msg.BlockHash) - if err != nil { + if _, err := io.ReadFull(r, msg.BlockHash[:]); err != nil { return err } // Read filter data - msg.Data, err = ReadVarBytes(r, pver, MaxCFilterDataSize, + var err error + msg.Data, err = ReadVarBytesBuf(r, pver, buf, MaxCFilterDataSize, "cfilter data") return err } @@ -65,17 +68,20 @@ func (msg *MsgCFilter) BtcEncode(w io.Writer, pver uint32, _ MessageEncoding) er return messageError("MsgCFilter.BtcEncode", str) } - err := writeElement(w, msg.FilterType) - if err != nil { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + buf[0] = byte(msg.FilterType) + if _, err := w.Write(buf[:1]); err != nil { return err } - err = writeElement(w, msg.BlockHash) - if err != nil { + if _, err := w.Write(msg.BlockHash[:]); err != nil { return err } - return WriteVarBytes(w, pver, msg.Data) + err := WriteVarBytesBuf(w, pver, msg.Data, buf) + return err } // Deserialize decodes a filter from r into the receiver using a format that is diff --git a/wire/msggetblocks.go b/wire/msggetblocks.go index caf4400ca4..da8bb878d2 100644 --- a/wire/msggetblocks.go +++ b/wire/msggetblocks.go @@ -51,16 +51,20 @@ func (msg *MsgGetBlocks) AddBlockLocatorHash(hash *chainhash.Hash) error { // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. // This is part of the Message interface implementation. 
func (msg *MsgGetBlocks) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error { - err := readElement(r, &msg.ProtocolVersion) - if err != nil { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + if _, err := io.ReadFull(r, buf[:4]); err != nil { return err } + msg.ProtocolVersion = littleEndian.Uint32(buf[:4]) // Read num block locator hashes and limit to max. - count, err := ReadVarInt(r, pver) + count, err := ReadVarIntBuf(r, pver, buf) if err != nil { return err } + if count > MaxBlockLocatorsPerMsg { str := fmt.Sprintf("too many block locator hashes for message "+ "[count %v, max %v]", count, MaxBlockLocatorsPerMsg) @@ -73,14 +77,15 @@ func (msg *MsgGetBlocks) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding msg.BlockLocatorHashes = make([]*chainhash.Hash, 0, count) for i := uint64(0); i < count; i++ { hash := &locatorHashes[i] - err := readElement(r, hash) + _, err := io.ReadFull(r, hash[:]) if err != nil { return err } msg.AddBlockLocatorHash(hash) } - return readElement(r, &msg.HashStop) + _, err = io.ReadFull(r, msg.HashStop[:]) + return err } // BtcEncode encodes the receiver to w using the bitcoin protocol encoding. 
@@ -93,24 +98,28 @@ func (msg *MsgGetBlocks) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding return messageError("MsgGetBlocks.BtcEncode", str) } - err := writeElement(w, msg.ProtocolVersion) - if err != nil { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + littleEndian.PutUint32(buf[:4], msg.ProtocolVersion) + if _, err := w.Write(buf[:4]); err != nil { return err } - err = WriteVarInt(w, pver, uint64(count)) + err := WriteVarIntBuf(w, pver, uint64(count), buf) if err != nil { return err } for _, hash := range msg.BlockLocatorHashes { - err = writeElement(w, hash) + _, err := w.Write(hash[:]) if err != nil { return err } } - return writeElement(w, &msg.HashStop) + _, err = w.Write(msg.HashStop[:]) + return err } // Command returns the protocol command string for the message. This is part diff --git a/wire/msggetcfcheckpt.go b/wire/msggetcfcheckpt.go index c30a86cecd..c57aa5adaf 100644 --- a/wire/msggetcfcheckpt.go +++ b/wire/msggetcfcheckpt.go @@ -21,23 +21,31 @@ type MsgGetCFCheckpt struct { // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgGetCFCheckpt) BtcDecode(r io.Reader, pver uint32, _ MessageEncoding) error { - err := readElement(r, &msg.FilterType) - if err != nil { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + if _, err := io.ReadFull(r, buf[:1]); err != nil { return err } + msg.FilterType = FilterType(buf[0]) - return readElement(r, &msg.StopHash) + _, err := io.ReadFull(r, msg.StopHash[:]) + return err } // BtcEncode encodes the receiver to w using the bitcoin protocol encoding. // This is part of the Message interface implementation. 
func (msg *MsgGetCFCheckpt) BtcEncode(w io.Writer, pver uint32, _ MessageEncoding) error { - err := writeElement(w, msg.FilterType) - if err != nil { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + buf[0] = byte(msg.FilterType) + if _, err := w.Write(buf[:1]); err != nil { return err } - return writeElement(w, &msg.StopHash) + _, err := w.Write(msg.StopHash[:]) + return err } // Command returns the protocol command string for the message. This is part diff --git a/wire/msggetcfheaders.go b/wire/msggetcfheaders.go index 03a1caf72f..e26f439808 100644 --- a/wire/msggetcfheaders.go +++ b/wire/msggetcfheaders.go @@ -22,33 +22,41 @@ type MsgGetCFHeaders struct { // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgGetCFHeaders) BtcDecode(r io.Reader, pver uint32, _ MessageEncoding) error { - err := readElement(r, &msg.FilterType) - if err != nil { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + if _, err := io.ReadFull(r, buf[:1]); err != nil { return err } + msg.FilterType = FilterType(buf[0]) - err = readElement(r, &msg.StartHeight) - if err != nil { + if _, err := io.ReadFull(r, buf[:4]); err != nil { return err } + msg.StartHeight = littleEndian.Uint32(buf[:4]) - return readElement(r, &msg.StopHash) + _, err := io.ReadFull(r, msg.StopHash[:]) + return err } // BtcEncode encodes the receiver to w using the bitcoin protocol encoding. // This is part of the Message interface implementation. 
func (msg *MsgGetCFHeaders) BtcEncode(w io.Writer, pver uint32, _ MessageEncoding) error { - err := writeElement(w, msg.FilterType) - if err != nil { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + buf[0] = byte(msg.FilterType) + if _, err := w.Write(buf[:1]); err != nil { return err } - err = writeElement(w, &msg.StartHeight) - if err != nil { + littleEndian.PutUint32(buf[:4], msg.StartHeight) + if _, err := w.Write(buf[:4]); err != nil { return err } - return writeElement(w, &msg.StopHash) + _, err := w.Write(msg.StopHash[:]) + return err } // Command returns the protocol command string for the message. This is part diff --git a/wire/msggetcfilters.go b/wire/msggetcfilters.go index 8002413826..1e6e225587 100644 --- a/wire/msggetcfilters.go +++ b/wire/msggetcfilters.go @@ -26,33 +26,41 @@ type MsgGetCFilters struct { // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgGetCFilters) BtcDecode(r io.Reader, pver uint32, _ MessageEncoding) error { - err := readElement(r, &msg.FilterType) - if err != nil { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + if _, err := io.ReadFull(r, buf[:1]); err != nil { return err } + msg.FilterType = FilterType(buf[0]) - err = readElement(r, &msg.StartHeight) - if err != nil { + if _, err := io.ReadFull(r, buf[:4]); err != nil { return err } + msg.StartHeight = littleEndian.Uint32(buf[:4]) - return readElement(r, &msg.StopHash) + _, err := io.ReadFull(r, msg.StopHash[:]) + return err } // BtcEncode encodes the receiver to w using the bitcoin protocol encoding. // This is part of the Message interface implementation. 
func (msg *MsgGetCFilters) BtcEncode(w io.Writer, pver uint32, _ MessageEncoding) error { - err := writeElement(w, msg.FilterType) - if err != nil { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + buf[0] = byte(msg.FilterType) + if _, err := w.Write(buf[:1]); err != nil { return err } - err = writeElement(w, &msg.StartHeight) - if err != nil { + littleEndian.PutUint32(buf[:4], msg.StartHeight) + if _, err := w.Write(buf[:4]); err != nil { return err } - return writeElement(w, &msg.StopHash) + _, err := w.Write(msg.StopHash[:]) + return err } // Command returns the protocol command string for the message. This is part diff --git a/wire/msggetdata.go b/wire/msggetdata.go index 5837fac5ba..f306845677 100644 --- a/wire/msggetdata.go +++ b/wire/msggetdata.go @@ -38,7 +38,10 @@ func (msg *MsgGetData) AddInvVect(iv *InvVect) error { // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgGetData) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error { - count, err := ReadVarInt(r, pver) + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + count, err := ReadVarIntBuf(r, pver, buf) if err != nil { return err } @@ -55,7 +58,7 @@ func (msg *MsgGetData) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) msg.InvList = make([]*InvVect, 0, count) for i := uint64(0); i < count; i++ { iv := &invList[i] - err := readInvVect(r, pver, iv) + err := readInvVectBuf(r, pver, iv, buf) if err != nil { return err } @@ -75,13 +78,16 @@ func (msg *MsgGetData) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) return messageError("MsgGetData.BtcEncode", str) } - err := WriteVarInt(w, pver, uint64(count)) + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + err := WriteVarIntBuf(w, pver, uint64(count), buf) if err != nil { return err } for _, iv := range msg.InvList { - err := writeInvVect(w, pver, iv) + err := 
writeInvVectBuf(w, pver, iv, buf) if err != nil { return err } diff --git a/wire/msggetheaders.go b/wire/msggetheaders.go index 0bbe42cb03..f49e4c0dd4 100644 --- a/wire/msggetheaders.go +++ b/wire/msggetheaders.go @@ -48,16 +48,20 @@ func (msg *MsgGetHeaders) AddBlockLocatorHash(hash *chainhash.Hash) error { // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. // This is part of the Message interface implementation. func (msg *MsgGetHeaders) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error { - err := readElement(r, &msg.ProtocolVersion) - if err != nil { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + if _, err := io.ReadFull(r, buf[:4]); err != nil { return err } + msg.ProtocolVersion = littleEndian.Uint32(buf[:4]) // Read num block locator hashes and limit to max. - count, err := ReadVarInt(r, pver) + count, err := ReadVarIntBuf(r, pver, buf) if err != nil { return err } + if count > MaxBlockLocatorsPerMsg { str := fmt.Sprintf("too many block locator hashes for message "+ "[count %v, max %v]", count, MaxBlockLocatorsPerMsg) @@ -70,14 +74,15 @@ func (msg *MsgGetHeaders) BtcDecode(r io.Reader, pver uint32, enc MessageEncodin msg.BlockLocatorHashes = make([]*chainhash.Hash, 0, count) for i := uint64(0); i < count; i++ { hash := &locatorHashes[i] - err := readElement(r, hash) + _, err := io.ReadFull(r, hash[:]) if err != nil { return err } msg.AddBlockLocatorHash(hash) } - return readElement(r, &msg.HashStop) + _, err = io.ReadFull(r, msg.HashStop[:]) + return err } // BtcEncode encodes the receiver to w using the bitcoin protocol encoding. 
@@ -91,24 +96,28 @@ func (msg *MsgGetHeaders) BtcEncode(w io.Writer, pver uint32, enc MessageEncodin return messageError("MsgGetHeaders.BtcEncode", str) } - err := writeElement(w, msg.ProtocolVersion) - if err != nil { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + littleEndian.PutUint32(buf[:4], msg.ProtocolVersion) + if _, err := w.Write(buf[:4]); err != nil { return err } - err = WriteVarInt(w, pver, uint64(count)) + err := WriteVarIntBuf(w, pver, uint64(count), buf) if err != nil { return err } for _, hash := range msg.BlockLocatorHashes { - err := writeElement(w, hash) + _, err := w.Write(hash[:]) if err != nil { return err } } - return writeElement(w, &msg.HashStop) + _, err = w.Write(msg.HashStop[:]) + return err } // Command returns the protocol command string for the message. This is part diff --git a/wire/msgheaders.go b/wire/msgheaders.go index 7d18d930e0..46edc59395 100644 --- a/wire/msgheaders.go +++ b/wire/msgheaders.go @@ -37,7 +37,10 @@ func (msg *MsgHeaders) AddBlockHeader(bh *BlockHeader) error { // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. // This is part of the Message interface implementation. 
func (msg *MsgHeaders) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error { - count, err := ReadVarInt(r, pver) + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + count, err := ReadVarIntBuf(r, pver, buf) if err != nil { return err } @@ -55,12 +58,12 @@ func (msg *MsgHeaders) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) msg.Headers = make([]*BlockHeader, 0, count) for i := uint64(0); i < count; i++ { bh := &headers[i] - err := readBlockHeader(r, pver, bh) + err := readBlockHeaderBuf(r, pver, bh, buf) if err != nil { return err } - txCount, err := ReadVarInt(r, pver) + txCount, err := ReadVarIntBuf(r, pver, buf) if err != nil { return err } @@ -88,13 +91,16 @@ func (msg *MsgHeaders) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) return messageError("MsgHeaders.BtcEncode", str) } - err := WriteVarInt(w, pver, uint64(count)) + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + err := WriteVarIntBuf(w, pver, uint64(count), buf) if err != nil { return err } for _, bh := range msg.Headers { - err := writeBlockHeader(w, pver, bh) + err := writeBlockHeaderBuf(w, pver, bh, buf) if err != nil { return err } @@ -103,7 +109,7 @@ func (msg *MsgHeaders) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) // of transactions on header messages. This is really just an // artifact of the way the original implementation serializes // block headers, but it is required. - err = WriteVarInt(w, pver, 0) + err = WriteVarIntBuf(w, pver, 0, buf) if err != nil { return err } diff --git a/wire/msginv.go b/wire/msginv.go index 5377b179c3..4be528dec6 100644 --- a/wire/msginv.go +++ b/wire/msginv.go @@ -46,7 +46,10 @@ func (msg *MsgInv) AddInvVect(iv *InvVect) error { // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. // This is part of the Message interface implementation. 
func (msg *MsgInv) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error { - count, err := ReadVarInt(r, pver) + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + count, err := ReadVarIntBuf(r, pver, buf) if err != nil { return err } @@ -63,7 +66,7 @@ func (msg *MsgInv) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) erro msg.InvList = make([]*InvVect, 0, count) for i := uint64(0); i < count; i++ { iv := &invList[i] - err := readInvVect(r, pver, iv) + err := readInvVectBuf(r, pver, iv, buf) if err != nil { return err } @@ -83,13 +86,16 @@ func (msg *MsgInv) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) erro return messageError("MsgInv.BtcEncode", str) } - err := WriteVarInt(w, pver, uint64(count)) + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + err := WriteVarIntBuf(w, pver, uint64(count), buf) if err != nil { return err } for _, iv := range msg.InvList { - err := writeInvVect(w, pver, iv) + err := writeInvVectBuf(w, pver, iv, buf) if err != nil { return err } diff --git a/wire/msgmerkleblock.go b/wire/msgmerkleblock.go index d2ee472178..eacbdc5847 100644 --- a/wire/msgmerkleblock.go +++ b/wire/msgmerkleblock.go @@ -49,18 +49,21 @@ func (msg *MsgMerkleBlock) BtcDecode(r io.Reader, pver uint32, enc MessageEncodi return messageError("MsgMerkleBlock.BtcDecode", str) } - err := readBlockHeader(r, pver, &msg.Header) + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + err := readBlockHeaderBuf(r, pver, &msg.Header, buf) if err != nil { return err } - err = readElement(r, &msg.Transactions) - if err != nil { + if _, err := io.ReadFull(r, buf[:4]); err != nil { return err } + msg.Transactions = littleEndian.Uint32(buf[:4]) // Read num block locator hashes and limit to max. 
- count, err := ReadVarInt(r, pver) + count, err := ReadVarIntBuf(r, pver, buf) if err != nil { return err } @@ -76,14 +79,14 @@ func (msg *MsgMerkleBlock) BtcDecode(r io.Reader, pver uint32, enc MessageEncodi msg.Hashes = make([]*chainhash.Hash, 0, count) for i := uint64(0); i < count; i++ { hash := &hashes[i] - err := readElement(r, hash) + _, err := io.ReadFull(r, hash[:]) if err != nil { return err } msg.AddTxHash(hash) } - msg.Flags, err = ReadVarBytes(r, pver, maxFlagsPerMerkleBlock, + msg.Flags, err = ReadVarBytesBuf(r, pver, buf, maxFlagsPerMerkleBlock, "merkle block flags size") return err } @@ -111,28 +114,32 @@ func (msg *MsgMerkleBlock) BtcEncode(w io.Writer, pver uint32, enc MessageEncodi return messageError("MsgMerkleBlock.BtcDecode", str) } - err := writeBlockHeader(w, pver, &msg.Header) + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + err := writeBlockHeaderBuf(w, pver, &msg.Header, buf) if err != nil { return err } - err = writeElement(w, msg.Transactions) - if err != nil { + littleEndian.PutUint32(buf[:4], msg.Transactions) + if _, err := w.Write(buf[:4]); err != nil { return err } - err = WriteVarInt(w, pver, uint64(numHashes)) + err = WriteVarIntBuf(w, pver, uint64(numHashes), buf) if err != nil { return err } for _, hash := range msg.Hashes { - err = writeElement(w, hash) + _, err := w.Write(hash[:]) if err != nil { return err } } - return WriteVarBytes(w, pver, msg.Flags) + err = WriteVarBytesBuf(w, pver, msg.Flags, buf) + return err } // Command returns the protocol command string for the message. This is part diff --git a/wire/msgnotfound.go b/wire/msgnotfound.go index e867681668..23486d48b6 100644 --- a/wire/msgnotfound.go +++ b/wire/msgnotfound.go @@ -35,7 +35,10 @@ func (msg *MsgNotFound) AddInvVect(iv *InvVect) error { // BtcDecode decodes r using the bitcoin protocol encoding into the receiver. // This is part of the Message interface implementation. 
func (msg *MsgNotFound) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error { - count, err := ReadVarInt(r, pver) + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + count, err := ReadVarIntBuf(r, pver, buf) if err != nil { return err } @@ -52,7 +55,7 @@ func (msg *MsgNotFound) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) msg.InvList = make([]*InvVect, 0, count) for i := uint64(0); i < count; i++ { iv := &invList[i] - err := readInvVect(r, pver, iv) + err := readInvVectBuf(r, pver, iv, buf) if err != nil { return err } @@ -72,13 +75,16 @@ func (msg *MsgNotFound) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) return messageError("MsgNotFound.BtcEncode", str) } - err := WriteVarInt(w, pver, uint64(count)) + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + err := WriteVarIntBuf(w, pver, uint64(count), buf) if err != nil { return err } for _, iv := range msg.InvList { - err := writeInvVect(w, pver, iv) + err := writeInvVectBuf(w, pver, iv, buf) if err != nil { return err } diff --git a/wire/msgping.go b/wire/msgping.go index b2f346e0e1..dd5e61e6bf 100644 --- a/wire/msgping.go +++ b/wire/msgping.go @@ -32,10 +32,11 @@ func (msg *MsgPing) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) err // NOTE: > is not a mistake here. The BIP0031 was defined as AFTER // the version unlike most others. if pver > BIP0031Version { - err := readElement(r, &msg.Nonce) + nonce, err := binarySerializer.Uint64(r, littleEndian) if err != nil { return err } + msg.Nonce = nonce } return nil @@ -48,7 +49,7 @@ func (msg *MsgPing) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) err // NOTE: > is not a mistake here. The BIP0031 was defined as AFTER // the version unlike most others. 
if pver > BIP0031Version { - err := writeElement(w, msg.Nonce) + err := binarySerializer.PutUint64(w, littleEndian, msg.Nonce) if err != nil { return err } diff --git a/wire/msgpong.go b/wire/msgpong.go index eec80d8d5b..01e83792ef 100644 --- a/wire/msgpong.go +++ b/wire/msgpong.go @@ -31,7 +31,13 @@ func (msg *MsgPong) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) err return messageError("MsgPong.BtcDecode", str) } - return readElement(r, &msg.Nonce) + nonce, err := binarySerializer.Uint64(r, littleEndian) + if err != nil { + return err + } + msg.Nonce = nonce + + return nil } // BtcEncode encodes the receiver to w using the bitcoin protocol encoding. @@ -45,7 +51,7 @@ func (msg *MsgPong) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) err return messageError("MsgPong.BtcEncode", str) } - return writeElement(w, msg.Nonce) + return binarySerializer.PutUint64(w, littleEndian, msg.Nonce) } // Command returns the protocol command string for the message. This is part diff --git a/wire/msgreject.go b/wire/msgreject.go index a00eeff6f6..ea16dd19f4 100644 --- a/wire/msgreject.go +++ b/wire/msgreject.go @@ -81,21 +81,24 @@ func (msg *MsgReject) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) e } // Command that was rejected. - cmd, err := ReadVarString(r, pver) + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + cmd, err := readVarStringBuf(r, pver, buf) if err != nil { return err } msg.Cmd = cmd // Code indicating why the command was rejected. - err = readElement(r, &msg.Code) - if err != nil { + if _, err := io.ReadFull(r, buf[:1]); err != nil { return err } + msg.Code = RejectCode(buf[0]) // Human readable string with specific details (over and above the // reject code above) about why the command was rejected. 
- reason, err := ReadVarString(r, pver) + reason, err := readVarStringBuf(r, pver, buf) if err != nil { return err } @@ -104,7 +107,7 @@ func (msg *MsgReject) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) e // CmdBlock and CmdTx messages have an additional hash field that // identifies the specific block or transaction. if msg.Cmd == CmdBlock || msg.Cmd == CmdTx { - err := readElement(r, &msg.Hash) + _, err := io.ReadFull(r, msg.Hash[:]) if err != nil { return err } @@ -123,20 +126,23 @@ func (msg *MsgReject) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) e } // Command that was rejected. - err := WriteVarString(w, pver, msg.Cmd) + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + err := writeVarStringBuf(w, pver, msg.Cmd, buf) if err != nil { return err } // Code indicating why the command was rejected. - err = writeElement(w, msg.Code) - if err != nil { + buf[0] = byte(msg.Code) + if _, err := w.Write(buf[:1]); err != nil { return err } // Human readable string with specific details (over and above the // reject code above) about why the command was rejected. - err = WriteVarString(w, pver, msg.Reason) + err = writeVarStringBuf(w, pver, msg.Reason, buf) if err != nil { return err } @@ -144,7 +150,7 @@ func (msg *MsgReject) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) e // CmdBlock and CmdTx messages have an additional hash field that // identifies the specific block or transaction. if msg.Cmd == CmdBlock || msg.Cmd == CmdTx { - err := writeElement(w, &msg.Hash) + _, err := w.Write(msg.Hash[:]) if err != nil { return err } diff --git a/wire/msgtx.go b/wire/msgtx.go index 80a5fcc674..eab265c35d 100644 --- a/wire/msgtx.go +++ b/wire/msgtx.go @@ -5,10 +5,11 @@ package wire import ( - "bytes" + "errors" "fmt" "io" "strconv" + "strings" "github.com/btcsuite/btcd/chaincfg/chainhash" ) @@ -92,7 +93,7 @@ const ( // scripts per transaction being simultaneously deserialized by 125 // peers. 
Thus, the peak usage of the free list is 12,500 * 512 = // 6,400,000 bytes. - freeListMaxItems = 12500 + freeListMaxItems = 125 // maxWitnessItemsPerInput is the maximum number of witness items to // be read for the witness data for a single TxIn. This number is @@ -145,6 +146,10 @@ const ( WitnessFlag TxFlag = 0x01 ) +const scriptSlabSize = 1 << 22 + +type scriptSlab [scriptSlabSize]byte + // scriptFreeList defines a free list of byte slices (up to the maximum number // defined by the freeListMaxItems constant) that have a cap according to the // freeListMaxScriptSize constant. It is used to provide temporary buffers for @@ -153,7 +158,7 @@ const ( // // The caller can obtain a buffer from the free list by calling the Borrow // function and should return it via the Return function when done using it. -type scriptFreeList chan []byte +type scriptFreeList chan *scriptSlab // Borrow returns a byte slice from the free list with a length according the // provided size. A new buffer is allocated if there are any items available. @@ -162,18 +167,14 @@ type scriptFreeList chan []byte // a new buffer of the appropriate size is allocated and returned. It is safe // to attempt to return said buffer via the Return function as it will be // ignored and allowed to go the garbage collector. -func (c scriptFreeList) Borrow(size uint64) []byte { - if size > freeListMaxScriptSize { - return make([]byte, size) - } - - var buf []byte +func (c scriptFreeList) Borrow() *scriptSlab { + var buf *scriptSlab select { case buf = <-c: default: - buf = make([]byte, freeListMaxScriptSize) + buf = new(scriptSlab) } - return buf[:size] + return buf } // Return puts the provided byte slice back on the free list when it has a cap @@ -181,13 +182,7 @@ func (c scriptFreeList) Borrow(size uint64) []byte { // the Borrow function. 
Any slices that are not of the appropriate size, such // as those whose size is greater than the largest allowed free list item size // are simply ignored so they can go to the garbage collector. -func (c scriptFreeList) Return(buf []byte) { - // Ignore any buffers returned that aren't the expected size for the - // free list. - if cap(buf) != freeListMaxScriptSize { - return - } - +func (c scriptFreeList) Return(buf *scriptSlab) { // Return the buffer to the free list when it's not full. Otherwise let // it be garbage collected. select { @@ -200,7 +195,7 @@ func (c scriptFreeList) Return(buf []byte) { // Create the concurrent safe free list to use for script deserialization. As // previously described, this free list is maintained to significantly reduce // the number of allocations. -var scriptPool scriptFreeList = make(chan []byte, freeListMaxItems) +var scriptPool = make(scriptFreeList, freeListMaxItems) // OutPoint defines a bitcoin data type that is used to track previous // transaction outputs. @@ -218,6 +213,29 @@ func NewOutPoint(hash *chainhash.Hash, index uint32) *OutPoint { } } +// NewOutPointFromString returns a new bitcoin transaction outpoint parsed from +// the provided string, which should be in the format "hash:index". +func NewOutPointFromString(outpoint string) (*OutPoint, error) { + parts := strings.Split(outpoint, ":") + if len(parts) != 2 { + return nil, errors.New("outpoint should be of the form txid:index") + } + hash, err := chainhash.NewHashFromStr(parts[0]) + if err != nil { + return nil, err + } + + outputIndex, err := strconv.ParseUint(parts[1], 10, 32) + if err != nil { + return nil, fmt.Errorf("invalid output index: %v", err) + } + + return &OutPoint{ + Hash: *hash, + Index: uint32(outputIndex), + }, nil +} + // String returns the OutPoint in the human-readable form "hash:index". func (o OutPoint) String() string { // Allocate enough for hash string, colon, and 10 digits. 
Although @@ -267,7 +285,7 @@ func NewTxIn(prevOut *OutPoint, signatureScript []byte, witness [][]byte) *TxIn // a slice of byte slices, or a stack with one or many elements. type TxWitness [][]byte -// SerializeSize returns the number of bytes it would take to serialize the the +// SerializeSize returns the number of bytes it would take to serialize the // transaction input's witness. func (t TxWitness) SerializeSize() int { // A varint to signal the number of elements the witness has. @@ -332,13 +350,7 @@ func (msg *MsgTx) AddTxOut(to *TxOut) { // TxHash generates the Hash for the transaction. func (msg *MsgTx) TxHash() chainhash.Hash { - // Encode the transaction and calculate double sha256 on the result. - // Ignore the error returns since the only way the encode could fail - // is being out of memory or due to nil pointers, both of which would - // cause a run-time panic. - buf := bytes.NewBuffer(make([]byte, 0, msg.SerializeSizeStripped())) - _ = msg.SerializeNoWitness(buf) - return chainhash.DoubleHashH(buf.Bytes()) + return chainhash.DoubleHashRaw(msg.SerializeNoWitness) } // WitnessHash generates the hash of the transaction serialized according to @@ -348,9 +360,7 @@ func (msg *MsgTx) TxHash() chainhash.Hash { // is the same as its txid. func (msg *MsgTx) WitnessHash() chainhash.Hash { if msg.HasWitness() { - buf := bytes.NewBuffer(make([]byte, 0, msg.SerializeSize())) - _ = msg.Serialize(buf) - return chainhash.DoubleHashH(buf.Bytes()) + return chainhash.DoubleHashRaw(msg.Serialize) } return msg.TxHash() @@ -436,13 +446,25 @@ func (msg *MsgTx) Copy() *MsgTx { // See Deserialize for decoding transactions stored to disk, such as in a // database, as opposed to decoding transactions from the wire. 
func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error { - version, err := binarySerializer.Uint32(r, littleEndian) - if err != nil { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + sbuf := scriptPool.Borrow() + defer scriptPool.Return(sbuf) + + err := msg.btcDecode(r, pver, enc, buf, sbuf[:]) + return err +} + +func (msg *MsgTx) btcDecode(r io.Reader, pver uint32, enc MessageEncoding, + buf, sbuf []byte) error { + + if _, err := io.ReadFull(r, buf[:4]); err != nil { return err } - msg.Version = int32(version) + msg.Version = int32(littleEndian.Uint32(buf[:4])) - count, err := ReadVarInt(r, pver) + count, err := ReadVarIntBuf(r, pver, buf) if err != nil { return err } @@ -466,7 +488,7 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error // With the Segregated Witness specific fields decoded, we can // now read in the actual txin count. - count, err = ReadVarInt(r, pver) + count, err = ReadVarIntBuf(r, pver, buf) if err != nil { return err } @@ -482,35 +504,6 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error return messageError("MsgTx.BtcDecode", str) } - // returnScriptBuffers is a closure that returns any script buffers that - // were borrowed from the pool when there are any deserialization - // errors. This is only valid to call before the final step which - // replaces the scripts with the location in a contiguous buffer and - // returns them. - returnScriptBuffers := func() { - for _, txIn := range msg.TxIn { - if txIn == nil { - continue - } - - if txIn.SignatureScript != nil { - scriptPool.Return(txIn.SignatureScript) - } - - for _, witnessElem := range txIn.Witness { - if witnessElem != nil { - scriptPool.Return(witnessElem) - } - } - } - for _, txOut := range msg.TxOut { - if txOut == nil || txOut.PkScript == nil { - continue - } - scriptPool.Return(txOut.PkScript) - } - } - // Deserialize the inputs. 
var totalScriptSize uint64 txIns := make([]TxIn, count) @@ -520,17 +513,16 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error // and needs to be returned to the pool on error. ti := &txIns[i] msg.TxIn[i] = ti - err = readTxIn(r, pver, msg.Version, ti) + err = readTxInBuf(r, pver, msg.Version, ti, buf, sbuf) if err != nil { - returnScriptBuffers() return err } totalScriptSize += uint64(len(ti.SignatureScript)) + sbuf = sbuf[len(ti.SignatureScript):] } - count, err = ReadVarInt(r, pver) + count, err = ReadVarIntBuf(r, pver, buf) if err != nil { - returnScriptBuffers() return err } @@ -538,7 +530,6 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error // message. It would be possible to cause memory exhaustion and panics // without a sane upper bound on this count. if count > uint64(maxTxOutPerMessage) { - returnScriptBuffers() str := fmt.Sprintf("too many output transactions to fit into "+ "max message size [count %d, max %d]", count, maxTxOutPerMessage) @@ -553,12 +544,12 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error // and needs to be returned to the pool on error. to := &txOuts[i] msg.TxOut[i] = to - err = ReadTxOut(r, pver, msg.Version, to) + err = readTxOutBuf(r, pver, msg.Version, to, buf, sbuf) if err != nil { - returnScriptBuffers() return err } totalScriptSize += uint64(len(to.PkScript)) + sbuf = sbuf[len(to.PkScript):] } // If the transaction's flag byte isn't 0x00 at this point, then one or @@ -568,16 +559,14 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error // For each input, the witness is encoded as a stack // with one or more items. Therefore, we first read a // varint which encodes the number of stack items. 
- witCount, err := ReadVarInt(r, pver) + witCount, err := ReadVarIntBuf(r, pver, buf) if err != nil { - returnScriptBuffers() return err } // Prevent a possible memory exhaustion attack by // limiting the witCount value to a sane upper bound. if witCount > maxWitnessItemsPerInput { - returnScriptBuffers() str := fmt.Sprintf("too many witness items to fit "+ "into max message size [count %d, max %d]", witCount, maxWitnessItemsPerInput) @@ -589,23 +578,23 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error // item itself. txin.Witness = make([][]byte, witCount) for j := uint64(0); j < witCount; j++ { - txin.Witness[j], err = readScript( - r, pver, maxWitnessItemSize, "script witness item", + txin.Witness[j], err = readScriptBuf( + r, pver, buf, sbuf, maxWitnessItemSize, + "script witness item", ) if err != nil { - returnScriptBuffers() return err } totalScriptSize += uint64(len(txin.Witness[j])) + sbuf = sbuf[len(txin.Witness[j]):] } } } - msg.LockTime, err = binarySerializer.Uint32(r, littleEndian) - if err != nil { - returnScriptBuffers() + if _, err := io.ReadFull(r, buf[:4]); err != nil { return err } + msg.LockTime = littleEndian.Uint32(buf[:4]) // Create a single allocation to house all of the scripts and set each // input signature script and output public key script to the @@ -636,9 +625,6 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error msg.TxIn[i].SignatureScript = scripts[offset:end:end] offset += scriptSize - // Return the temporary script buffer to the pool. 
- scriptPool.Return(signatureScript) - for j := 0; j < len(msg.TxIn[i].Witness); j++ { // Copy each item within the witness stack for this // input into the contiguous buffer at the appropriate @@ -652,10 +638,6 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error end := offset + witnessElemSize msg.TxIn[i].Witness[j] = scripts[offset:end:end] offset += witnessElemSize - - // Return the temporary buffer used for the witness stack - // item to the pool. - scriptPool.Return(witnessElem) } } for i := 0; i < len(msg.TxOut); i++ { @@ -670,9 +652,6 @@ func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error end := offset + scriptSize msg.TxOut[i].PkScript = scripts[offset:end:end] offset += scriptSize - - // Return the temporary script buffer to the pool. - scriptPool.Return(pkScript) } return nil @@ -708,8 +687,18 @@ func (msg *MsgTx) DeserializeNoWitness(r io.Reader) error { // See Serialize for encoding transactions to be stored to disk, such as in a // database, as opposed to encoding transactions for the wire. 
func (msg *MsgTx) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error { - err := binarySerializer.PutUint32(w, littleEndian, uint32(msg.Version)) - if err != nil { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + err := msg.btcEncode(w, pver, enc, buf) + return err +} + +func (msg *MsgTx) btcEncode(w io.Writer, pver uint32, enc MessageEncoding, + buf []byte) error { + + littleEndian.PutUint32(buf[:4], uint32(msg.Version)) + if _, err := w.Write(buf[:4]); err != nil { return err } @@ -729,26 +718,26 @@ func (msg *MsgTx) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error } count := uint64(len(msg.TxIn)) - err = WriteVarInt(w, pver, count) + err := WriteVarIntBuf(w, pver, count, buf) if err != nil { return err } for _, ti := range msg.TxIn { - err = writeTxIn(w, pver, msg.Version, ti) + err = writeTxInBuf(w, pver, msg.Version, ti, buf) if err != nil { return err } } count = uint64(len(msg.TxOut)) - err = WriteVarInt(w, pver, count) + err = WriteVarIntBuf(w, pver, count, buf) if err != nil { return err } for _, to := range msg.TxOut { - err = WriteTxOut(w, pver, msg.Version, to) + err = WriteTxOutBuf(w, pver, msg.Version, to, buf) if err != nil { return err } @@ -759,14 +748,16 @@ func (msg *MsgTx) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error // within the transaction. if doWitness { for _, ti := range msg.TxIn { - err = writeTxWitness(w, pver, msg.Version, ti.Witness) + err = writeTxWitnessBuf(w, pver, msg.Version, ti.Witness, buf) if err != nil { return err } } } - return binarySerializer.PutUint32(w, littleEndian, msg.LockTime) + littleEndian.PutUint32(buf[:4], msg.LockTime) + _, err = w.Write(buf[:4]) + return err } // HasWitness returns false if none of the inputs within the transaction @@ -923,26 +914,58 @@ func NewMsgTx(version int32) *MsgTx { } } -// readOutPoint reads the next sequence of bytes from r as an OutPoint. 
-func readOutPoint(r io.Reader, pver uint32, version int32, op *OutPoint) error { +// readOutPointBuf reads the next sequence of bytes from r as an OutPoint. +// +// If b is non-nil, the provided buffer will be used for serializing small +// values. Otherwise a buffer will be drawn from the binarySerializer's pool +// and return when the method finishes. +// +// NOTE: b MUST either be nil or at least an 8-byte slice. +func readOutPointBuf(r io.Reader, pver uint32, version int32, op *OutPoint, + buf []byte) error { + _, err := io.ReadFull(r, op.Hash[:]) if err != nil { return err } - op.Index, err = binarySerializer.Uint32(r, littleEndian) + if _, err := io.ReadFull(r, buf[:4]); err != nil { + return err + } + op.Index = littleEndian.Uint32(buf[:4]) + + return nil +} + +// WriteOutPoint encodes op to the bitcoin protocol encoding for an OutPoint to +// w. +func WriteOutPoint(w io.Writer, pver uint32, version int32, op *OutPoint) error { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + err := writeOutPointBuf(w, pver, version, op, buf) return err } -// WriteOutPoint encodes op to the bitcoin protocol encoding for an OutPoint +// writeOutPointBuf encodes op to the bitcoin protocol encoding for an OutPoint // to w. -func WriteOutPoint(w io.Writer, pver uint32, version int32, op *OutPoint) error { +// +// If b is non-nil, the provided buffer will be used for serializing small +// values. Otherwise a buffer will be drawn from the binarySerializer's pool +// and return when the method finishes. +// +// NOTE: b MUST either be nil or at least an 8-byte slice. 
+func writeOutPointBuf(w io.Writer, pver uint32, version int32, op *OutPoint, + buf []byte) error { + _, err := w.Write(op.Hash[:]) if err != nil { return err } - return binarySerializer.PutUint32(w, littleEndian, op.Index) + littleEndian.PutUint32(buf[:4], op.Index) + _, err = w.Write(buf[:4]) + return err } // readScript reads a variable length byte array that represents a transaction @@ -952,8 +975,16 @@ func WriteOutPoint(w io.Writer, pver uint32, version int32, op *OutPoint) error // memory exhaustion attacks and forced panics through malformed messages. The // fieldName parameter is only used for the error message so it provides more // context in the error. -func readScript(r io.Reader, pver uint32, maxAllowed uint32, fieldName string) ([]byte, error) { - count, err := ReadVarInt(r, pver) +// +// If b is non-nil, the provided buffer will be used for serializing small +// values. Otherwise a buffer will be drawn from the binarySerializer's pool +// and return when the method finishes. +// +// NOTE: b MUST either be nil or at least an 8-byte slice. +func readScriptBuf(r io.Reader, pver uint32, buf, s []byte, + maxAllowed uint32, fieldName string) ([]byte, error) { + + count, err := ReadVarIntBuf(r, pver, buf) if err != nil { return nil, err } @@ -967,58 +998,96 @@ func readScript(r io.Reader, pver uint32, maxAllowed uint32, fieldName string) ( return nil, messageError("readScript", str) } - b := scriptPool.Borrow(count) - _, err = io.ReadFull(r, b) + _, err = io.ReadFull(r, s[:count]) if err != nil { - scriptPool.Return(b) return nil, err } - return b, nil + return s[:count], nil } -// readTxIn reads the next sequence of bytes from r as a transaction input +// readTxInBuf reads the next sequence of bytes from r as a transaction input // (TxIn). 
-func readTxIn(r io.Reader, pver uint32, version int32, ti *TxIn) error { - err := readOutPoint(r, pver, version, &ti.PreviousOutPoint) +// +// If b is non-nil, the provided buffer will be used for serializing small +// values. Otherwise a buffer will be drawn from the binarySerializer's pool +// and return when the method finishes. +// +// NOTE: b MUST either be nil or at least an 8-byte slice. +func readTxInBuf(r io.Reader, pver uint32, version int32, ti *TxIn, + buf, s []byte) error { + + err := readOutPointBuf(r, pver, version, &ti.PreviousOutPoint, buf) if err != nil { return err } - ti.SignatureScript, err = readScript(r, pver, MaxMessagePayload, + ti.SignatureScript, err = readScriptBuf(r, pver, buf, s, MaxMessagePayload, "transaction input signature script") if err != nil { return err } - return readElement(r, &ti.Sequence) + if _, err := io.ReadFull(r, buf[:4]); err != nil { + return err + } + + ti.Sequence = littleEndian.Uint32(buf[:4]) + + return nil } -// writeTxIn encodes ti to the bitcoin protocol encoding for a transaction -// input (TxIn) to w. -func writeTxIn(w io.Writer, pver uint32, version int32, ti *TxIn) error { - err := WriteOutPoint(w, pver, version, &ti.PreviousOutPoint) +// writeTxInBuf encodes ti to the bitcoin protocol encoding for a transaction +// input (TxIn) to w. If b is non-nil, the provided buffer will be used for +// serializing small values. Otherwise a buffer will be drawn from the +// binarySerializer's pool and return when the method finishes. 
+func writeTxInBuf(w io.Writer, pver uint32, version int32, ti *TxIn, + buf []byte) error { + + err := writeOutPointBuf(w, pver, version, &ti.PreviousOutPoint, buf) if err != nil { return err } - err = WriteVarBytes(w, pver, ti.SignatureScript) + err = WriteVarBytesBuf(w, pver, ti.SignatureScript, buf) if err != nil { return err } - return binarySerializer.PutUint32(w, littleEndian, ti.Sequence) + littleEndian.PutUint32(buf[:4], ti.Sequence) + _, err = w.Write(buf[:4]) + + return err } // ReadTxOut reads the next sequence of bytes from r as a transaction output // (TxOut). func ReadTxOut(r io.Reader, pver uint32, version int32, to *TxOut) error { - err := readElement(r, &to.Value) + var s scriptSlab + + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + err := readTxOutBuf(r, pver, version, to, buf, s[:]) + return err +} + +// readTxOutBuf reads the next sequence of bytes from r as a transaction output +// (TxOut). If b is non-nil, the provided buffer will be used for serializing +// small values. Otherwise a buffer will be drawn from the binarySerializer's +// pool and return when the method finishes. +func readTxOutBuf(r io.Reader, pver uint32, version int32, to *TxOut, + buf, s []byte) error { + + _, err := io.ReadFull(r, buf) if err != nil { return err } + to.Value = int64(littleEndian.Uint64(buf)) - to.PkScript, err = readScript(r, pver, MaxMessagePayload, - "transaction output public key script") + to.PkScript, err = readScriptBuf( + r, pver, buf, s, MaxMessagePayload, + "transaction output public key script", + ) return err } @@ -1028,26 +1097,49 @@ func ReadTxOut(r io.Reader, pver uint32, version int32, to *TxOut) error { // NOTE: This function is exported in order to allow txscript to compute the // new sighashes for witness transactions (BIP0143). 
func WriteTxOut(w io.Writer, pver uint32, version int32, to *TxOut) error { - err := binarySerializer.PutUint64(w, littleEndian, uint64(to.Value)) + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + err := WriteTxOutBuf(w, pver, version, to, buf) + return err +} + +// WriteTxOutBuf encodes to into the bitcoin protocol encoding for a transaction +// output (TxOut) to w. If b is non-nil, the provided buffer will be used for +// serializing small values. Otherwise a buffer will be drawn from the +// binarySerializer's pool and return when the method finishes. +// +// NOTE: This function is exported in order to allow txscript to compute the +// new sighashes for witness transactions (BIP0143). +func WriteTxOutBuf(w io.Writer, pver uint32, version int32, to *TxOut, + buf []byte) error { + + littleEndian.PutUint64(buf, uint64(to.Value)) + _, err := w.Write(buf) if err != nil { return err } - return WriteVarBytes(w, pver, to.PkScript) + return WriteVarBytesBuf(w, pver, to.PkScript, buf) } -// writeTxWitness encodes the bitcoin protocol encoding for a transaction -// input's witness into to w. -func writeTxWitness(w io.Writer, pver uint32, version int32, wit [][]byte) error { - err := WriteVarInt(w, pver, uint64(len(wit))) +// writeTxWitnessBuf encodes the bitcoin protocol encoding for a transaction +// input's witness into to w. If b is non-nil, the provided buffer will be used +// for serializing small values. Otherwise a buffer will be drawn from the +// binarySerializer's pool and return when the method finishes. 
+func writeTxWitnessBuf(w io.Writer, pver uint32, version int32, wit [][]byte, + buf []byte) error { + + err := WriteVarIntBuf(w, pver, uint64(len(wit)), buf) if err != nil { return err } for _, item := range wit { - err = WriteVarBytes(w, pver, item) + err = WriteVarBytesBuf(w, pver, item, buf) if err != nil { return err } } + return nil } diff --git a/wire/msgtx_test.go b/wire/msgtx_test.go index 66965043e6..5ec753b62d 100644 --- a/wire/msgtx_test.go +++ b/wire/msgtx_test.go @@ -13,6 +13,7 @@ import ( "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/davecgh/go-spew/spew" + "github.com/stretchr/testify/require" ) // TestTx tests the MsgTx API. @@ -778,6 +779,76 @@ func TestTxWitnessSize(t *testing.T) { } } +// TestTxOutPointFromString performs tests to ensure that the outpoint string +// parser works as expected. +func TestTxOutPointFromString(t *testing.T) { + hashFromStr := func(hash string) chainhash.Hash { + h, _ := chainhash.NewHashFromStr(hash) + return *h + } + + tests := []struct { + name string + input string + result *OutPoint + err bool + }{ + { + name: "normal outpoint 1", + input: "2ebd15a7e758d5f4c7c74181b99e5b8586f88e0682dc13e09d92612a2b2bb0a2:1", + result: &OutPoint{ + Hash: hashFromStr("2ebd15a7e758d5f4c7c74181b99e5b8586f88e0682dc13e09d92612a2b2bb0a2"), + Index: 1, + }, + err: false, + }, + { + name: "normal outpoint 2", + input: "94c7762a68ff164352bd31fd95fa875204e811c09acef40ba781787eb28e3b55:42", + result: &OutPoint{ + Hash: hashFromStr("94c7762a68ff164352bd31fd95fa875204e811c09acef40ba781787eb28e3b55"), + Index: 42, + }, + err: false, + }, + { + name: "big index outpoint", + input: "94c7762a68ff164352bd31fd95fa875204e811c09acef40ba781787eb28e3b55:2147484242", + result: &OutPoint{ + Hash: hashFromStr("94c7762a68ff164352bd31fd95fa875204e811c09acef40ba781787eb28e3b55"), + Index: 2147484242, + }, + err: false, + }, + { + name: "bad string", + input: "not_outpoint_not_outpoint_not_outpoint", + result: nil, + err: true, + }, + { + name: 
"empty string", + input: "", + result: nil, + err: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + outpoint, err := NewOutPointFromString(test.input) + + isErr := (err != nil) + require.Equal(t, isErr, test.err) + + if !isErr { + require.Equal(t, test.result, outpoint) + } + }) + + } +} + // multiTx is a MsgTx with an input and output and used in various tests. var multiTx = &MsgTx{ Version: 1, diff --git a/wire/netaddress.go b/wire/netaddress.go index 5a2610bccc..e5c8eeea17 100644 --- a/wire/netaddress.go +++ b/wire/netaddress.go @@ -5,7 +5,6 @@ package wire import ( - "encoding/binary" "io" "net" "time" @@ -89,31 +88,60 @@ func NewNetAddress(addr *net.TCPAddr, services ServiceFlag) *NetAddress { // version and whether or not the timestamp is included per ts. Some messages // like version do not include the timestamp. func readNetAddress(r io.Reader, pver uint32, na *NetAddress, ts bool) error { - var ip [16]byte + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + + err := readNetAddressBuf(r, pver, na, ts, buf) + return err +} + +// readNetAddressBuf reads an encoded NetAddress from r depending on the +// protocol version and whether or not the timestamp is included per ts. Some +// messages like version do not include the timestamp. +// +// If b is non-nil, the provided buffer will be used for serializing small +// values. Otherwise a buffer will be drawn from the binarySerializer's pool +// and return when the method finishes. +// +// NOTE: b MUST either be nil or at least an 8-byte slice. +func readNetAddressBuf(r io.Reader, pver uint32, na *NetAddress, ts bool, + buf []byte) error { + + var ( + timestamp time.Time + services ServiceFlag + ip [16]byte + port uint16 + ) // NOTE: The bitcoin protocol uses a uint32 for the timestamp so it will // stop working somewhere around 2106. 
Also timestamp wasn't added until // protocol version >= NetAddressTimeVersion if ts && pver >= NetAddressTimeVersion { - err := readElement(r, (*uint32Time)(&na.Timestamp)) - if err != nil { + if _, err := io.ReadFull(r, buf[:4]); err != nil { return err } + timestamp = time.Unix(int64(littleEndian.Uint32(buf[:4])), 0) } - err := readElements(r, &na.Services, &ip) - if err != nil { + if _, err := io.ReadFull(r, buf); err != nil { return err } + services = ServiceFlag(littleEndian.Uint64(buf)) + + if _, err := io.ReadFull(r, ip[:]); err != nil { + return err + } + // Sigh. Bitcoin protocol mixes little and big endian. - port, err := binarySerializer.Uint16(r, bigEndian) - if err != nil { + if _, err := io.ReadFull(r, buf[:2]); err != nil { return err } + port = bigEndian.Uint16(buf[:2]) *na = NetAddress{ - Timestamp: na.Timestamp, - Services: na.Services, + Timestamp: timestamp, + Services: services, IP: net.IP(ip[:]), Port: port, } @@ -124,26 +152,50 @@ func readNetAddress(r io.Reader, pver uint32, na *NetAddress, ts bool) error { // version and whether or not the timestamp is included per ts. Some messages // like version do not include the timestamp. func writeNetAddress(w io.Writer, pver uint32, na *NetAddress, ts bool) error { + buf := binarySerializer.Borrow() + defer binarySerializer.Return(buf) + err := writeNetAddressBuf(w, pver, na, ts, buf) + + return err +} + +// writeNetAddressBuf serializes a NetAddress to w depending on the protocol +// version and whether or not the timestamp is included per ts. Some messages +// like version do not include the timestamp. +// +// If b is non-nil, the provided buffer will be used for serializing small +// values. Otherwise a buffer will be drawn from the binarySerializer's pool +// and return when the method finishes. +// +// NOTE: b MUST either be nil or at least an 8-byte slice. 
+func writeNetAddressBuf(w io.Writer, pver uint32, na *NetAddress, ts bool, buf []byte) error { // NOTE: The bitcoin protocol uses a uint32 for the timestamp so it will // stop working somewhere around 2106. Also timestamp wasn't added until // until protocol version >= NetAddressTimeVersion. if ts && pver >= NetAddressTimeVersion { - err := writeElement(w, uint32(na.Timestamp.Unix())) - if err != nil { + littleEndian.PutUint32(buf[:4], uint32(na.Timestamp.Unix())) + if _, err := w.Write(buf[:4]); err != nil { return err } } + littleEndian.PutUint64(buf, uint64(na.Services)) + if _, err := w.Write(buf); err != nil { + return err + } + // Ensure to always write 16 bytes even if the ip is nil. var ip [16]byte if na.IP != nil { copy(ip[:], na.IP.To16()) } - err := writeElements(w, na.Services, ip) - if err != nil { + if _, err := w.Write(ip[:]); err != nil { return err } // Sigh. Bitcoin protocol mixes little and big endian. - return binary.Write(w, bigEndian, na.Port) + bigEndian.PutUint16(buf[:2], na.Port) + _, err := w.Write(buf[:2]) + + return err } diff --git a/wire/netaddressv2.go b/wire/netaddressv2.go index 15f7916456..ccad266ace 100644 --- a/wire/netaddressv2.go +++ b/wire/netaddressv2.go @@ -48,7 +48,7 @@ func maxNetAddressV2Payload() uint32 { plen += 1 // The largest address is 512 bytes. Even though it will not be a valid - // address, we should read and ignore it. The preceeding varint to + // address, we should read and ignore it. The preceding varint to // store 512 bytes is 3 bytes long. This gives us a total of 515 bytes. plen += 515 diff --git a/wire/protocol.go b/wire/protocol.go index 3b414ec3f1..baeec05369 100644 --- a/wire/protocol.go +++ b/wire/protocol.go @@ -60,6 +60,12 @@ const ( AddrV2Version uint32 = 70016 ) +const ( + // NodeNetworkLimitedBlockThreshold is the number of blocks that a node + // broadcasting SFNodeNetworkLimited MUST be able to serve from the tip. 
+ NodeNetworkLimitedBlockThreshold = 288 +) + // ServiceFlag identifies services supported by a bitcoin peer. type ServiceFlag uint64 @@ -93,18 +99,23 @@ const ( // SFNode2X is a flag used to indicate a peer is running the Segwit2X // software. SFNode2X + + // SFNodeNetWorkLimited is a flag used to indicate a peer supports serving + // the last 288 blocks. + SFNodeNetworkLimited = 1 << 10 ) // Map of service flags back to their constant names for pretty printing. var sfStrings = map[ServiceFlag]string{ - SFNodeNetwork: "SFNodeNetwork", - SFNodeGetUTXO: "SFNodeGetUTXO", - SFNodeBloom: "SFNodeBloom", - SFNodeWitness: "SFNodeWitness", - SFNodeXthin: "SFNodeXthin", - SFNodeBit5: "SFNodeBit5", - SFNodeCF: "SFNodeCF", - SFNode2X: "SFNode2X", + SFNodeNetwork: "SFNodeNetwork", + SFNodeGetUTXO: "SFNodeGetUTXO", + SFNodeBloom: "SFNodeBloom", + SFNodeWitness: "SFNodeWitness", + SFNodeXthin: "SFNodeXthin", + SFNodeBit5: "SFNodeBit5", + SFNodeCF: "SFNodeCF", + SFNode2X: "SFNode2X", + SFNodeNetworkLimited: "SFNodeNetworkLimited", } // orderedSFStrings is an ordered list of service flags from highest to @@ -118,6 +129,12 @@ var orderedSFStrings = []ServiceFlag{ SFNodeBit5, SFNodeCF, SFNode2X, + SFNodeNetworkLimited, +} + +// HasFlag returns a bool indicating if the service has the given flag. +func (f ServiceFlag) HasFlag(s ServiceFlag) bool { + return f&s == s } // String returns the ServiceFlag in human-readable form. diff --git a/wire/protocol_test.go b/wire/protocol_test.go index 60bd0533e5..eeeffb600a 100644 --- a/wire/protocol_test.go +++ b/wire/protocol_test.go @@ -4,7 +4,11 @@ package wire -import "testing" +import ( + "testing" + + "github.com/stretchr/testify/require" +) // TestServiceFlagStringer tests the stringized output for service flag types. 
func TestServiceFlagStringer(t *testing.T) { @@ -21,7 +25,8 @@ func TestServiceFlagStringer(t *testing.T) { {SFNodeBit5, "SFNodeBit5"}, {SFNodeCF, "SFNodeCF"}, {SFNode2X, "SFNode2X"}, - {0xffffffff, "SFNodeNetwork|SFNodeGetUTXO|SFNodeBloom|SFNodeWitness|SFNodeXthin|SFNodeBit5|SFNodeCF|SFNode2X|0xffffff00"}, + {SFNodeNetworkLimited, "SFNodeNetworkLimited"}, + {0xffffffff, "SFNodeNetwork|SFNodeGetUTXO|SFNodeBloom|SFNodeWitness|SFNodeXthin|SFNodeBit5|SFNodeCF|SFNode2X|SFNodeNetworkLimited|0xfffffb00"}, } t.Logf("Running %d tests", len(tests)) @@ -58,3 +63,19 @@ func TestBitcoinNetStringer(t *testing.T) { } } } + +func TestHasFlag(t *testing.T) { + tests := []struct { + in ServiceFlag + check ServiceFlag + want bool + }{ + {0, SFNodeNetwork, false}, + {SFNodeNetwork | SFNodeNetworkLimited | SFNodeWitness, SFNodeBloom, false}, + {SFNodeNetwork | SFNodeNetworkLimited | SFNodeWitness, SFNodeNetworkLimited, true}, + } + + for _, test := range tests { + require.Equal(t, test.want, test.in.HasFlag(test.check)) + } +} diff --git a/wire/testdata/block-0000000000000000001602407ac49862a7bca9d00f7f402db20b7be2f5de59d2.blk b/wire/testdata/block-0000000000000000001602407ac49862a7bca9d00f7f402db20b7be2f5de59d2.blk new file mode 100644 index 0000000000..aacdb7aa99 Binary files /dev/null and b/wire/testdata/block-0000000000000000001602407ac49862a7bca9d00f7f402db20b7be2f5de59d2.blk differ diff --git a/wire/testdata/block-00000000000000000021868c2cefc52a480d173c849412fe81c4e5ab806f94ab.blk b/wire/testdata/block-00000000000000000021868c2cefc52a480d173c849412fe81c4e5ab806f94ab.blk new file mode 100644 index 0000000000..cff5e0f35f Binary files /dev/null and b/wire/testdata/block-00000000000000000021868c2cefc52a480d173c849412fe81c4e5ab806f94ab.blk differ