diff --git a/checkpoints.go b/checkpoints.go new file mode 100644 index 000000000..7a105bb9e --- /dev/null +++ b/checkpoints.go @@ -0,0 +1,112 @@ +package neutrino + +import ( + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" +) + +// BlockHeaderCheckpoints manages header checkpoints for efficient +// header validation by providing methods to fetch and find checkpoints based +// on block height. +type BlockHeaderCheckpoints struct { + // checkpoints is a slice of Checkpoint structs from the blockchain's + // parameters. + checkpoints []chaincfg.Checkpoint + + // genesishash is the hash of the genesis block for the blockchain, + // serving as the initial checkpoint. + genesishash *chainhash.Hash +} + +// NewBlockHeaderCheckpoints creates a new BlockHeaderCheckpoints instance +// using the provided blockchain parameters, which include the genesis hash +// and predefined checkpoints. +func NewBlockHeaderCheckpoints(params chaincfg.Params) *BlockHeaderCheckpoints { + return &BlockHeaderCheckpoints{ + checkpoints: params.Checkpoints, + genesishash: params.GenesisHash, + } +} + +// FetchCheckpoint returns the height and hash of the checkpoint at the given +// index. Special indices -1 and -2 return no checkpoint and the genesis +// block respectively. +func (b *BlockHeaderCheckpoints) FetchCheckpoint(idx int32) (uint32, + *chainhash.Hash) { + + // Handle special index values for no checkpoint and genesis block. + if idx == -1 { + return 0, nil + } + if idx == -2 { + return 0, b.genesishash + } + + // Return the checkpoint corresponding to the provided index. + checkpoint := b.checkpoints[idx] + + return uint32(checkpoint.Height), checkpoint.Hash +} + +// FindNextHeaderCheckpoint finds the next checkpoint after the given height, +// returning its index, height, and hash. If no next checkpoint is found, +// it returns -1 for the index and nil for the hash. 
+func (b *BlockHeaderCheckpoints) FindNextHeaderCheckpoint(height uint32) ( + int32, uint32, *chainhash.Hash) { + + // TODO: handle no checkpoints case. + + checkpoints := b.checkpoints + + // If height is after the last checkpoint, return no next checkpoint. + finalCheckpoint := &checkpoints[len(checkpoints)-1] + if height >= uint32(finalCheckpoint.Height) { + return -1, 0, nil + } + + // Iterate to find the next checkpoint after the given height. + nextCheckpoint := finalCheckpoint + var index int32 + for i := int32(len(checkpoints) - 2); i >= 0; i-- { + if height >= uint32(checkpoints[i].Height) { + break + } + nextCheckpoint = &checkpoints[i] + index = i + } + + return index, uint32(nextCheckpoint.Height), nextCheckpoint.Hash +} + +// FindPreviousHeaderCheckpoint finds the latest checkpoint before the given +// height, returning its index, height, and hash. If the height is before any +// checkpoints, it returns the genesis block as the previous checkpoint. +func (b *BlockHeaderCheckpoints) FindPreviousHeaderCheckpoint(height uint32) ( + int32, uint32, *chainhash.Hash) { + + // Default to genesis block if no previous checkpoint is found. + prevCheckpointIndex := int32(-1) + prevCheckpointHeight := uint32(0) + prevCheckpointHash := b.genesishash + + // Iterate through checkpoints to find the previous checkpoint. + checkpoints := b.checkpoints + for i, checkpoint := range checkpoints { + if uint32(checkpoint.Height) < height { + prevCheckpointIndex = int32(i) + prevCheckpointHeight = uint32(checkpoint.Height) + prevCheckpointHash = checkpoint.Hash + } else { + // Checkpoints are sorted, so break on the first higher + // height. + break + } + } + + return prevCheckpointIndex, prevCheckpointHeight, prevCheckpointHash +} + +// Len returns the number of predefined checkpoints. 
+func (b *BlockHeaderCheckpoints) Len() int32 { + return int32(len(b.checkpoints)) +} diff --git a/go.mod b/go.mod index 9d5f1b921..836ff2a0b 100644 --- a/go.mod +++ b/go.mod @@ -10,9 +10,10 @@ require ( github.com/btcsuite/btcwallet/walletdb v1.3.5 github.com/btcsuite/btcwallet/wtxmgr v1.5.0 github.com/davecgh/go-spew v1.1.1 - github.com/lightninglabs/neutrino/cache v1.1.0 + github.com/lightninglabs/neutrino/cache v1.1.2 github.com/lightningnetwork/lnd/queue v1.0.1 - github.com/stretchr/testify v1.8.1 + github.com/lightningnetwork/lnd/tlv v1.2.3 + github.com/stretchr/testify v1.8.2 ) require ( @@ -26,11 +27,13 @@ require ( github.com/decred/dcrd/lru v1.0.0 // indirect github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 // indirect github.com/lightningnetwork/lnd/clock v1.0.1 // indirect + github.com/lightningnetwork/lnd/fn v1.0.4 // indirect github.com/lightningnetwork/lnd/ticker v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.etcd.io/bbolt v1.3.5-0.20200615073812-232d8fc87f50 // indirect - golang.org/x/crypto v0.1.0 // indirect - golang.org/x/sys v0.1.0 // indirect + golang.org/x/crypto v0.7.0 // indirect + golang.org/x/exp v0.0.0-20231226003508-02704c960a9b // indirect + golang.org/x/sys v0.13.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index aec8781e4..0ccb9f936 100644 --- a/go.sum +++ b/go.sum @@ -70,19 +70,23 @@ github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJS github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.0 
h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lightninglabs/neutrino/cache v1.1.0 h1:szZIhVabiQIsGzJjhvo76sj05Au+zVotj2M34EquGME= -github.com/lightninglabs/neutrino/cache v1.1.0/go.mod h1:XJNcgdOw1LQnanGjw8Vj44CvguYA25IMKjWFZczwZuo= +github.com/lightninglabs/neutrino/cache v1.1.2 h1:C9DY/DAPaPxbFC+xNNEI/z1SJY9GS3shmlu5hIQ798g= +github.com/lightninglabs/neutrino/cache v1.1.2/go.mod h1:XJNcgdOw1LQnanGjw8Vj44CvguYA25IMKjWFZczwZuo= github.com/lightningnetwork/lnd/clock v1.0.1 h1:QQod8+m3KgqHdvVMV+2DRNNZS1GRFir8mHZYA+Z2hFo= github.com/lightningnetwork/lnd/clock v1.0.1/go.mod h1:KnQudQ6w0IAMZi1SgvecLZQZ43ra2vpDNj7H/aasemg= +github.com/lightningnetwork/lnd/fn v1.0.4 h1:n4iGRRoS+XHqNbOrsXIvweps/QfWk+moO7FiYPPFI8k= +github.com/lightningnetwork/lnd/fn v1.0.4/go.mod h1:K9gbvdl5z4XmRcqWUVqvvVcuRKtmq9BNQ+cWYlk+vjw= github.com/lightningnetwork/lnd/queue v1.0.1 h1:jzJKcTy3Nj5lQrooJ3aaw9Lau3I0IwvQR5sqtjdv2R0= github.com/lightningnetwork/lnd/queue v1.0.1/go.mod h1:vaQwexir73flPW43Mrm7JOgJHmcEFBWWSl9HlyASoms= github.com/lightningnetwork/lnd/ticker v1.0.0 h1:S1b60TEGoTtCe2A0yeB+ecoj/kkS4qpwh6l+AkQEZwU= github.com/lightningnetwork/lnd/ticker v1.0.0/go.mod h1:iaLXJiVgI1sPANIF2qYYUJXjoksPNvGNYowB8aRbpX0= +github.com/lightningnetwork/lnd/tlv v1.2.3 h1:If5ibokA/UoCBGuCKaY6Vn2SJU0l9uAbehCnhTZjEP8= +github.com/lightningnetwork/lnd/tlv v1.2.3/go.mod h1:zDkmqxOczP6LaLTvSFDQ1SJUfHcQRCMKFj93dn3eMB8= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -94,6 +98,7 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J 
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -101,8 +106,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= go.etcd.io/bbolt v1.3.5-0.20200615073812-232d8fc87f50 h1:ASw9n1EHMftwnP3Az4XW6e308+gNsrHzmdhd0Olz9Hs= @@ -111,8 +116,10 @@ golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/exp v0.0.0-20231226003508-02704c960a9b h1:kLiC65FbiHWFAOu+lxwNPujcsl8VYyTYYEZnsOO1WK4= +golang.org/x/exp v0.0.0-20231226003508-02704c960a9b/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -131,8 +138,8 @@ golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -147,8 +154,8 @@ 
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/log.go b/log.go index 5e57fbbc3..9ed37a0a1 100644 --- a/log.go +++ b/log.go @@ -12,6 +12,7 @@ import ( "github.com/lightninglabs/neutrino/filterdb" "github.com/lightninglabs/neutrino/pushtx" "github.com/lightninglabs/neutrino/query" + "github.com/lightninglabs/neutrino/sideload" ) // log is a logger that is initialized with no output filters. 
This @@ -45,4 +46,5 @@ func UseLogger(logger btclog.Logger) { query.UseLogger(logger) filterdb.UseLogger(logger) chanutils.UseLogger(logger) + sideload.UseLogger(logger) } diff --git a/neutrino.go b/neutrino.go index ef36f42af..2d77a00a5 100644 --- a/neutrino.go +++ b/neutrino.go @@ -6,6 +6,7 @@ package neutrino import ( "errors" "fmt" + "io" "net" "strconv" "strings" @@ -28,8 +29,10 @@ import ( "github.com/lightninglabs/neutrino/chanutils" "github.com/lightninglabs/neutrino/filterdb" "github.com/lightninglabs/neutrino/headerfs" + "github.com/lightninglabs/neutrino/headerlist" "github.com/lightninglabs/neutrino/pushtx" "github.com/lightninglabs/neutrino/query" + "github.com/lightninglabs/neutrino/sideload" ) // These are exported variables so they can be changed by users. @@ -84,6 +87,8 @@ var ( // DefaultBlockCacheSize is the size (in bytes) of blocks neutrino will // keep in memory if no size is specified in the neutrino.Config. DefaultBlockCacheSize uint64 = 4096 * 10 * 1000 // 40 MB + + SideloadRange uint32 = 2000 ) // isDevNetwork indicates if the chain is a private development network, namely @@ -113,6 +118,18 @@ type peerState struct { outboundGroups map[string]int } +type SideloadOpt struct { + // SourceType is the format type of the sideload source. + SourceType sideload.SourceType + + // Reader is the sideload's source. + Reader io.ReadSeeker + + SkipVerify bool + + SideloadRange uint32 +} + // Count returns the count of all known peers. func (ps *peerState) Count() int { return len(ps.outboundPeers) + len(ps.persistentPeers) @@ -618,6 +635,8 @@ type Config struct { // not, replies with a getdata message. // 3. Neutrino sends the raw transaction. 
BroadcastTimeout time.Duration + + BlkHdrSideload *SideloadOpt } // peerSubscription holds a peer subscription which we'll notify about any @@ -678,6 +697,8 @@ type ChainService struct { // nolint:maligned dialer func(net.Addr) (net.Conn, error) broadcastTimeout time.Duration + + BlockHeaderSideloader *sideload.SideLoader[*wire.BlockHeader] } // NewChainService returns a new chain service configured to connect to the @@ -814,6 +835,46 @@ func NewChainService(cfg Config) (*ChainService, error) { return nil, err } s.blockManager = bm + + if cfg.BlkHdrSideload != nil { + blockHdrChkptMgr := NewBlockHeaderCheckpoints(s.chainParams) + blockHeaderWriter := &blkHeaderWriter{ + store: s.BlockHeaders, + tipHeight: s.blockManager.headerTip, + } + + loader := sideload.LoaderInternal[*wire.BlockHeader]{ + HeaderWriter: blockHeaderWriter, + HeaderValidator: &blockHeaderValidator{ + BlockHeaderCheckpoints: blockHdrChkptMgr, + headerList: bm.headerList, + minRetargetTimespan: bm.minRetargetTimespan, + maxRetargetTimespan: bm.maxRetargetTimespan, + blocksPerRetarget: bm.blocksPerRetarget, + ChainParams: s.chainParams, + TimeSource: s.timeSource, + nextCheckpoint: bm.nextCheckpoint, + store: s.BlockHeaders, + }, + SkipVerify: cfg.BlkHdrSideload.SkipVerify, + Chkpt: blockHdrChkptMgr, + SideloadRange: SideloadRange, + } + + blkHdrSideload, err := sideload.NewBlockHeaderLoader( + &sideload.LoaderConfig[*wire.BlockHeader]{ + LoaderInternal: loader, + SourceType: cfg.BlkHdrSideload.SourceType, + Reader: cfg.BlkHdrSideload.Reader, + Chain: s.chainParams.Net, + }) + if err != nil { + return nil, err + } + + s.BlockHeaderSideloader = blkHdrSideload + } + s.blockSubscriptionMgr = blockntfns.NewSubscriptionManager(s.blockManager) // Only setup a function to return new addresses to connect to when not @@ -1605,8 +1666,13 @@ func (s *ChainService) Start() error { return nil } - // Start the address manager and block manager, both of which are - // needed by peers. 
+ if s.BlockHeaderSideloader != nil { + err := s.BlockHeaderSideloader.Load() + if err != nil { + log.Warnf("error while sideloading: %v", err) + } + } + s.addrManager.Start() s.blockManager.Start() s.blockSubscriptionMgr.Start() @@ -1763,3 +1829,140 @@ func (o *onionAddr) Network() string { // Ensure onionAddr implements the net.Addr interface. var _ net.Addr = (*onionAddr)(nil) + +type blockHeaderValidator struct { + *BlockHeaderCheckpoints + headerList headerlist.Chain + minRetargetTimespan int64 // target timespan / adjustment factor + maxRetargetTimespan int64 // target timespan * adjustment factor + blocksPerRetarget int32 // target timespan / target time per block + // ChainParams is the chain that we're running on. + ChainParams chaincfg.Params + // TimeSource is used to access a time estimate based on the clocks of + // the connected peers. + TimeSource blockchain.MedianTimeSource + nextCheckpoint *chaincfg.Checkpoint + store headerfs.BlockHeaderStore +} + +func (b *blockHeaderValidator) Verify(headers []*wire.BlockHeader) bool { + if !areHeadersConnected(headers) { + log.Debug("headers do not connect") + + return false + } + + var node headerlist.Node + for _, header := range headers { + prevNode := b.headerList.Back() + prevHash := prevNode.Header.BlockHash() + if prevHash.IsEqual(&header.PrevBlock) { + err := b.checkHeaderSanity( + header, b.headerList, prevNode.Height, + &prevNode.Header, + ) + + if err != nil { + log.Debugf("failed sanity check: %v", err) + + return false + } + + node = headerlist.Node{ + Header: *header, + Height: prevNode.Height + 1, + } + b.headerList.PushBack(node) + } + } + + // Verify the header at the next checkpoint height matches. 
+ if b.nextCheckpoint != nil && node.Height == b.nextCheckpoint.Height { + nodeHash := node.Header.BlockHash() + if nodeHash.IsEqual(b.nextCheckpoint.Hash) { + log.Infof("Verified downloaded block "+ + "header against checkpoint at height "+ + "%d/hash %s", node.Height, nodeHash) + } else { + return false + } + } + + return true +} + +func (b *blockHeaderValidator) VerifyCheckpoint( + nextcheckpt *chainhash.Hash, headers []*wire.BlockHeader, + prevCheckpt *chainhash.Hash) bool { + + return true +} + +// checkHeaderSanity performs contextual and context-less checks on the passed +// wire.BlockHeader. This function calls blockchain.CheckBlockHeaderContext for +// the contextual check and blockchain.CheckBlockHeaderSanity for context-less +// checks. Copied from the blockamanager, line 2738. +func (b *blockHeaderValidator) checkHeaderSanity(blockHeader *wire.BlockHeader, + hList headerlist.Chain, prevNodeHeight int32, + prevNodeHeader *wire.BlockHeader) error { + + parentHeaderCtx := newLightHeaderCtx( + prevNodeHeight, prevNodeHeader, b.store, hList, + ) + + // Create a lightChainCtx as well. 
+ chainCtx := newLightChainCtx( + &b.ChainParams, b.blocksPerRetarget, b.minRetargetTimespan, + b.maxRetargetTimespan, + ) + + var emptyFlags blockchain.BehaviorFlags + err := blockchain.CheckBlockHeaderContext( + blockHeader, parentHeaderCtx, emptyFlags, chainCtx, true, + ) + if err != nil { + return err + } + + return blockchain.CheckBlockHeaderSanity( + blockHeader, b.ChainParams.PowLimit, b.TimeSource, + emptyFlags, + ) +} + +type blkHeaderWriter struct { + store headerfs.BlockHeaderStore + tipHeight uint32 +} + +func (b *blkHeaderWriter) ChainTip() (*chainhash.Hash, uint32, error) { + header, height, err := b.store.ChainTip() + + hash := header.BlockHash() + + return &hash, height, err +} + +func (b *blkHeaderWriter) Write(headers []*wire.BlockHeader) error { + headerWriteBatch := make([]headerfs.BlockHeader, 0, len(headers)) + + tipHeight := b.tipHeight + for _, header := range headers { + tipHeight++ + headerWriteBatch = append( + headerWriteBatch, headerfs.BlockHeader{ + BlockHeader: header, + Height: tipHeight, + }, + ) + } + + err := b.store.WriteHeaders(headerWriteBatch...) + if err != nil { + return err + } + + b.tipHeight = tipHeight + + return nil +} diff --git a/sideload/binary.go b/sideload/binary.go new file mode 100644 index 000000000..61609be7b --- /dev/null +++ b/sideload/binary.go @@ -0,0 +1,242 @@ +package sideload + +import ( + "bytes" + "errors" + "fmt" + "io" + + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/tlv" +) + +const ( + // blockHeaderSize is the size one block header occupies in bytes. + blockHeaderSize dataSize = 80 +) + +// binReader is an internal struct that holds all data the binReader needs +// to fetch headers. +// Each file has a header of varint size consisting of, +// +// dataType || extra metadata (e.g. filterType) || startHeight || endHeight +// || chain +// in that order. +type binReader struct { + // reader represents the source to be read. 
+ reader io.ReadSeeker + + // startHeight represents the height of the first header in the file. + startHeight uint32 + + // endHeight represents the height of the last header in the file. + endHeight uint32 + + // offset represents the distance required to read the first header from + // the file. + initialOffset int64 + + // chain represents the bitcoin network the headers in the file belong + // to. + chain wire.BitcoinNet + + // dataSize is the size, one header occupies in the file, in bytes. + dataSize dataSize +} + +// blkHdrBinReader is specialized for reading and decoding binary block headers. +// It embeds a general binary reader (binReader) and includes a specific decoder +// for block headers. +type blkHdrBinReader struct { + // binReader is a general binary data reader configured for blockchain data. + *binReader + + // headerDecoder is a function tailored to decode binary block headers. + headerDecoder[*wire.BlockHeader] +} + +// newBinaryBlkHdrLoader initializes a blkHdrBinReader with the given +// configuration. It sets up the binary loader for block headers specifically, +// using a provided configuration that includes the data source and +// pre-configures it to read block header data. +func newBinaryBlkHdrLoader(r io.ReadSeeker) (*blkHdrBinReader, error) { + b, err := newBinaryLoader(r, BlockHeaders) + if err != nil { + return nil, err + } + + return &blkHdrBinReader{ + binReader: b, + headerDecoder: blkHdrDecoder, + }, nil +} + +// newBinaryChainDataLoader initializes a Binary Loader. +func newBinaryLoader(reader io.ReadSeeker, + typeData dataType) (*binReader, error) { + + // Ensure we start reading from the beginning. + _, err := reader.Seek(0, io.SeekStart) + if err != nil { + return nil, fmt.Errorf("error seeking reader from start") + } + + // Create scratch buffer. 
+ scratch := [8]byte{} + + typeOfData, err := tlv.ReadVarInt(reader, &scratch) + if err != nil { + return nil, fmt.Errorf("error obtaining data type "+ + "of file %w", err) + } + + if dataType(typeOfData) != typeData { + return nil, fmt.Errorf("data type mismatch: got %v but "+ + "expected %v", dataType(typeOfData), typeData) + } + + var headerSize dataSize + switch typeData { + case BlockHeaders: + headerSize = blockHeaderSize + default: + return nil, errors.New("unsupported header type") + } + + // Read start height of block header file. + start, err := tlv.ReadVarInt(reader, &scratch) + if err != nil { + return nil, fmt.Errorf("error obtaining start height "+ + "of file %w", err) + } + + // Read end height of block header file. + end, err := tlv.ReadVarInt(reader, &scratch) + if err != nil { + return nil, fmt.Errorf("error obtaining end height of "+ + "file %w", err) + } + + // Read the bitcoin network, the headers in the header file belong to. + chainChar, err := tlv.ReadVarInt(reader, &scratch) + if err != nil { + return nil, fmt.Errorf("error obtaining chain of "+ + "file %w", err) + } + + // obtain space occupied by metadata as initial offset + initialOffset, err := reader.Seek(0, io.SeekCurrent) + if err != nil { + return nil, fmt.Errorf("unable to determine initial "+ + "offset: %v", err) + } + + return &binReader{ + reader: reader, + startHeight: uint32(start), + endHeight: uint32(end), + initialOffset: initialOffset, + chain: wire.BitcoinNet(chainChar), + dataSize: headerSize, + }, nil +} + +// headerDecoder type serializes the passed byte to the required header type. +type headerDecoder[T header] func([]byte) (T, error) + +// blkHdrDecoder serializes the passed data in bytes to *wire.BlockHeader type. +func blkHdrDecoder(data []byte) (*wire.BlockHeader, error) { + var blockHeader wire.BlockHeader + + headerReader := bytes.NewReader(data) + + // Finally, decode the raw bytes into a proper bitcoin header. 
+ if err := blockHeader.Deserialize(headerReader); err != nil { + return nil, fmt.Errorf("error deserializing block "+ + "header: %w", err) + } + + return &blockHeader, nil +} + +// readHeaders fetches headers from the binary file. +func readHeaders[T header](numHeaders uint32, reader io.ReadSeeker, + dataTypeSize dataSize, + decoder headerDecoder[T]) ([]T, error) { + + hdrs := make([]T, 0, numHeaders) + for i := uint32(0); i < numHeaders; i++ { + rawData := make([]byte, dataTypeSize) + + if _, err := reader.Read(rawData); err != nil { + if err == io.EOF { + break + } + return nil, err + } + + hdr, err := decoder(rawData) + + if err != nil { + return nil, err + } + hdrs = append(hdrs, hdr) + } + return hdrs, nil +} + +// FetchHeaders retrieves a specified number of block headers from the +// blkHdrBinReader's underlying data source. It utilizes a generic function, +// readHeaders, to perform the actual reading and decoding based on the +// blkHdrBinReader's configuration. +func (b *blkHdrBinReader) FetchHeaders(numHeaders uint32) ([]*wire.BlockHeader, + error) { + + return readHeaders[*wire.BlockHeader]( + numHeaders, b.reader, b.dataSize, b.headerDecoder, + ) +} + +// EndHeight function returns the height of the last header in the file. +func (b *binReader) EndHeight() uint32 { + return b.endHeight +} + +// StartHeight function returns the height of the first header in the file. +func (b *binReader) StartHeight() uint32 { + return b.startHeight +} + +// HeadersChain function returns the network the headers in the file belong to. +func (b *binReader) HeadersChain() wire.BitcoinNet { + return b.chain +} + +// SetHeight sets the reader's position for fetching headers from a specific +// block height. If height is -1, it resets to the start. Validates height +// within available range and adjusts the reader to the correct offset. +// Returns an error if the height is out of range or on seek failure. 
func (b *binReader) SetHeight(height int32) error {
	// A height of -1 means "rewind to the first header": seek back to
	// just past the file's metadata preamble (initialOffset).
	if height == -1 {
		_, err := b.reader.Seek(b.initialOffset, io.SeekStart)
		if err != nil {
			return ErrSetSeek(err)
		}

		return nil
	}

	// NOTE(review): this bound rejects height == endHeight even though
	// the offset arithmetic below would position the reader exactly on
	// the final header in the file — confirm whether the last header is
	// meant to be addressable here, or this is an off-by-one.
	if height < int32(b.startHeight) || height >= int32(b.endHeight) {
		return errors.New("height out of range")
	}

	// Headers are fixed-size records, so the byte offset for a given
	// block height is a simple linear function of its distance from the
	// first header, shifted past the metadata preamble.
	offset := int64(height-int32(b.startHeight))*int64(b.dataSize) +
		b.initialOffset

	_, err := b.reader.Seek(offset, io.SeekStart)
	if err != nil {
		return ErrSetSeek(err)
	}

	return nil
}
diff --git a/sideload/binary_test.go b/sideload/binary_test.go
new file mode 100644
index 000000000..0103da1ea
--- /dev/null
+++ b/sideload/binary_test.go
@@ -0,0 +1,47 @@
package sideload

import (
	"testing"

	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/integration/rpctest"
	"github.com/btcsuite/btcd/wire"
	"github.com/stretchr/testify/require"
)

// testNewBinaryBlkHdrLoader sets up a test environment and creates a new binary
// block header loader for testing purposes. It utilizes the provided TestCfg
// to configure the test parameters, including the start and end heights for
// the block headers to be generated. The function performs the following steps:
// 1. Initializes a new rpctest harness with the SimNetParams to simulate
// a Bitcoin network environment for testing.
// 2. Sets up the test harness, ensuring it's ready for block generation.
// 3. Generates a sequence of valid block headers based on the TestCfg parameters.
// 4. Encodes these headers into a binary format, simulating the kind of data
// a real binary block header loader might process.
// 5. Instantiates a new binaryBlkHdrLoader with the encoded binary data.
// 6. Returns the loader as a LoaderSource instance, ready for use in tests.
+func testNewBinaryBlkHdrLoader(t *testing.T, + testBin *TestCfg) LoaderSource[*wire.BlockHeader] { + + harness, err := rpctest.New( + &chaincfg.SimNetParams, nil, []string{"--txindex"}, "", + ) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, harness.TearDown()) + }) + + err = harness.SetUp(false, 0) + require.NoError(t, err) + + headers := GenerateValidBlockHeaders( + uint32(testBin.EndHeight-testBin.StartHeight), harness, t, + ) + bLoader, err := newBinaryBlkHdrLoader(GenerateEncodedBinaryReader( + t, testBin, headers), + ) + require.NoError(t, err) + + return bLoader +} diff --git a/sideload/log.go b/sideload/log.go new file mode 100644 index 000000000..91a951bc4 --- /dev/null +++ b/sideload/log.go @@ -0,0 +1,26 @@ +package sideload + +import "github.com/btcsuite/btclog" + +// log is a logger that is initialized with no output filters. This +// means the package will not perform any logging by default until the caller +// requests it. +var log btclog.Logger + +// The default amount of logging is none. +func init() { + DisableLog() +} + +// DisableLog disables all library log output. Logging output is disabled +// by default until either UseLogger or SetLogWriter are called. +func DisableLog() { + UseLogger(btclog.Disabled) +} + +// UseLogger uses a specified Logger to output package logging info. +// This should be used in preference to SetLogWriter if the caller is also +// using btclog. +func UseLogger(logger btclog.Logger) { + log = logger +} diff --git a/sideload/sideload.go b/sideload/sideload.go new file mode 100644 index 000000000..cf77c674e --- /dev/null +++ b/sideload/sideload.go @@ -0,0 +1,393 @@ +package sideload + +import ( + "errors" + "fmt" + "io" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" +) + +var ( + // ErrUnsupportedSourceType is an error message returned if a sideload + // source is encountered that is not recognized or cannot be handled. 
+	ErrUnsupportedSourceType = errors.New("unsupported source type")
+
+	// ErrBitcoinNetworkMismatchFmt is a function that formats an error
+	// message for a mismatch between the bitcoin network of the chain and
+	// the sideload's network.
+	ErrBitcoinNetworkMismatchFmt = func(
+		cfgChain wire.BitcoinNet, headerChain wire.BitcoinNet) error {
+		return fmt.Errorf("bitcoin network mismatch, chain's "+
+			"network: %v, sideload's network: %v",
+			cfgChain, headerChain.String())
+	}
+
+	// CompletedSideloadMsg is a message that indicates that sideloading
+	// has been completed.
+	CompletedSideloadMsg = func(tipHeight uint32) string {
+		return fmt.Sprintf("Completed sideloading at height %v", tipHeight)
+	}
+
+	ErrSetSeek = func(err error) error {
+		return fmt.Errorf("unable to set seek for Loader: %w", err)
+	}
+)
+
+// SourceType is a type that indicates the encoding format of the
+// sideload source.
+type SourceType uint8
+
+const (
+	// Binary indicates that the sideload source is in binary encoding
+	// format.
+	Binary SourceType = 0
+)
+
+// dataType indicates the type of data stored by the sideload source.
+type dataType uint8
+
+// dataSize is a type indicating the size in bytes a single unit of data
+// handled by the sideload source occupies.
+type dataSize uint32
+
+const (
+	// BlockHeaders is a data type that indicates the data stored is
+	// *wire.BlockHeaders.
+	BlockHeaders dataType = 0
+)
+
+// header represents the headers handled by the package.
+type header interface {
+	*wire.BlockHeader
+}
+
+// HeaderValidator defines an interface for validating blockchain headers,
+// parameterized over a type T, which represents a header. The type T must
+// satisfy the 'header' constraint.
+type HeaderValidator[T header] interface {
+	// VerifyCheckpoint checks the validity of headers between two
+	// checkpoints.
It accepts a pointer to a chainhash.Hash for the + // previous checkpoint, a slice of headers of type T, and a pointer to a + // chainhash.Hash for the next checkpoint. It returns true if the + // headers are valid, false otherwise. + VerifyCheckpoint(prevCheckpoint *chainhash.Hash, headers []T, + nextCheckpoint *chainhash.Hash) bool + + // Verify assesses the validity of a slice of headers of type T. + // It returns true if the headers pass the validation rules, false + // otherwise. + Verify([]T) bool +} + +// HeaderWriter defines an interface for persisting blockchain headers to a +// storage layer. It is parameterized over a type T, which represents a +// blockchain header. The type T must satisfy the 'header' constraint, +// indicating it is a header type. +type HeaderWriter[T header] interface { + // ChainTip returns the hash and height of the most recently known + // header in the storage. + ChainTip() (*chainhash.Hash, uint32, error) + + // Write takes a slice of headers of type T and persists them to the + // storage layer. It returns an error if any. + Write([]T) error +} + +// Checkpoints provides methods for accessing header checkpoints. +type Checkpoints interface { + // FetchCheckpoint returns the checkpoint height and hash at a given + // index. + FetchCheckpoint(idx int32) (height uint32, hash *chainhash.Hash) + + // FindNextHeaderCheckpoint locates the next checkpoint after a given + // height, returning its index, header height and hash. + FindNextHeaderCheckpoint(curHeight uint32) (index int32, height uint32, + hash *chainhash.Hash) + + // FindPreviousHeaderCheckpoint locates the previous checkpoint before a + // given height, returning its index, header height and hash. + FindPreviousHeaderCheckpoint(curHeight uint32) (index int32, + height uint32, hash *chainhash.Hash) + + // Len returns the total number of checkpoints. + Len() int32 +} + +// LoaderSource defines methods for retrieving data from a source. 
+type LoaderSource[T header] interface { + // EndHeight returns the height of the last header in the sideload + // source. + EndHeight() uint32 + + // StartHeight returns the height of the first header in the sideload + // source. + StartHeight() uint32 + + // HeadersChain identifies the Bitcoin network associated with the + // headers in the sideload source. + HeadersChain() wire.BitcoinNet + + // FetchHeaders retrieves a specified number of headers starting from + // the current set height. + FetchHeaders(numHeaders uint32) ([]T, error) + + // SetHeight specifies the height from which the caller intends to start + // reading headers. It returns an error, if any. + SetHeight(int32) error +} + +// LoaderInternal integrates header writing and validation functionalities, +// tailored for processing blockchain headers. It employs generics to work +// with a specific header type that meets the 'header' interface constraints. +type LoaderInternal[T header] struct { + // HeaderWriter provides methods to persist headers into the storage layer. + HeaderWriter[T] + + // HeaderValidator offers validation capabilities for blockchain headers, + // ensuring they adhere to specific rules and checkpoints. + HeaderValidator[T] + + // SkipVerify indicates whether header validation should be bypassed. + // True means validation is skipped; false means validation is performed. + SkipVerify bool + + // Chkpt holds methods for accessing header checkpoints used during the + // header validation process to ensure headers align with known valid points + // in the blockchain. + Chkpt Checkpoints + + // SideloadRange the number of headers that are fetched from the sideload + // source at a time. + SideloadRange uint32 +} + +// LoaderConfig holds configuration details necessary for initializing +// a Loader. It is generic over a type T, which represents a blockchain +// header and must satisfy the 'header' constraint, indicating its +// suitability for header-related operations. 
+type LoaderConfig[T header] struct { + // LoaderInternal embeds functionalities for writing and validating + // headers, as well as additional settings related to the validation + // process and sideloading capabilities. + LoaderInternal[T] + + // SourceType identifies the encoding format of the sideload source, + // indicating how sideloaded data should be interpreted. + SourceType SourceType + + // Reader provides an io.ReadSeeker interface to the sideload source. + Reader io.ReadSeeker + + // Chain specifies the Bitcoin network for which the loader's caller is + // configured, distinguishing between different network types + // (e.g., mainnet, testnet). + Chain wire.BitcoinNet +} + +// SideLoader manages the sideloading of headers. It is generic over a type T, +// which represents a blockchain header and must satisfy the 'header' +// constraint. +type SideLoader[T header] struct { + // LoaderInternal contains basic loader functionalities such as + // writing and validating headers, along with configurations for + // sideloading and validation processes. + LoaderInternal[T] + + // source represents the data source from which headers are sideloaded. + // It is specific to the sideloading process, encapsulating the logic + // for fetching and decoding sideloaded headers. + source LoaderSource[T] + + // chkptTracker keeps track of the current checkpoint index during the + // sideloading process. + chkptTracker int32 + + // haltHeight specifies a blockchain height at which the sideloading + // process should be halted. + haltHeight uint32 + + // haltHeightFunc is a function that dynamically determines the halt height + // for the sideloading process. + haltHeightFunc func() uint32 +} + +// NewBlockHeaderLoader initializes a block header Loader based on the source +// type of the reader config. 
+func NewBlockHeaderLoader( + cfg *LoaderConfig[*wire.BlockHeader]) (*SideLoader[*wire.BlockHeader], + error) { + + var ( + source LoaderSource[*wire.BlockHeader] + err error + ) + + switch { + case cfg.SourceType == Binary: + source, err = newBinaryBlkHdrLoader(cfg.Reader) + if err != nil { + return nil, err + } + default: + return nil, ErrUnsupportedSourceType + } + + headerChain := source.HeadersChain() + + if headerChain != cfg.Chain { + return nil, ErrBitcoinNetworkMismatchFmt( + cfg.Chain, headerChain, + ) + } + + return &SideLoader[*wire.BlockHeader]{ + LoaderInternal: cfg.LoaderInternal, + source: source, + haltHeightFunc: func() uint32 { + return source.EndHeight() + }, + }, nil +} + +// obtainRangeFetchFunc returns the next number of headers to be fetched and +// the next checkpoint header and checkpoint height, if skipVerify is false. +func (s *SideLoader[T]) obtainRangeFetchFunc() func( + curHeight uint32) (uint32, *chainhash.Hash) { + + // If we are to verify headers and the length of checkpoints is + // greater than zero, we want to fetch headers from our current height to + // our next checkpoint. + if !s.SkipVerify && s.Chkpt.Len() > 0 { + return func(curHeight uint32) (uint32, *chainhash.Hash) { + chkpt := s.Chkpt + + // Check if idx is out of range + if s.chkptTracker >= chkpt.Len() { + return 0, nil + } + checkpointHeight, checkpointHash := chkpt. + FetchCheckpoint(s.chkptTracker) + + if checkpointHeight > s.source.EndHeight() { + return curHeight, nil + } + + s.chkptTracker++ + + return checkpointHeight, checkpointHash + } + } + + // If we do not want to Verify headers we use whatever + // `sideloadRange` was given to us by the caller. 
+ return func(curHeight uint32) (uint32, *chainhash.Hash) { + if curHeight >= s.haltHeight { + return curHeight, nil + } + if curHeight+s.SideloadRange > s.haltHeight { + return s.haltHeight - curHeight, nil + } + + return curHeight + s.SideloadRange, nil + } +} + +// Load sideloads headers into the neutrino system. +func (s *SideLoader[T]) Load() error { + // Fetch the height at which we stop sideloading. + haltHeight := s.haltHeightFunc() + s.haltHeight = haltHeight + + _, tipHeight, err := s.HeaderWriter.ChainTip() + if err != nil { + return err + } + + source := s.source + + err = source.SetHeight(int32(tipHeight)) + if err != nil { + log.Infof("Nothing to load, chain height: %v, "+ + "source end height: %v", tipHeight, source.EndHeight()) + + return nil + } + + var prevCheckpt *chainhash.Hash + if s.Chkpt.Len() > 0 { + idx, _, _ := s.Chkpt.FindNextHeaderCheckpoint(tipHeight) + s.chkptTracker = idx + + _, _, prevCheckpt = s.Chkpt.FindPreviousHeaderCheckpoint(tipHeight) + + } + + fetchRange := s.obtainRangeFetchFunc() + + log.Infof("Sideloading headers from height %v", tipHeight) + for tipHeight < haltHeight { + chkpointHeight, chkpointHash := fetchRange(tipHeight) + fetchSize := chkpointHeight - tipHeight + + log.Infof("chkpoint height %v", chkpointHeight) + if fetchSize == 0 { + log.Infof(CompletedSideloadMsg(tipHeight)) + + return nil + } + + headers, err := s.source.FetchHeaders(fetchSize) + if err != nil { + return err + } + + if len(headers) == 0 { + log.Infof(CompletedSideloadMsg(tipHeight)) + + return nil + } + + if !s.SkipVerify { + if !s.Verify(headers) { + log.Warnf("headers failed verification at "+ + "height: %v", tipHeight) + + return nil + } + + if s.Chkpt.Len() > 0 { + if !s.VerifyCheckpoint( + prevCheckpt, headers[:len(headers)-1], + chkpointHash, + ) { + + log.Warnf("headers failed checkpoint "+ + "verification at height: "+ + "%v", tipHeight) + + return nil + } + } + } + + err = s.Write(headers) + if err != nil { + return err + } + + // Update 
tip. + _, tipHeight, err = s.ChainTip() + if err != nil { + return err + } + + // Update previous checkpoint hash. + prevCheckpt = chkpointHash + } + + log.Infof(CompletedSideloadMsg(tipHeight)) + + return nil +} diff --git a/sideload/sideload_test.go b/sideload/sideload_test.go new file mode 100644 index 000000000..47d6fb389 --- /dev/null +++ b/sideload/sideload_test.go @@ -0,0 +1,95 @@ +package sideload + +import ( + "testing" + + "github.com/btcsuite/btcd/wire" + "github.com/stretchr/testify/require" +) + +type TestLoaderSourceFunc func(*testing.T, *TestCfg) LoaderSource[*wire. + BlockHeader] + +// TestLoaderSource tests that the LoaderSource implementations work as +// expected. +func TestLoaderSource(t *testing.T) { + sourceFuncs := []TestLoaderSourceFunc{ + testNewBinaryBlkHdrLoader, + } + + // Initialize a test configuration for a sideload source. This + // configuration specifies a sideload source containing block headers + // ranging from height 2 to height 10, all belonging to the regtest + // network. The data type specified for this test is set to + // BlockHeaders, indicating that the source contains block headers. + test := &TestCfg{ + StartHeight: 2, + EndHeight: 10, + Net: wire.TestNet3, + DataType: BlockHeaders, + } + + for _, sourceFunc := range sourceFuncs { + source := sourceFunc(t, test) + + require.Equal(t, test.StartHeight, uint64(source.StartHeight())) + + require.Equal(t, test.EndHeight, uint64(source.EndHeight())) + + require.Equal(t, test.Net, source.HeadersChain()) + + // Fetch all headers from the reader. + fetchSize := uint32(test.EndHeight - test.StartHeight) + headers, err := source.FetchHeaders(fetchSize) + require.NoError(t, err) + + // Store first header in the variable, + // we would use it for testing later. + firstHeader := headers[0] + + // We should obtain a length of headers equal to the number + // of headers requested. 
		require.Len(t, headers, int(fetchSize))

		headers, err = source.FetchHeaders(fetchSize)
		require.NoError(t, err)

		// We should not be able to fetch more headers as our
		// reader's seeker is at its end.
		require.Len(t, headers, 0)

		// We expect an error when setting the reader to fetch a
		// header at a height that we do not have.
		err = source.SetHeight(0)
		require.Error(t, err)

		// Now that the reader has been set to a height it knows
		// about, we should be able to fetch more headers.
		//
		// Setting the height to 2 makes the source read from
		// height 3 upwards.
		err = source.SetHeight(2)
		require.NoError(t, err)

		fetchSize = uint32(test.EndHeight - 3)
		headers, err = source.FetchHeaders(fetchSize)
		require.NoError(t, err)

		require.Len(t, headers, int(fetchSize))

		// Setting the height to -1 resets the source, enabling it to
		// read all (endHeight - startHeight) headers from the start.
		err = source.SetHeight(-1)
		require.NoError(t, err)

		fetchSize = uint32(test.EndHeight - test.StartHeight)
		headers, err = source.FetchHeaders(fetchSize)
		require.NoError(t, err)

		require.Len(t, headers, int(fetchSize))

		// Since we set the reader to its first height,
		// the first header we got previously should be the same as
		// the one that we have now.
+ require.Equal(t, firstHeader, headers[0]) + } +} diff --git a/sideload/test_utils.go b/sideload/test_utils.go new file mode 100644 index 000000000..9f42e279a --- /dev/null +++ b/sideload/test_utils.go @@ -0,0 +1,141 @@ +package sideload + +import ( + "bytes" + "io" + "os" + "sync" + "testing" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/integration/rpctest" + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/tlv" + "github.com/stretchr/testify/require" +) + +func GenerateEncodedBinaryReader(t *testing.T, c *TestCfg, + blkHdrsByte []byte) io.ReadSeeker { + + encodedOsFile, err := os.CreateTemp("", "temp") + require.NoError(t, err) + + t.Cleanup(func() { + require.NoError(t, encodedOsFile.Close()) + + require.NoError(t, os.Remove(encodedOsFile.Name())) + }) + + require.NoError( + t, tlv.WriteVarInt(encodedOsFile, uint64(c.DataType), + &[8]byte{}), + ) + + require.NoError( + t, tlv.WriteVarInt(encodedOsFile, c.StartHeight, &[8]byte{}), + ) + + require.NoError( + t, tlv.WriteVarInt(encodedOsFile, c.EndHeight, &[8]byte{}), + ) + + require.NoError( + t, tlv.WriteVarInt(encodedOsFile, uint64(c.Net), &[8]byte{}), + ) + + lengthofWrittenBytes, err := encodedOsFile.Write(blkHdrsByte) + require.NoError(t, err) + require.Equal(t, lengthofWrittenBytes, len(blkHdrsByte)) + + // Reset to the beginning of the file. + _, err = encodedOsFile.Seek(0, io.SeekStart) + require.NoError(t, err) + + return encodedOsFile +} + +var headerBufPool = sync.Pool{ + New: func() interface{} { return new(bytes.Buffer) }, +} + +func GenerateValidBlockHeaders(numHeaders uint32, harness *rpctest.Harness, + t *testing.T) []byte { + + // Generate 200 valid blocks that we then feed to the block manager. 
+ blockHashes, err := harness.Client.Generate(numHeaders) + require.NoError(t, err) + + return serializeHeaders(blockHashes, harness, t) +} + +// GenerateInValidBlockHeaders produces a slice of invalid block headers +// between the specified startHeight and endHeight. These headers are made +// invalid by creating a gap in the blockchain: headers up to +// lastValidHeaderHeight are generated and considered valid, then a set of +// headers are generated and discarded (not added to the result), followed by +// another set of headers that are added to the result. This gap results in +// a discontinuity making the entire sequence invalid. The function utilizes +// a test harness for generating and fetching block headers. +func GenerateInValidBlockHeaders(startHeight, endHeight, + lastValidHeaderHeight uint32, harness *rpctest.Harness, + t *testing.T) []byte { + + numHeaders := endHeight - startHeight + + // The lastValidHeaderHeight must be within the fetch + if lastValidHeaderHeight > endHeight-startHeight { + t.Fatalf("unable to generate invalid headers") + } + + totalBlockHashes := make([]*chainhash.Hash, 0, numHeaders) + blockHashes, err := harness.Client.Generate( + lastValidHeaderHeight - startHeight, + ) + require.NoError(t, err) + + totalBlockHashes = append(totalBlockHashes, blockHashes...) + + _, err = harness.Client.Generate(10) + require.NoError(t, err) + + blockHashes, err = harness.Client.Generate( + endHeight - lastValidHeaderHeight, + ) + require.NoError(t, err) + + totalBlockHashes = append(totalBlockHashes, blockHashes...) + + return serializeHeaders(totalBlockHashes, harness, t) +} + +func serializeHeaders(blockHashes []*chainhash.Hash, + harness *rpctest.Harness, t *testing.T) []byte { + + // First, we'll grab a buffer from the write buffer pool so we can + // reduce our total number of allocations, and also write the headers + // in a single swoop. 
+ headerBuf := headerBufPool.Get().(*bytes.Buffer) + headerBuf.Reset() + defer headerBufPool.Put(headerBuf) + + for i := range blockHashes { + hdr, err := harness.Client.GetBlockHeader(blockHashes[i]) + require.NoError(t, err) + + err = hdr.Serialize(headerBuf) + require.NoError(t, err) + } + + return headerBuf.Bytes() +} + +// TestCfg defines a struct for configuring tests that involve generating +// or working with headers. It includes fields for specifying the +// start and end heights of the headers, the network type, and the data type +// of the headers. +type TestCfg struct { + StartHeight uint64 + EndHeight uint64 + Net wire.BitcoinNet + DataType dataType +} diff --git a/sync_test.go b/sync_test.go index 20e77c347..5c0bffcff 100644 --- a/sync_test.go +++ b/sync_test.go @@ -7,6 +7,7 @@ import ( "io" "io/ioutil" "os" + "path/filepath" "reflect" "runtime" "strings" @@ -31,6 +32,8 @@ import ( "github.com/lightninglabs/neutrino" "github.com/lightninglabs/neutrino/banman" "github.com/lightninglabs/neutrino/headerfs" + "github.com/lightninglabs/neutrino/sideload" + "github.com/stretchr/testify/require" ) var ( @@ -39,7 +42,7 @@ var ( // btclog.LevelOff turns on log messages from the tests themselves as // well. Keep in mind some log messages may not appear in order due to // use of multiple query goroutines in the tests. - logLevel = btclog.LevelOff + logLevel = btclog.LevelInfo syncTimeout = 30 * time.Second syncUpdate = time.Second @@ -189,6 +192,17 @@ var ( ourKnownTxsByFilteredBlock = make(map[chainhash.Hash][]*btcutil.Tx) ) +func init() { + // Set up logging. + logger := btclog.NewBackend(os.Stdout) + chainLogger := logger.Logger("CHAIN") + chainLogger.SetLevel(logLevel) + neutrino.UseLogger(chainLogger) + rpcLogger := logger.Logger("RPCC") + rpcLogger.SetLevel(logLevel) + rpcclient.UseLogger(rpcLogger) +} + // secSource is an implementation of btcwallet/txauthor/SecretsSource that // stores WitnessPubKeyHash addresses. 
type secSource struct { @@ -1035,14 +1049,7 @@ func testRandomBlocks(harness *neutrinoHarness, t *testing.T) { } func TestNeutrinoSync(t *testing.T) { - // Set up logging. - logger := btclog.NewBackend(os.Stdout) - chainLogger := logger.Logger("CHAIN") - chainLogger.SetLevel(logLevel) - neutrino.UseLogger(chainLogger) - rpcLogger := logger.Logger("RPCC") - rpcLogger.SetLevel(logLevel) - rpcclient.UseLogger(rpcLogger) + // setUpLog() // Create a btcd SimNet node and generate 800 blocks h1, err := rpctest.New( @@ -1120,18 +1127,10 @@ func TestNeutrinoSync(t *testing.T) { // Copy parameters and insert checkpoints modParams := chaincfg.SimNetParams - for _, height := range []int64{111, 333, 555, 777, 999} { - hash, err := h1.Client.GetBlockHash(height) - if err != nil { - t.Fatalf("Couldn't get block hash for height %d: %s", - height, err) - } - modParams.Checkpoints = append(modParams.Checkpoints, - chaincfg.Checkpoint{ - Hash: hash, - Height: int32(height), - }) - } + checkptHeights := []int64{111, 333, 555, 777, 999} + checkpoints := createCheckpoints(checkptHeights, h1, t) + + modParams.Checkpoints = checkpoints // Create a temporary directory, initialize an empty walletdb with an // SPV chain namespace, and create a configuration for the ChainService. @@ -1181,6 +1180,300 @@ func TestNeutrinoSync(t *testing.T) { } } +func TestNeutrinoBlkHdrSideload(t *testing.T) { + // setUpLog() + + // Setup harness for testing. + harness, err := rpctest.New( + &chaincfg.SimNetParams, nil, []string{"--txindex"}, "", + ) + require.NoError(t, err) + + err = harness.SetUp(false, 0) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, harness.TearDown()) + }) + + // Create DB. 
+ tempDir, err := os.MkdirTemp("", "neutrino") + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, os.RemoveAll(tempDir)) + }) + + db, err := walletdb.Create( + "bdb", tempDir+"/weks.db", true, dbOpenTimeout, + ) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, db.Close()) + }) + + // Create function to clean up DB. + cleanUpDB := func() { + require.NoError(t, removeAllFilesInDir(tempDir)) + } + + // Create function to set up neutrino and confirm header store is at + // the specified height. + startNeutrinoCheckHeight := func(config neutrino.Config, height int32, + t *testing.T) { + + svc, err := neutrino.NewChainService(config) + require.NoError(t, err) + + err = svc.Start() + require.NoError(t, err) + + defer func() { + err = svc.Stop() + require.NoError(t, err) + }() + + _, tipHeight, err := svc.BlockHeaders.ChainTip() + require.NoError(t, err) + + require.Equal(t, height, int32(tipHeight)) + } + + _, err = harness.Client.Generate(1000) + require.NoError(t, err) + + // Set up bitcoin network and checkpoint for testing. + // Using a dev network and not connecting with any peers ensures that + // we fetch headers only from the sideleoad. + modParams := chaincfg.SimNetParams + + chkptHeights := []int64{111, 333, 555, 777, 999} + checkpoints := createCheckpoints(chkptHeights, harness, t) + + modParams.Checkpoints = checkpoints + + neutrinoConfig := neutrino.Config{ + DataDir: tempDir, + Database: db, + ChainParams: modParams, + } + + testCfg := &sideload.TestCfg{ + StartHeight: 0, + EndHeight: 556, + Net: modParams.Net, + DataType: sideload.BlockHeaders, + } + + type readerFunction func(*testing.T, *sideload.TestCfg, + []byte) io.ReadSeeker + + readerFuncsList := map[sideload.SourceType]readerFunction{ + sideload.Binary: sideload.GenerateEncodedBinaryReader, + } + + for sourceType, readerFunc := range readerFuncsList { + // Verify sideloading reaches source's end height while not + // verifying headers. 
+ t.Run( + "Test preloading headers to source's last height "+ + " while skipVerify is true.", + func(t *testing.T) { + headers := sideload.GenerateValidBlockHeaders( + uint32( + testCfg.EndHeight-testCfg. + StartHeight, + ), harness, t, + ) + bLoader := readerFunc(t, testCfg, headers) + + neutrinoConfig.BlkHdrSideload = &neutrino. + SideloadOpt{ + SourceType: sourceType, + Reader: bLoader, + SkipVerify: true, + } + + startNeutrinoCheckHeight(neutrinoConfig, + int32(testCfg.EndHeight), t) + + // Database remains uncleared; header store + // should stay at `testCfg.EndHeight`. This test + // checks that sideloading from a source that + // has nothing to offer does not change the + // store's height. + startNeutrinoCheckHeight( + neutrinoConfig, int32( + testCfg.EndHeight, + ), t, + ) + }) + + // Clean up DB for next test. + cleanUpDB() + + chkptMgr := neutrino.NewBlockHeaderCheckpoints( + modParams, + ) + + // Verify sideloading stops at a checkpoint height ≤ source's + // last height when SkipVerify is false. + t.Run( + "Ensure sideloading halts at checkpoint ≤ source's "+ + "final height with verification", + func(t *testing.T) { + headers := sideload.GenerateValidBlockHeaders( + uint32( + testCfg.EndHeight-testCfg. + StartHeight, + ), harness, t, + ) + bLoader := readerFunc(t, testCfg, headers) + + neutrinoConfig.BlkHdrSideload = &neutrino. + SideloadOpt{ + SourceType: sourceType, + Reader: bLoader, + } + + // Fetch the closest checkpoint to height. + _, height, _ := chkptMgr. + FindPreviousHeaderCheckpoint( + uint32(testCfg.EndHeight), + ) + require.Equal(t, int(height), 555) + + startNeutrinoCheckHeight( + neutrinoConfig, int32(height), t, + ) + + // Database intact; expect no change in header + // store's height. This test verifies sideloading + // from a source lacking a full checkpoint range + // doesn't alter the store's height during + // verification. + startNeutrinoCheckHeight( + neutrinoConfig, int32(height), t, + ) + }) + + // Cleanup db. 
+ cleanUpDB() + + // Verify stopping at the latest verified checkpoint when + // sideloading invalid headers with verification enabled. + t.Run( + "sideloading invalid headers, skipVerify false", + func(t *testing.T) { + lastValidHeaderHeight := uint32(230) + headers := sideload.GenerateInValidBlockHeaders( + uint32(testCfg.StartHeight), + uint32(testCfg.EndHeight), + lastValidHeaderHeight, harness, t, + ) + bLoader := readerFunc(t, testCfg, headers) + + neutrinoConfig.BlkHdrSideload = &neutrino. + SideloadOpt{ + SourceType: sourceType, + Reader: bLoader, + } + + _, height, _ := chkptMgr. + FindPreviousHeaderCheckpoint( + lastValidHeaderHeight, + ) + + startNeutrinoCheckHeight( + neutrinoConfig, + int32(height), t, + ) + + // Database remains uncleared; header store + // should stay the same. This test checks that + // sideloading from a source that has nothing to + // offer does not change the store's height. + startNeutrinoCheckHeight( + neutrinoConfig, + int32(height), t, + ) + }) + + // Cleanup db. + cleanUpDB() + + // Test that sideloading does not occur if there is a + // mistmatch in bitcoin network between the source and + // chainService. + t.Run("chain mismatch test", func(t *testing.T) { + testCfg.Net = wire.MainNet + headers := sideload.GenerateValidBlockHeaders( + uint32(testCfg.EndHeight-testCfg.StartHeight), + harness, t, + ) + bLoader := sideload.GenerateEncodedBinaryReader( + t, testCfg, headers, + ) + + neutrinoConfig.BlkHdrSideload = &neutrino.SideloadOpt{ + SourceType: sourceType, + Reader: bLoader, + } + + _, err := neutrino.NewChainService(neutrinoConfig) + require.Equal( + t, err.Error(), + sideload.ErrBitcoinNetworkMismatchFmt( + neutrinoConfig.ChainParams.Net, + testCfg.Net, + ).Error(), + ) + }) + + // Cleanup db. + cleanUpDB() + } +} + +// removeAllFilesInDir removes all files within the specified directory +// but leaves the directory itself intact. +func removeAllFilesInDir(dirPath string) error { + // Walk the directory. 
+ err := filepath.Walk(dirPath, func(path string, info os.FileInfo, + err error) error { + + if err != nil { + return err // Return any encountered error. + } + if !info.IsDir() { + // If the file is not a directory, remove it. + err := os.Remove(path) + if err != nil { + return err // Return any error encountered while + // removing. + } + } + return nil + }) + return err +} + +func createCheckpoints(heights []int64, harness *rpctest.Harness, + t *testing.T) []chaincfg.Checkpoint { + + checkpoints := make([]chaincfg.Checkpoint, 0, len(heights)) + for _, height := range heights { + hash, err := harness.Client.GetBlockHash(height) + require.NoError(t, err) + + checkpoints = append(checkpoints, + chaincfg.Checkpoint{ + Hash: hash, + Height: int32(height), + }) + } + + return checkpoints +} + // csd does a connect-sync-disconnect between nodes in order to support // reorg testing. It brings up and tears down a temporary node, otherwise the // nodes try to reconnect to each other which results in unintended reorgs.