diff --git a/.gitignore b/.gitignore index 3b735ec..e0231d3 100644 --- a/.gitignore +++ b/.gitignore @@ -15,7 +15,7 @@ *.out # Dependency directories (remove the comment below to include it) -# vendor/ +vendor/* # Go workspace file go.work diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..f59f358 --- /dev/null +++ b/Makefile @@ -0,0 +1,17 @@ +default: help + +.PHONY: help +help: ## Print this help message + @echo "Available make commands:"; grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' + +.PHONY: lint +lint: ## Run golangci-lint + golangci-lint run --out-format=tab + +.PHONY: lint-fix +lint-fix: ## Run golangci-lint with the --fix flag to fix linter errors + golangci-lint run --fix --out-format=tab --issues-exit-code=0 + +.PHONY: test +test: ## Run the Go unit tests + go test -race -v ./... diff --git a/README.md b/README.md index f5a1778..f042f5c 100644 --- a/README.md +++ b/README.md @@ -1 +1,4 @@ -# cometbft-client \ No newline at end of file +# cometbft-client + +Basic wrapper around the CometBFT RPC client. Useful for maintaining support for both current and +legacy versions of CometBFT. \ No newline at end of file diff --git a/abci/types/types.go b/abci/types/types.go new file mode 100644 index 0000000..4b04428 --- /dev/null +++ b/abci/types/types.go @@ -0,0 +1,104 @@ +package types + +import "github.com/strangelove-ventures/cometbft-client/proto/tendermint/crypto" + +const ( + CodeTypeOK uint32 = 0 +) + +// ValidatorUpdates is a list of validators that implements the Sort interface +type ValidatorUpdates []ValidatorUpdate + +type ValidatorUpdate struct { + PubKey crypto.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` + Power int64 `protobuf:"varint,2,opt,name=power,proto3" json:"power,omitempty"` +} + +type ExecTxResult struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` + Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` +} + +// IsOK returns true if Code is OK. +func (r ExecTxResult) IsOK() bool { + return r.Code == CodeTypeOK +} + +// IsErr returns true if Code is something other than OK. +func (r ExecTxResult) IsErr() bool { + return r.Code != CodeTypeOK +} + +// ----------------------------------------------- +// construct Result data + +// Event allows application developers to attach additional information to +// ResponseFinalizeBlock and ResponseCheckTx. +// Later, transactions may be queried using these events. +type Event struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Attributes []EventAttribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` +} + +// EventAttribute is a single key-value pair, associated with an event.
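+// As an illustrative sketch (the values here are hypothetical, not part of this PR), a Cosmos SDK transfer event could look like: +// Event{Type: "transfer", Attributes: []EventAttribute{{Key: "recipient", Value: "cosmos1...", Index: true}, {Key: "amount", Value: "100uatom"}}}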
+type EventAttribute struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Index bool `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` +} + +type ResponseInfo struct { + Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + AppVersion uint64 `protobuf:"varint,3,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` + LastBlockHeight int64 `protobuf:"varint,4,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` + LastBlockAppHash []byte `protobuf:"bytes,5,opt,name=last_block_app_hash,json=lastBlockAppHash,proto3" json:"last_block_app_hash,omitempty"` +} + +type ResponseQuery struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // bytes data = 2; // use "value" instead. + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + Index int64 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"` + Key []byte `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` + ProofOps *ProofOps `protobuf:"bytes,8,opt,name=proof_ops,json=proofOps,proto3" json:"proofOps,omitempty"` + Height int64 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` + Codespace string `protobuf:"bytes,10,opt,name=codespace,proto3" json:"codespace,omitempty"` +} + +func (r *ResponseQuery) IsOK() bool { + return r.Code == CodeTypeOK +} + +// ProofOps is a Merkle proof defined by the list of ProofOps +type ProofOps struct { + Ops []ProofOp `protobuf:"bytes,1,rep,name=ops,proto3" json:"ops"` +} + +// ProofOp defines an operation used for calculating the Merkle root. +// The data could be in arbitrary format, providing necessary data, +// for example the neighbouring node hash +type ProofOp struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` +} + +type ResponseCheckTx struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` + Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` +} diff --git a/client/client_wrapper.go b/client/client_wrapper.go new file mode 100644 index 0000000..aea7141 --- /dev/null +++ b/client/client_wrapper.go @@ -0,0 +1,363 @@ +package client + +import ( + "context" + "encoding/base64" + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + abci "github.com/strangelove-ventures/cometbft-client/abci/types" + _ "github.com/strangelove-ventures/cometbft-client/crypto/encoding" + "github.com/strangelove-ventures/cometbft-client/libs/bytes" +
rpcclient "github.com/strangelove-ventures/cometbft-client/rpc/client" + rpchttp "github.com/strangelove-ventures/cometbft-client/rpc/client/http" + coretypes "github.com/strangelove-ventures/cometbft-client/rpc/core/types" + jsonrpc "github.com/strangelove-ventures/cometbft-client/rpc/jsonrpc/client" + "github.com/strangelove-ventures/cometbft-client/types" +) + +// Client is a wrapper around the CometBFT RPC client. +type Client struct { + rpcClient rpcclient.Client +} + +// NewClient returns a pointer to a new instance of Client. +func NewClient(addr string, timeout time.Duration) (*Client, error) { + rpcClient, err := newRPCClient(addr, timeout) + if err != nil { + return nil, err + } + + return &Client{rpcClient}, nil +} + +// BlockResults fetches the block results at a specific height, +// it then parses the tx results and block events into our generalized types. +// This allows us to maintain backwards compatability with older versions of CometBFT. +func (c *Client) BlockResults(ctx context.Context, height *int64) (*BlockResponse, error) { + res, err := c.rpcClient.BlockResults(ctx, height) + if err != nil { + return nil, err + } + + var txRes []*ExecTxResponse + for _, tx := range res.TxsResults { + txRes = append(txRes, &ExecTxResponse{ + Code: tx.Code, + Data: tx.Data, + Log: tx.Log, + Info: tx.Info, + GasWanted: tx.GasWanted, + GasUsed: tx.GasUsed, + Events: parseEvents(tx.Events), + Codespace: tx.Codespace, + }) + } + + if res.FinalizeBlockEvents != nil && len(res.FinalizeBlockEvents) > 0 { + return &BlockResponse{ + Height: res.Height, + TxResponses: txRes, + Events: parseEvents(res.FinalizeBlockEvents), + ValidatorUpdates: res.ValidatorUpdates, + AppHash: res.AppHash, + }, nil + } + + events := res.BeginBlockEvents + events = append(events, res.EndBlockEvents...) 
+ + return &BlockResponse{ + Height: res.Height, + TxResponses: txRes, + Events: parseEvents(events), + ValidatorUpdates: res.ValidatorUpdates, + AppHash: res.AppHash, + }, nil +} + +func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*TxResponse, error) { + res, err := c.rpcClient.Tx(ctx, hash, prove) + if err != nil { + return nil, err + } + + execTx := ExecTxResponse{ + Code: res.TxResult.Code, + Data: res.TxResult.Data, + Log: res.TxResult.Log, + Info: res.TxResult.Info, + GasWanted: res.TxResult.GasWanted, + GasUsed: res.TxResult.GasUsed, + Events: parseEvents(res.TxResult.Events), + Codespace: res.TxResult.Codespace, + } + + return &TxResponse{ + Hash: res.Hash, + Height: res.Height, + Index: res.Index, + ExecTx: execTx, + Tx: res.Tx, + Proof: res.Proof, + }, nil +} + +func (c *Client) TxSearch( + ctx context.Context, + query string, + prove bool, + page *int, + perPage *int, + orderBy string, +) ([]*TxResponse, error) { + res, err := c.rpcClient.TxSearch(ctx, query, prove, page, perPage, orderBy) + if err != nil { + return nil, err + } + + result := make([]*TxResponse, len(res.Txs)) + + for i, tx := range res.Txs { + execTx := ExecTxResponse{ + Code: tx.TxResult.Code, + Data: tx.TxResult.Data, + Log: tx.TxResult.Log, + Info: tx.TxResult.Info, + GasWanted: tx.TxResult.GasWanted, + GasUsed: tx.TxResult.GasUsed, + Events: parseEvents(tx.TxResult.Events), + Codespace: tx.TxResult.Codespace, + } + + result[i] = &TxResponse{ + Hash: tx.Hash, + Height: tx.Height, + Index: tx.Index, + ExecTx: execTx, + Tx: tx.Tx, + Proof: tx.Proof, + } + } + + return result, nil +} + +func (c *Client) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { + res, err := c.rpcClient.Commit(ctx, height) + if err != nil { + return nil, err + } + + return res, nil +} + +func (c *Client) Validators( + ctx context.Context, + height *int64, + page *int, + perPage *int, +) (*coretypes.ResultValidators, error) { + res, err := c.rpcClient.Validators(ctx, height, page, perPage) + if err != nil { + return nil, err + } + + return res, nil +} + +func (c *Client) Status(ctx context.Context) (*coretypes.ResultStatus, error) { + res, err := c.rpcClient.Status(ctx) + if err != nil { + return nil, err + } + + return res, nil +} + +func (c *Client) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { + res, err := c.rpcClient.Block(ctx, height) + if err != nil { + return nil, err + } + + return res, nil +} + +func (c *Client) BlockSearch( + ctx context.Context, + query string, + page *int, + perPage *int, + orderBy string, +) (*coretypes.ResultBlockSearch, error) { + res, err := c.rpcClient.BlockSearch(ctx, query, page, perPage, orderBy) + if err != nil { + return nil, err + } + + return res, nil +} + +func (c *Client) BlockByHash(ctx context.Context, hash []byte) (*coretypes.ResultBlock, error) { + res, err := c.rpcClient.BlockByHash(ctx, hash) + if err != nil { + return nil, err + } + + return res, nil +} + +func (c *Client) BlockchainInfo(ctx context.Context, minHeight int64, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { + res, err := c.rpcClient.BlockchainInfo(ctx, minHeight, maxHeight) + if err != nil { + return nil, err + } + + return res, nil +} + +func (c *Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { + res, err := c.rpcClient.BroadcastTxAsync(ctx, tx) + if err != nil { + return nil, err + } + + return res, nil +} + +func (c *Client) BroadcastTxSync(ctx context.Context, tx types.Tx) 
(*coretypes.ResultBroadcastTx, error) { + res, err := c.rpcClient.BroadcastTxSync(ctx, tx) + if err != nil { + return nil, err + } + + return res, nil +} + +func (c *Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { + res, err := c.rpcClient.BroadcastTxCommit(ctx, tx) + if err != nil { + return nil, err + } + + return res, nil +} + +func (c *Client) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { + res, err := c.rpcClient.ABCIInfo(ctx) + if err != nil { + return nil, err + } + + return res, nil +} + +func (c *Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { + res, err := c.rpcClient.ABCIQuery(ctx, path, data) + if err != nil { + return nil, err + } + + return res, nil +} + +func (c *Client) ABCIQueryWithOptions( + ctx context.Context, + path string, + data bytes.HexBytes, + opts rpcclient.ABCIQueryOptions, +) (*coretypes.ResultABCIQuery, error) { + res, err := c.rpcClient.ABCIQueryWithOptions(ctx, path, data, opts) + if err != nil { + return nil, err + } + + return res, nil +} + +func newRPCClient(addr string, timeout time.Duration) (*rpchttp.HTTP, error) { + httpClient, err := jsonrpc.DefaultHTTPClient(addr) + if err != nil { + return nil, err + } + + httpClient.Timeout = timeout + + rpcClient, err := rpchttp.NewWithClient(addr, "/websocket", httpClient) + if err != nil { + return nil, err + } + + return rpcClient, nil +} + +// parseEvents returns a slice of sdk.StringEvent objects that are composed from a slice of abci.Event objects. +// parseEvents will first attempt to base64 decode the abci.Event objects and if an error is encountered it will +// fall back to the stringifyEvents function. +func parseEvents(events []abci.Event) sdk.StringEvents { + decodedEvents, err := base64DecodeEvents(events) + if err == nil { + return decodedEvents + } + + return stringifyEvents(events) +} + +// base64DecodeEvents attempts to base64 decode a slice of Event objects. +// An error is returned if base64 decoding any event in the slice fails. +func base64DecodeEvents(events []abci.Event) (sdk.StringEvents, error) { + sdkEvents := make(sdk.StringEvents, len(events)) + + for i, event := range events { + evt := sdk.StringEvent{Type: event.Type} + + for _, attr := range event.Attributes { + key, err := base64.StdEncoding.DecodeString(attr.Key) + if err != nil { + return nil, err + } + + value, err := base64.StdEncoding.DecodeString(attr.Value) + if err != nil { + return nil, err + } + + evt.Attributes = append(evt.Attributes, sdk.Attribute{ + Key: string(key), + Value: string(value), + }) + } + + sdkEvents[i] = evt + } + + return sdkEvents, nil +} + +// stringifyEvents converts a slice of Event objects into a slice of StringEvent objects. +// This function is copied straight from the Cosmos SDK, so we can alter it to handle our abci.Event type. +func stringifyEvents(events []abci.Event) sdk.StringEvents { + res := make(sdk.StringEvents, 0, len(events)) + + for _, e := range events { + res = append(res, stringifyEvent(e)) + } + + return res +} + +// stringifyEvent converts an Event object to a StringEvent object. +// This function is copied straight from the Cosmos SDK, so we can alter it to handle our abci.Event type. 
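+// A hedged sketch (hypothetical event values): an attribute that is not base64 encoded passes through unchanged: +// in := abci.Event{Type: "message", Attributes: []abci.EventAttribute{{Key: "action", Value: "send"}}} +// out := stringifyEvent(in) // sdk.StringEvent{Type: "message", Attributes: [{action send}]}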
+func stringifyEvent(e abci.Event) sdk.StringEvent { + res := sdk.StringEvent{Type: e.Type} + + for _, attr := range e.Attributes { + res.Attributes = append( + res.Attributes, + sdk.Attribute{Key: attr.Key, Value: attr.Value}, + ) + } + + return res +} diff --git a/client/client_wrapper_test.go b/client/client_wrapper_test.go new file mode 100644 index 0000000..5af80c3 --- /dev/null +++ b/client/client_wrapper_test.go @@ -0,0 +1,274 @@ +package client + +import ( + "context" + "encoding/json" + "strings" + "testing" + "time" + + "github.com/strangelove-ventures/cometbft-client/libs/bytes" + "github.com/stretchr/testify/require" +) + +const url = "https://rpc.osmosis.strange.love:443" + +// TODO: this hardcoded value makes the test brittle since the underlying node may not have this state persisted +var blockHeight = int64(13311684) + +func testClient(t *testing.T) *Client { + client, err := NewClient(url, 5*time.Second) + require.NoError(t, err, "failed to initialize client") + + return client +} + +func TestClientStatus(t *testing.T) { + client := testClient(t) + + res, err := client.rpcClient.Status(context.Background()) + require.NoError(t, err, "failed to get client status") + + resJson, err := json.Marshal(res) + require.NoError(t, err) + + t.Logf("Status Resp: %s \n", resJson) +} + +func TestBlockResults(t *testing.T) { + client := testClient(t) + + ctx := context.Background() + res, err := client.rpcClient.BlockResults(ctx, nil) + require.NoError(t, err, "failed to get block results") + + resJson, err := json.Marshal(res) + require.NoError(t, err) + + t.Logf("Block Results: %s \n", resJson) + + res2, err := client.BlockResults(ctx, nil) + require.NoError(t, err) + + res2Json, err := json.Marshal(res2) + require.NoError(t, err) + + t.Logf("Block Results: %s \n", res2Json) +} + +func TestABCIInfo(t *testing.T) { + client := testClient(t) + + res, err := client.rpcClient.ABCIInfo(context.Background()) + require.NoError(t, err, "failed to get ABCI info") + + resJson, err := json.Marshal(res) + require.NoError(t, err) + + t.Logf("ABCI Info: %s \n", resJson) +} + +func TestABCIQuery(t *testing.T) { + client := testClient(t) + + // TODO: pass in valid values for path and data + path := "" + data := bytes.HexBytes{} + + res, err := client.rpcClient.ABCIQuery(context.Background(), path, data) + require.NoError(t, err, "failed to query ABCI") + + require.Equal(t, uint32(6), res.Response.Code) + require.Equal(t, "no query path provided: unknown request", res.Response.Log) + require.Equal(t, "sdk", res.Response.Codespace) + + resJson, err := json.Marshal(res) + require.NoError(t, err) + + t.Logf("ABCI Query: %s \n", resJson) +} + +func TestBlockByHeight(t *testing.T) { + client := testClient(t) + + res, err := client.rpcClient.BlockResults(context.Background(), &blockHeight) + require.NoError(t, err, "failed to get block results") + + resJson, err := json.Marshal(res) + require.NoError(t, err) + + t.Logf("Block Results: %s \n", resJson) +} + +func TestConsensusParams(t *testing.T) { + client := testClient(t) + + res, err := client.rpcClient.ConsensusParams(context.Background(), &blockHeight) + if err != nil { + t.Fatalf("Failed to get consensus params: %v", err) + } + + t.Logf("Consensus Params: %v \n", res) +} + +func TestConsensusState(t *testing.T) { + client := testClient(t) + + res, err := client.rpcClient.ConsensusState(context.Background()) + if err != nil { + t.Fatalf("Failed to get consensus state: %v", err) + } + + t.Logf("Consensus State: %v \n", res) +} + +func 
TestDumpConsensusState(t *testing.T) { + client := testClient(t) + + res, err := client.rpcClient.DumpConsensusState(context.Background()) + if err != nil { + t.Fatalf("Failed to dump consensus state: %v", err) + } + + t.Logf("Dump Consensus State: %v \n", res) +} + +func TestGenesis(t *testing.T) { + client := testClient(t) + + res, err := client.rpcClient.Genesis(context.Background()) + if err != nil && !strings.Contains(err.Error(), "genesis response is large, please use the genesis_chunked API instead") { + t.Fatalf("Failed to get genesis: %v", err) + } + + t.Logf("Genesis: %v \n", res) +} + +func TestGenesisChunked(t *testing.T) { + client := testClient(t) + + chunk := uint(1) + res, err := client.rpcClient.GenesisChunked(context.Background(), chunk) + if err != nil { + t.Fatalf("Failed to get genesis chunk: %v", err) + } + + t.Logf("Genesis Chunk: %v \n", res) +} + +func TestHealth(t *testing.T) { + client := testClient(t) + + res, err := client.rpcClient.Health(context.Background()) + if err != nil { + t.Fatalf("Failed to get health status: %v", err) + } + + t.Logf("Health Status: %v \n", res) +} + +func TestNetInfo(t *testing.T) { + client := testClient(t) + + res, err := client.rpcClient.NetInfo(context.Background()) + if err != nil { + t.Fatalf("Failed to get network info: %v", err) + } + + t.Logf("Network Info: %v \n", res) +} + +func TestNumUnconfirmedTxs(t *testing.T) { + client := testClient(t) + + res, err := client.rpcClient.NumUnconfirmedTxs(context.Background()) + if err != nil { + t.Fatalf("Failed to get number of unconfirmed txs: %v \n", err) + } + + t.Logf("Num Of Unconfirmed Txs: %v \n", res) +} + +func TestUnconfirmedTxs(t *testing.T) { + client := testClient(t) + + limit := 5 + res, err := client.rpcClient.UnconfirmedTxs(context.Background(), &limit) + if err != nil { + t.Fatalf("Failed to get unconfirmed txs with limit %d: %v \n", limit, err) + } + + t.Logf("Unconfirmed Txs: %v \n", res) + require.Equal(t, limit+1, res.Count) // TODO: upstream off by one error? +} + +func TestValidators(t *testing.T) { + client := testClient(t) + + page := 1 + perPage := 5 + + res, err := client.rpcClient.Validators(context.Background(), &blockHeight, &page, &perPage) + if err != nil { + t.Fatalf("Failed to get validators: %v", err) + } + + t.Logf("Validators: %v \n", res) + require.Equal(t, perPage, res.Count) +} + +func TestBlockByHash(t *testing.T) { + +} + +func TestBlockSearch(t *testing.T) { + +} + +func TestBlockchainMinMaxHeight(t *testing.T) { + +} + +func TestBroadcastEvidence(t *testing.T) { + +} + +func TestBroadcastTxAsync(t *testing.T) { + +} + +func TestBroadcastTxCommit(t *testing.T) { + +} + +func TestBroadcastTxSync(t *testing.T) { + +} + +func TestCheckTx(t *testing.T) { + +} + +func TestCommit(t *testing.T) { + +} + +func TestUnsubscribeByQuery(t *testing.T) { + +} + +func TestUnsubscribeAll(t *testing.T) { + +} + +func TestSubscribe(t *testing.T) { + +} + +func TestTxByHash(t *testing.T) { + +} + +func TestTxSearch(t *testing.T) { + +} diff --git a/client/response.go b/client/response.go new file mode 100644 index 0000000..9ce16a7 --- /dev/null +++ b/client/response.go @@ -0,0 +1,46 @@ +package client + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + abci "github.com/strangelove-ventures/cometbft-client/abci/types" + "github.com/strangelove-ventures/cometbft-client/libs/bytes" + "github.com/strangelove-ventures/cometbft-client/types" +) + +// BlockResponse is used in place of the CometBFT type ResultBlockResults. 
+// This allows us to handle the decoding of events internally so that we can return events to consumers as raw strings. +type BlockResponse struct { + Height int64 + TxResponses []*ExecTxResponse + Events sdk.StringEvents + ValidatorUpdates []abci.ValidatorUpdate + AppHash []byte +} + +// ExecTxResponse is used in place of the CometBFT type ExecTxResult. +// This allows us to handle the decoding of events internally so that we can return events to consumers as raw strings. +type ExecTxResponse struct { + Code uint32 + Data []byte + Log string + Info string + GasWanted int64 + GasUsed int64 + Events sdk.StringEvents + Codespace string +} + +func (e *ExecTxResponse) IsOK() bool { + return e.Code == abci.CodeTypeOK +} + +// TxResponse is used in place of the CometBFT type ResultTx. +// This allows us to handle the decoding of events internally so that we can return events to consumers as raw strings. +type TxResponse struct { + Hash bytes.HexBytes + Height int64 + Index uint32 + ExecTx ExecTxResponse + Tx types.Tx + Proof types.TxProof +} diff --git a/crypto/armor/armor.go b/crypto/armor/armor.go new file mode 100644 index 0000000..99e2c3b --- /dev/null +++ b/crypto/armor/armor.go @@ -0,0 +1,39 @@ +package armor + +import ( + "bytes" + "fmt" + "io" + + "golang.org/x/crypto/openpgp/armor" //nolint: staticcheck +) + +func EncodeArmor(blockType string, headers map[string]string, data []byte) string { + buf := new(bytes.Buffer) + w, err := armor.Encode(buf, blockType, headers) + if err != nil { + panic(fmt.Errorf("could not encode ascii armor: %s", err)) + } + _, err = w.Write(data) + if err != nil { + panic(fmt.Errorf("could not encode ascii armor: %s", err)) + } + err = w.Close() + if err != nil { + panic(fmt.Errorf("could not encode ascii armor: %s", err)) + } + return buf.String() +} + +func DecodeArmor(armorStr string) (blockType string, headers map[string]string, data []byte, err error) { + buf := bytes.NewBufferString(armorStr) + block, err := armor.Decode(buf) + if err != nil { + return "", nil, nil, err + } + data, err = io.ReadAll(block.Body) + if err != nil { + return "", nil, nil, err + } + return block.Type, block.Header, data, nil +} diff --git a/crypto/armor/armor_test.go b/crypto/armor/armor_test.go new file mode 100644 index 0000000..8ecfaa0 --- /dev/null +++ b/crypto/armor/armor_test.go @@ -0,0 +1,20 @@ +package armor + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestArmor(t *testing.T) { + blockType := "MINT TEST" + data := []byte("somedata") + armorStr := EncodeArmor(blockType, nil, data) + + // Decode armorStr and test for equivalence. + blockType2, _, data2, err := DecodeArmor(armorStr) + require.Nil(t, err, "%+v", err) + assert.Equal(t, blockType, blockType2) + assert.Equal(t, data, data2) +} diff --git a/crypto/crypto.go b/crypto/crypto.go new file mode 100644 index 0000000..0cd1761 --- /dev/null +++ b/crypto/crypto.go @@ -0,0 +1,54 @@ +package crypto + +import ( + "github.com/strangelove-ventures/cometbft-client/crypto/tmhash" + "github.com/strangelove-ventures/cometbft-client/libs/bytes" +) + +const ( + // AddressSize is the size of a pubkey address. + AddressSize = tmhash.TruncatedSize +) + +// An address is a []byte, but hex-encoded even in JSON. +// []byte leaves us the option to change the address length. +// Use an alias so Unmarshal methods (with ptr receivers) are available too. 
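+// A minimal usage sketch (pubKeyBytes is a hypothetical input; String on HexBytes is assumed to render hex as in CometBFT's libs/bytes): +// addr := AddressHash(pubKeyBytes) // first 20 bytes of SHA-256(pubKeyBytes) +// s := addr.String() // hex-encoded form, e.g. for JSON or logs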
+type Address = bytes.HexBytes + +func AddressHash(bz []byte) Address { + return Address(tmhash.SumTruncated(bz)) +} + +type PubKey interface { + Address() Address + Bytes() []byte + VerifySignature(msg []byte, sig []byte) bool + Equals(PubKey) bool + Type() string +} + +type PrivKey interface { + Bytes() []byte + Sign(msg []byte) ([]byte, error) + PubKey() PubKey + Equals(PrivKey) bool + Type() string +} + +type Symmetric interface { + Keygen() []byte + Encrypt(plaintext []byte, secret []byte) (ciphertext []byte) + Decrypt(ciphertext []byte, secret []byte) (plaintext []byte, err error) +} + +// If a new key type implements batch verification, +// the key type must be registered in github.com/strangelove-ventures/cometbft-client/crypto/batch +type BatchVerifier interface { + // Add appends an entry into the BatchVerifier. + Add(key PubKey, message, signature []byte) error + // Verify verifies all the entries in the BatchVerifier, and returns + // if every signature in the batch is valid, and a vector of bools + // indicating the verification status of each signature (in the order + // that signatures were added to the batch). + Verify() (bool, []bool) +} diff --git a/crypto/ed25519/ed25519.go b/crypto/ed25519/ed25519.go new file mode 100644 index 0000000..101e7b5 --- /dev/null +++ b/crypto/ed25519/ed25519.go @@ -0,0 +1,228 @@ +package ed25519 + +import ( + "bytes" + "crypto/subtle" + "errors" + "fmt" + "io" + + "github.com/oasisprotocol/curve25519-voi/primitives/ed25519" + "github.com/oasisprotocol/curve25519-voi/primitives/ed25519/extra/cache" + + "github.com/strangelove-ventures/cometbft-client/crypto" + "github.com/strangelove-ventures/cometbft-client/crypto/tmhash" + cmtjson "github.com/strangelove-ventures/cometbft-client/libs/json" +) + +//------------------------------------- + +var ( + _ crypto.PrivKey = PrivKey{} + _ crypto.BatchVerifier = &BatchVerifier{} + + // curve25519-voi's Ed25519 implementation supports configurable + // verification behavior, and CometBFT uses the ZIP-215 verification + // semantics. + verifyOptions = &ed25519.Options{ + Verify: ed25519.VerifyOptionsZIP_215, + } + + cachingVerifier = cache.NewVerifier(cache.NewLRUCache(cacheSize)) +) + +const ( + PrivKeyName = "tendermint/PrivKeyEd25519" + PubKeyName = "tendermint/PubKeyEd25519" + // PubKeySize is the size, in bytes, of public keys as used in this package. + PubKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 + // SignatureSize is the size of an Edwards25519 signature. Namely the size of a compressed + // Edwards25519 point and a field element, both of which are 32 bytes. + SignatureSize = 64 + // SeedSize is the size, in bytes, of private key seeds. These are the + // private key representations used by RFC 8032. + SeedSize = 32 + + KeyType = "ed25519" + + // cacheSize is the number of public keys that will be cached in + // an expanded format for repeated signature verification. + // + // TODO/perf: Either this should exclude single verification, or be + // tuned to `> validatorSize + maxTxnsPerBlock` to avoid cache + // thrashing. + cacheSize = 4096 +) + +func init() { + cmtjson.RegisterType(PubKey{}, PubKeyName) + cmtjson.RegisterType(PrivKey{}, PrivKeyName) +} + +// PrivKey implements crypto.PrivKey. +type PrivKey []byte + +// Bytes returns the privkey byte format. +func (privKey PrivKey) Bytes() []byte { + return []byte(privKey) +} + +// Sign produces a signature on the provided message.
+// This assumes the privkey is well-formed in the golang format. +// The first 32 bytes should be random, +// corresponding to the normal ed25519 private key. +// The latter 32 bytes should be the compressed public key. +// If these conditions aren't met, Sign will panic or produce an +// incorrect signature. +func (privKey PrivKey) Sign(msg []byte) ([]byte, error) { + signatureBytes := ed25519.Sign(ed25519.PrivateKey(privKey), msg) + return signatureBytes, nil +} + +// PubKey gets the corresponding public key from the private key. +// +// Panics if the private key is not initialized. +func (privKey PrivKey) PubKey() crypto.PubKey { + // If the latter 32 bytes of the privkey are all zero, privkey is not + // initialized. + initialized := false + for _, v := range privKey[32:] { + if v != 0 { + initialized = true + break + } + } + + if !initialized { + panic("Expected ed25519 PrivKey to include concatenated pubkey bytes") + } + + pubkeyBytes := make([]byte, PubKeySize) + copy(pubkeyBytes, privKey[32:]) + return PubKey(pubkeyBytes) +} + +// Equals - you probably don't need to use this. +// Runs in constant time based on length of the keys. +func (privKey PrivKey) Equals(other crypto.PrivKey) bool { + if otherEd, ok := other.(PrivKey); ok { + return subtle.ConstantTimeCompare(privKey[:], otherEd[:]) == 1 + } + + return false +} + +func (privKey PrivKey) Type() string { + return KeyType +} + +// GenPrivKey generates a new ed25519 private key. +// It uses OS randomness to generate the private key. +func GenPrivKey() PrivKey { + return genPrivKey(crypto.CReader()) +} + +// genPrivKey generates a new ed25519 private key using the provided reader. +func genPrivKey(rand io.Reader) PrivKey { + _, priv, err := ed25519.GenerateKey(rand) + if err != nil { + panic(err) + } + + return PrivKey(priv) +} + +// GenPrivKeyFromSecret hashes the secret with SHA2, and uses +// that 32 byte output to create the private key. +// NOTE: secret should be the output of a KDF like bcrypt, +// if it's derived from user input. +func GenPrivKeyFromSecret(secret []byte) PrivKey { + seed := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes. + + return PrivKey(ed25519.NewKeyFromSeed(seed)) +} + +//------------------------------------- + +var _ crypto.PubKey = PubKey{} + +// PubKey implements crypto.PubKey for the Ed25519 signature scheme. +type PubKey []byte + +// Address is the SHA256-20 of the raw pubkey bytes. +func (pubKey PubKey) Address() crypto.Address { + if len(pubKey) != PubKeySize { + panic("pubkey is incorrect size") + } + return crypto.Address(tmhash.SumTruncated(pubKey)) +} + +// Bytes returns the PubKey byte format. +func (pubKey PubKey) Bytes() []byte { + return []byte(pubKey) +} + +func (pubKey PubKey) VerifySignature(msg []byte, sig []byte) bool { + // make sure we use the same algorithm to sign + if len(sig) != SignatureSize { + return false + } + + return cachingVerifier.VerifyWithOptions(ed25519.PublicKey(pubKey), msg, sig, verifyOptions) +} + +func (pubKey PubKey) String() string { + return fmt.Sprintf("PubKeyEd25519{%X}", []byte(pubKey)) +} + +func (pubKey PubKey) Type() string { + return KeyType +} + +func (pubKey PubKey) Equals(other crypto.PubKey) bool { + if otherEd, ok := other.(PubKey); ok { + return bytes.Equal(pubKey[:], otherEd[:]) + } + + return false +} + +//------------------------------------- + +// BatchVerifier implements batch verification for ed25519.
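+// A minimal usage sketch (mirroring TestBatchSafe in this PR; error handling elided): +// bv := NewBatchVerifier() +// _ = bv.Add(pubKey, msg, sig) // queue a (key, message, signature) entry +// ok, valid := bv.Verify() // ok is true only if every entry verifies; valid reports each entry's status in order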
+type BatchVerifier struct { + *ed25519.BatchVerifier +} + +func NewBatchVerifier() crypto.BatchVerifier { + return &BatchVerifier{ed25519.NewBatchVerifier()} +} + +func (b *BatchVerifier) Add(key crypto.PubKey, msg, signature []byte) error { + pkEd, ok := key.(PubKey) + if !ok { + return fmt.Errorf("pubkey is not Ed25519") + } + + pkBytes := pkEd.Bytes() + + if l := len(pkBytes); l != PubKeySize { + return fmt.Errorf("pubkey size is incorrect; expected: %d, got %d", PubKeySize, l) + } + + // check that the signature is the correct length + if len(signature) != SignatureSize { + return errors.New("invalid signature") + } + + cachingVerifier.AddWithOptions(b.BatchVerifier, ed25519.PublicKey(pkBytes), msg, signature, verifyOptions) + + return nil +} + +func (b *BatchVerifier) Verify() (bool, []bool) { + return b.BatchVerifier.Verify(crypto.CReader()) +} diff --git a/crypto/ed25519/ed25519_test.go b/crypto/ed25519/ed25519_test.go new file mode 100644 index 0000000..2b68bac --- /dev/null +++ b/crypto/ed25519/ed25519_test.go @@ -0,0 +1,54 @@ +package ed25519_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/strangelove-ventures/cometbft-client/crypto" + "github.com/strangelove-ventures/cometbft-client/crypto/ed25519" +) + +func TestSignAndValidateEd25519(t *testing.T) { + privKey := ed25519.GenPrivKey() + pubKey := privKey.PubKey() + + msg := crypto.CRandBytes(128) + sig, err := privKey.Sign(msg) + require.Nil(t, err) + + // Test the signature + assert.True(t, pubKey.VerifySignature(msg, sig)) + + // Mutate the signature, just one bit. + // TODO: Replace this with a much better fuzzer, tendermint/ed25519/issues/10 + sig[7] ^= byte(0x01) + + assert.False(t, pubKey.VerifySignature(msg, sig)) +} + +func TestBatchSafe(t *testing.T) { + v := ed25519.NewBatchVerifier() + + for i := 0; i <= 38; i++ { + priv := ed25519.GenPrivKey() + pub := priv.PubKey() + + var msg []byte + if i%2 == 0 { + msg = []byte("easter") + } else { + msg = []byte("egg") + } + + sig, err := priv.Sign(msg) + require.NoError(t, err) + + err = v.Add(pub, msg, sig) + require.NoError(t, err) + } + + ok, _ := v.Verify() + require.True(t, ok) +} diff --git a/crypto/encoding/codec.go b/crypto/encoding/codec.go new file mode 100644 index 0000000..7482ed9 --- /dev/null +++ b/crypto/encoding/codec.go @@ -0,0 +1,12 @@ +package encoding + +import ( + "github.com/strangelove-ventures/cometbft-client/libs/json" + pc "github.com/strangelove-ventures/cometbft-client/proto/tendermint/crypto" +) + +func init() { + json.RegisterType((*pc.PublicKey)(nil), "tendermint.crypto.PublicKey") + json.RegisterType((*pc.PublicKey_Ed25519)(nil), "tendermint.crypto.PublicKey_Ed25519") + json.RegisterType((*pc.PublicKey_Secp256K1)(nil), "tendermint.crypto.PublicKey_Secp256K1") +} diff --git a/crypto/example_test.go b/crypto/example_test.go new file mode 100644 index 0000000..7bcaee6 --- /dev/null +++ b/crypto/example_test.go @@ -0,0 +1,14 @@ +package crypto_test + +import ( + "fmt" + + "github.com/strangelove-ventures/cometbft-client/crypto" +) + +func ExampleSha256() { + sum := crypto.Sha256([]byte("This is CometBFT")) + fmt.Printf("%x\n", sum) + // Output: + // ea186526b041852d923b02c91aa04b00c0df258b3d69cb688eaba577f5562758 +} diff --git a/crypto/hash.go b/crypto/hash.go new file mode 100644 index 0000000..e1d2252 --- /dev/null +++ b/crypto/hash.go @@ -0,0 +1,11 @@ +package crypto + +import ( + "crypto/sha256" +) + +func Sha256(bytes []byte) []byte { + hasher := sha256.New() + 
hasher.Write(bytes) + return hasher.Sum(nil) +} diff --git a/crypto/merkle/doc.go b/crypto/merkle/doc.go new file mode 100644 index 0000000..fe50b34 --- /dev/null +++ b/crypto/merkle/doc.go @@ -0,0 +1,30 @@ +/* +Package merkle computes a deterministic minimal height Merkle tree hash. +If the number of items is not a power of two, some leaves +will be at different levels. It tries to keep both sides of +the tree the same size, but the left may be one greater. + +Use this for short deterministic trees, such as the validator list. +For larger datasets, use IAVLTree. + +Be aware that the current implementation by itself does not prevent +second pre-image attacks. Hence, use this library with caution. +Otherwise you might run into similar issues as, e.g., in early Bitcoin: +https://bitcointalk.org/?topic=102395 + + * + / \ + / \ + / \ + / \ + * * + / \ / \ + / \ / \ + / \ / \ + * * * h6 + / \ / \ / \ + h0 h1 h2 h3 h4 h5 + +TODO(ismail): add 2nd pre-image protection or clarify further on how we use this and why this is secure. +*/ +package merkle diff --git a/crypto/merkle/hash.go b/crypto/merkle/hash.go new file mode 100644 index 0000000..25950c8 --- /dev/null +++ b/crypto/merkle/hash.go @@ -0,0 +1,48 @@ +package merkle + +import ( + "hash" + + "github.com/strangelove-ventures/cometbft-client/crypto/tmhash" +) + +// TODO: make these have a large predefined capacity +var ( + leafPrefix = []byte{0} + innerPrefix = []byte{1} +) + +// returns tmhash() +func emptyHash() []byte { + return tmhash.Sum([]byte{}) +} + +// returns tmhash(0x00 || leaf) +func leafHash(leaf []byte) []byte { + return tmhash.Sum(append(leafPrefix, leaf...)) +} + +// returns tmhash(0x00 || leaf) +func leafHashOpt(s hash.Hash, leaf []byte) []byte { + s.Reset() + s.Write(leafPrefix) + s.Write(leaf) + return s.Sum(nil) +} + +// returns tmhash(0x01 || left || right) +func innerHash(left []byte, right []byte) []byte { + data := make([]byte, len(innerPrefix)+len(left)+len(right)) + n := copy(data, innerPrefix) + n += copy(data[n:], left) + copy(data[n:], right) + return tmhash.Sum(data) +} + +func innerHashOpt(s hash.Hash, left []byte, right []byte) []byte { + s.Reset() + s.Write(innerPrefix) + s.Write(left) + s.Write(right) + return s.Sum(nil) +} diff --git a/crypto/merkle/proof.go b/crypto/merkle/proof.go new file mode 100644 index 0000000..e5429a0 --- /dev/null +++ b/crypto/merkle/proof.go @@ -0,0 +1,251 @@ +package merkle + +import ( + "bytes" + "errors" + "fmt" + + "github.com/strangelove-ventures/cometbft-client/crypto/tmhash" +) + +const ( + // MaxAunts is the maximum number of aunts that can be included in a Proof. + // This corresponds to a tree of size 2^100, which should be sufficient for all conceivable purposes. + // This maximum helps prevent Denial-of-Service attacks by limiting the size of the proofs. + MaxAunts = 100 +) + +// Proof represents a Merkle proof. +// NOTE: The convention for proofs is to include leaf hashes but to +// exclude the root hash. +// This convention is implemented across IAVL range proofs as well. +// Keep this consistent unless there's a very good reason to change +// everything. This also affects the generalized proof system as +// well. +type Proof struct { + Total int64 `json:"total"` // Total number of items. + Index int64 `json:"index"` // Index of item to prove. + LeafHash []byte `json:"leaf_hash"` // Hash of item value. + Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. +} + +// ProofsFromByteSlices computes inclusion proof for given items.
+// proofs[0] is the proof for items[0]. +func ProofsFromByteSlices(items [][]byte) (rootHash []byte, proofs []*Proof) { + trails, rootSPN := trailsFromByteSlices(items) + rootHash = rootSPN.Hash + proofs = make([]*Proof, len(items)) + for i, trail := range trails { + proofs[i] = &Proof{ + Total: int64(len(items)), + Index: int64(i), + LeafHash: trail.Hash, + Aunts: trail.FlattenAunts(), + } + } + return +} + +// Verify that the Proof proves the root hash. +// Check sp.Index/sp.Total manually if needed +func (sp *Proof) Verify(rootHash []byte, leaf []byte) error { + if rootHash == nil { + return fmt.Errorf("invalid root hash: cannot be nil") + } + if sp.Total < 0 { + return errors.New("proof total must be positive") + } + if sp.Index < 0 { + return errors.New("proof index cannot be negative") + } + leafHash := leafHash(leaf) + if !bytes.Equal(sp.LeafHash, leafHash) { + return fmt.Errorf("invalid leaf hash: wanted %X got %X", leafHash, sp.LeafHash) + } + computedHash, err := sp.computeRootHash() + if err != nil { + return fmt.Errorf("compute root hash: %w", err) + } + if !bytes.Equal(computedHash, rootHash) { + return fmt.Errorf("invalid root hash: wanted %X got %X", rootHash, computedHash) + } + return nil +} + +// Compute the root hash given a leaf hash. Panics in case of errors. +func (sp *Proof) ComputeRootHash() []byte { + computedHash, err := sp.computeRootHash() + if err != nil { + panic(fmt.Errorf("ComputeRootHash errored %w", err)) + } + return computedHash +} + +// Compute the root hash given a leaf hash. +func (sp *Proof) computeRootHash() ([]byte, error) { + return computeHashFromAunts( + sp.Index, + sp.Total, + sp.LeafHash, + sp.Aunts, + ) +} + +// String implements the stringer interface for Proof. +// It is a wrapper around StringIndented. +func (sp *Proof) String() string { + return sp.StringIndented("") +} + +// StringIndented generates a canonical string representation of a Proof. +func (sp *Proof) StringIndented(indent string) string { + return fmt.Sprintf(`Proof{ +%s Aunts: %X +%s}`, + indent, sp.Aunts, + indent) +} + +// ValidateBasic performs basic validation. +// NOTE: it expects the LeafHash and the elements of Aunts to be of size tmhash.Size, +// and it expects at most MaxAunts elements in Aunts. +func (sp *Proof) ValidateBasic() error { + if sp.Total < 0 { + return errors.New("negative Total") + } + if sp.Index < 0 { + return errors.New("negative Index") + } + if len(sp.LeafHash) != tmhash.Size { + return fmt.Errorf("expected LeafHash size to be %d, got %d", tmhash.Size, len(sp.LeafHash)) + } + if len(sp.Aunts) > MaxAunts { + return fmt.Errorf("expected no more than %d aunts, got %d", MaxAunts, len(sp.Aunts)) + } + for i, auntHash := range sp.Aunts { + if len(auntHash) != tmhash.Size { + return fmt.Errorf("expected Aunts#%d size to be %d, got %d", i, tmhash.Size, len(auntHash)) + } + } + return nil +} + +//func (sp *Proof) ToProto() *cmtcrypto.Proof { +// if sp == nil { +// return nil +// } +// pb := new(cmtcrypto.Proof) +// +// pb.Total = sp.Total +// pb.Index = sp.Index +// pb.LeafHash = sp.LeafHash +// pb.Aunts = sp.Aunts +// +// return pb +//} + +//func ProofFromProto(pb *cmtcrypto.Proof) (*Proof, error) { +// if pb == nil { +// return nil, errors.New("nil proof") +// } +// +// sp := new(Proof) +// +// sp.Total = pb.Total +// sp.Index = pb.Index +// sp.LeafHash = pb.LeafHash +// sp.Aunts = pb.Aunts +// +// return sp, sp.ValidateBasic() +//} + +// Use the leafHash and innerHashes to get the root merkle hash. 
+// If the length of the innerHashes slice isn't exactly correct, the result is nil. +// Recursive impl. +func computeHashFromAunts(index, total int64, leafHash []byte, innerHashes [][]byte) ([]byte, error) { + if index >= total || index < 0 || total <= 0 { + return nil, fmt.Errorf("invalid index %d and/or total %d", index, total) + } + switch total { + case 0: + panic("Cannot call computeHashFromAunts() with 0 total") + case 1: + if len(innerHashes) != 0 { + return nil, fmt.Errorf("unexpected inner hashes") + } + return leafHash, nil + default: + if len(innerHashes) == 0 { + return nil, fmt.Errorf("expected at least one inner hash") + } + numLeft := getSplitPoint(total) + if index < numLeft { + leftHash, err := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + if err != nil { + return nil, err + } + + return innerHash(leftHash, innerHashes[len(innerHashes)-1]), nil + } + rightHash, err := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + if err != nil { + return nil, err + } + return innerHash(innerHashes[len(innerHashes)-1], rightHash), nil + } +} + +// ProofNode is a helper structure to construct merkle proof. +// The node and the tree is thrown away afterwards. +// Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil. +// node.Parent.Hash = hash(node.Hash, node.Right.Hash) or +// hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child. +type ProofNode struct { + Hash []byte + Parent *ProofNode + Left *ProofNode // Left sibling (only one of Left,Right is set) + Right *ProofNode // Right sibling (only one of Left,Right is set) +} + +// FlattenAunts will return the inner hashes for the item corresponding to the leaf, +// starting from a leaf ProofNode. +func (spn *ProofNode) FlattenAunts() [][]byte { + // Nonrecursive impl. + innerHashes := [][]byte{} + for spn != nil { + switch { + case spn.Left != nil: + innerHashes = append(innerHashes, spn.Left.Hash) + case spn.Right != nil: + innerHashes = append(innerHashes, spn.Right.Hash) + default: + break + } + spn = spn.Parent + } + return innerHashes +} + +// trails[0].Hash is the leaf hash for items[0]. +// trails[i].Parent.Parent....Parent == root for all i. +func trailsFromByteSlices(items [][]byte) (trails []*ProofNode, root *ProofNode) { + // Recursive impl. + switch len(items) { + case 0: + return []*ProofNode{}, &ProofNode{emptyHash(), nil, nil, nil} + case 1: + trail := &ProofNode{leafHash(items[0]), nil, nil, nil} + return []*ProofNode{trail}, trail + default: + k := getSplitPoint(int64(len(items))) + lefts, leftRoot := trailsFromByteSlices(items[:k]) + rights, rightRoot := trailsFromByteSlices(items[k:]) + rootHash := innerHash(leftRoot.Hash, rightRoot.Hash) + root := &ProofNode{rootHash, nil, nil, nil} + leftRoot.Parent = root + leftRoot.Right = rightRoot + rightRoot.Parent = root + rightRoot.Left = leftRoot + return append(lefts, rights...), root + } +} diff --git a/crypto/merkle/proof_key_path.go b/crypto/merkle/proof_key_path.go new file mode 100644 index 0000000..ca8b5f0 --- /dev/null +++ b/crypto/merkle/proof_key_path.go @@ -0,0 +1,110 @@ +package merkle + +import ( + "encoding/hex" + "errors" + "fmt" + "net/url" + "strings" +) + +/* + + For generalized Merkle proofs, each layer of the proof may require an + optional key. The key may be encoded either by URL-encoding or + (upper-case) hex-encoding. + TODO: In the future, more encodings may be supported, like base32 (e.g. 
+ /32:) + + For example, for a Cosmos-SDK application where the first two proof layers + are ValueOps, and the third proof layer is an IAVLValueOp, the keys + might look like: + + 0: []byte("App") + 1: []byte("IBC") + 2: []byte{0x01, 0x02, 0x03} + + Assuming that we know that the first two layers are always ASCII texts, we + probably want to use URLEncoding for those, whereas the third layer will + require HEX encoding for efficient representation. + + kp := new(KeyPath) + kp.AppendKey([]byte("App"), KeyEncodingURL) + kp.AppendKey([]byte("IBC"), KeyEncodingURL) + kp.AppendKey([]byte{0x01, 0x02, 0x03}, KeyEncodingHex) + kp.String() // Should return "/App/IBC/x:010203" + + NOTE: Key paths must begin with a `/`. + + NOTE: All encodings *MUST* work compatibly, such that you can choose to use + whatever encoding, and the decoded keys will always be the same. In other + words, it's just as good to encode all three keys using URL encoding or HEX + encoding... it just wouldn't be optimal in terms of readability or space + efficiency. + + NOTE: Punycode will never be supported here, because not all values can be + decoded. For example, no string decodes to the string "xn--blah" in + Punycode. + +*/ + +type keyEncoding int + +const ( + KeyEncodingURL keyEncoding = iota + KeyEncodingHex + KeyEncodingMax // Number of known encodings. Used for testing +) + +type Key struct { + name []byte + enc keyEncoding +} + +type KeyPath []Key + +func (pth KeyPath) AppendKey(key []byte, enc keyEncoding) KeyPath { + return append(pth, Key{key, enc}) +} + +func (pth KeyPath) String() string { + res := "" + for _, key := range pth { + switch key.enc { + case KeyEncodingURL: + res += "/" + url.PathEscape(string(key.name)) + case KeyEncodingHex: + res += "/x:" + fmt.Sprintf("%X", key.name) + default: + panic("unexpected key encoding type") + } + } + return res } + +// KeyPathToKeys decodes a path to a list of keys. The path must begin with `/`. +// Each key must use a known encoding. +func KeyPathToKeys(path string) (keys [][]byte, err error) { + if path == "" || path[0] != '/' { + return nil, errors.New("key path string must start with a forward slash '/'") + } + parts := strings.Split(path[1:], "/") + keys = make([][]byte, len(parts)) + for i, part := range parts { + if strings.HasPrefix(part, "x:") { + hexPart := part[2:] + key, err := hex.DecodeString(hexPart) + if err != nil { + return nil, fmt.Errorf("decoding hex-encoded part #%d: /%s: %w", i, part, err) + } + keys[i] = key + } else { + key, err := url.PathUnescape(part) + if err != nil { + return nil, fmt.Errorf("decoding url-encoded part #%d: /%s: %w", i, part, err) + } + keys[i] = []byte(key) // TODO Test this with random bytes, I'm not sure that it works for arbitrary bytes...
+ } + } + return keys, nil +} diff --git a/crypto/merkle/proof_key_path_test.go b/crypto/merkle/proof_key_path_test.go new file mode 100644 index 0000000..0d6d335 --- /dev/null +++ b/crypto/merkle/proof_key_path_test.go @@ -0,0 +1,45 @@ +package merkle + +import ( + // it is ok to use math/rand here: we do not need a cryptographically secure random + // number generator here and we can run the tests a bit faster + crand "crypto/rand" + "math/rand" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestKeyPath(t *testing.T) { + var path KeyPath + keys := make([][]byte, 10) + alphanum := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + + for d := 0; d < 1e4; d++ { + path = nil + + for i := range keys { + enc := keyEncoding(rand.Intn(int(KeyEncodingMax))) + keys[i] = make([]byte, rand.Uint32()%20) + switch enc { + case KeyEncodingURL: + for j := range keys[i] { + keys[i][j] = alphanum[rand.Intn(len(alphanum))] + } + case KeyEncodingHex: + _, _ = crand.Read(keys[i]) + default: + panic("Unexpected encoding") + } + path = path.AppendKey(keys[i], enc) + } + + res, err := KeyPathToKeys(path.String()) + require.Nil(t, err) + require.Equal(t, len(keys), len(res)) + + for i, key := range keys { + require.Equal(t, key, res[i]) + } + } +} diff --git a/crypto/merkle/rfc6962_test.go b/crypto/merkle/rfc6962_test.go new file mode 100644 index 0000000..9f15cd1 --- /dev/null +++ b/crypto/merkle/rfc6962_test.go @@ -0,0 +1,105 @@ +package merkle + +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// These tests were taken from https://github.com/google/trillian/blob/master/merkle/rfc6962/rfc6962_test.go, +// and consequently fall under the above license. +import ( + "bytes" + "encoding/hex" + "testing" + + "github.com/strangelove-ventures/cometbft-client/crypto/tmhash" +) + +func TestRFC6962Hasher(t *testing.T) { + _, leafHashTrail := trailsFromByteSlices([][]byte{[]byte("L123456")}) + leafHash := leafHashTrail.Hash + _, leafHashTrail = trailsFromByteSlices([][]byte{{}}) + emptyLeafHash := leafHashTrail.Hash + _, emptyHashTrail := trailsFromByteSlices([][]byte{}) + emptyTreeHash := emptyHashTrail.Hash + for _, tc := range []struct { + desc string + got []byte + want string + }{ + // Check that empty trees return the hash of an empty string. + // echo -n '' | sha256sum + { + desc: "RFC6962 Empty Tree", + want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"[:tmhash.Size*2], + got: emptyTreeHash, + }, + + // Check that the empty hash is not the same as the hash of an empty leaf. 
+ // echo -n 00 | xxd -r -p | sha256sum + { + desc: "RFC6962 Empty Leaf", + want: "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"[:tmhash.Size*2], + got: emptyLeafHash, + }, + // echo -n 004C313233343536 | xxd -r -p | sha256sum + { + desc: "RFC6962 Leaf", + want: "395aa064aa4c29f7010acfe3f25db9485bbd4b91897b6ad7ad547639252b4d56"[:tmhash.Size*2], + got: leafHash, + }, + // echo -n 014E3132334E343536 | xxd -r -p | sha256sum + { + desc: "RFC6962 Node", + want: "aa217fe888e47007fa15edab33c2b492a722cb106c64667fc2b044444de66bbb"[:tmhash.Size*2], + got: innerHash([]byte("N123"), []byte("N456")), + }, + } { + tc := tc + t.Run(tc.desc, func(t *testing.T) { + wantBytes, err := hex.DecodeString(tc.want) + if err != nil { + t.Fatalf("hex.DecodeString(%x): %v", tc.want, err) + } + if got, want := tc.got, wantBytes; !bytes.Equal(got, want) { + t.Errorf("got %x, want %x", got, want) + } + }) + } +} + +func TestRFC6962HasherCollisions(t *testing.T) { + // Check that different leaves have different hashes. + leaf1, leaf2 := []byte("Hello"), []byte("World") + _, leafHashTrail := trailsFromByteSlices([][]byte{leaf1}) + hash1 := leafHashTrail.Hash + _, leafHashTrail = trailsFromByteSlices([][]byte{leaf2}) + hash2 := leafHashTrail.Hash + if bytes.Equal(hash1, hash2) { + t.Errorf("leaf hashes should differ, but both are %x", hash1) + } + // Compute an intermediate subtree hash. + _, subHash1Trail := trailsFromByteSlices([][]byte{hash1, hash2}) + subHash1 := subHash1Trail.Hash + // Check that this is not the same as a leaf hash of their concatenation. + preimage := append(hash1, hash2...) + _, forgedHashTrail := trailsFromByteSlices([][]byte{preimage}) + forgedHash := forgedHashTrail.Hash + if bytes.Equal(subHash1, forgedHash) { + t.Errorf("hasher is not second-preimage resistant") + } + // Swap the order of nodes and check that the hash is different. + _, subHash2Trail := trailsFromByteSlices([][]byte{hash2, hash1}) + subHash2 := subHash2Trail.Hash + if bytes.Equal(subHash1, subHash2) { + t.Errorf("subtree hash does not depend on the order of leaves") + } +} diff --git a/crypto/merkle/tree.go b/crypto/merkle/tree.go new file mode 100644 index 0000000..896b67c --- /dev/null +++ b/crypto/merkle/tree.go @@ -0,0 +1,112 @@ +package merkle + +import ( + "crypto/sha256" + "hash" + "math/bits" +) + +// HashFromByteSlices computes a Merkle tree where the leaves are the byte slices, +// in the provided order. It follows RFC-6962. +func HashFromByteSlices(items [][]byte) []byte { + return hashFromByteSlices(sha256.New(), items) +} + +func hashFromByteSlices(sha hash.Hash, items [][]byte) []byte { + switch len(items) { + case 0: + return emptyHash() + case 1: + return leafHashOpt(sha, items[0]) + default: + k := getSplitPoint(int64(len(items))) + left := hashFromByteSlices(sha, items[:k]) + right := hashFromByteSlices(sha, items[k:]) + return innerHashOpt(sha, left, right) + } +} + +// HashFromByteSlicesIterative is an iterative alternative to +// HashFromByteSlices motivated by potential performance improvements. +// (#2611) had suggested that an iterative version of +// HashFromByteSlices would be faster, presumably because +// we can envision some overhead accumulating from stack +// frames and function calls. Additionally, a recursive algorithm risks +// hitting the stack limit and causing a stack overflow should the tree +// be too large. +// +// Provided here is an iterative alternative, a test to assert +// correctness and a benchmark.
On the performance side, there appears to +// be no overall difference: +// +// BenchmarkHashAlternatives/recursive-4 20000 77677 ns/op +// BenchmarkHashAlternatives/iterative-4 20000 76802 ns/op +// +// On the surface it might seem that the additional overhead is due to +// the different allocation patterns of the implementations. The recursive +// version uses a single [][]byte slice which it then re-slices at each level of the tree. +// The iterative version reproduces [][]byte once within the function and +// then rewrites sub-slices of that array at each level of the tree. +// +// Experimenting by modifying the code to simply calculate the +// hash and not store the result shows little to no difference in performance. +// +// These preliminary results suggest: +// +// 1. The performance of HashFromByteSlices is pretty good +// 2. Go has low overhead for recursive functions +// 3. The performance of the HashFromByteSlices routine is dominated +// by the actual hashing of data +// +// Although this work is in no way exhaustive, point #3 suggests that +// optimization of this routine would need to take an alternative +// approach to make significant improvements on the current performance. +// +// Finally, considering that the recursive implementation is easier to +// read, it might not be worthwhile to switch to a less intuitive +// implementation for so little benefit. +func HashFromByteSlicesIterative(input [][]byte) []byte { + items := make([][]byte, len(input)) + sha := sha256.New() + for i, leaf := range input { + items[i] = leafHash(leaf) + } + + size := len(items) + for { + switch size { + case 0: + return emptyHash() + case 1: + return items[0] + default: + rp := 0 // read position + wp := 0 // write position + for rp < size { + if rp+1 < size { + items[wp] = innerHashOpt(sha, items[rp], items[rp+1]) + rp += 2 + } else { + items[wp] = items[rp] + rp++ + } + wp++ + } + size = wp + } + } +} + +// getSplitPoint returns the largest power of 2 less than length +func getSplitPoint(length int64) int64 { + if length < 1 { + panic("Trying to split a tree with size < 1") + } + uLength := uint(length) + bitlen := bits.Len(uLength) + k := int64(1 << uint(bitlen-1)) + if k == length { + k >>= 1 + } + return k +} diff --git a/crypto/merkle/tree_test.go b/crypto/merkle/tree_test.go new file mode 100644 index 0000000..2caa944 --- /dev/null +++ b/crypto/merkle/tree_test.go @@ -0,0 +1,161 @@ +package merkle + +import ( + "encoding/hex" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cmtrand "github.com/strangelove-ventures/cometbft-client/libs/rand" + .
"github.com/strangelove-ventures/cometbft-client/libs/test" + + "github.com/strangelove-ventures/cometbft-client/crypto/tmhash" +) + +type testItem []byte + +func (tI testItem) Hash() []byte { + return []byte(tI) +} + +func TestHashFromByteSlices(t *testing.T) { + testcases := map[string]struct { + slices [][]byte + expectHash string // in hex format + }{ + "nil": {nil, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, + "empty": {[][]byte{}, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, + "single": {[][]byte{{1, 2, 3}}, "054edec1d0211f624fed0cbca9d4f9400b0e491c43742af2c5b0abebf0c990d8"}, + "single blank": {[][]byte{{}}, "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"}, + "two": {[][]byte{{1, 2, 3}, {4, 5, 6}}, "82e6cfce00453804379b53962939eaa7906b39904be0813fcadd31b100773c4b"}, + "many": { + [][]byte{{1, 2}, {3, 4}, {5, 6}, {7, 8}, {9, 10}}, + "f326493eceab4f2d9ffbc78c59432a0a005d6ea98392045c74df5d14a113be18", + }, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + hash := HashFromByteSlices(tc.slices) + assert.Equal(t, tc.expectHash, hex.EncodeToString(hash)) + }) + } +} + +func TestProof(t *testing.T) { + + // Try an empty proof first + rootHash, proofs := ProofsFromByteSlices([][]byte{}) + require.Equal(t, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hex.EncodeToString(rootHash)) + require.Empty(t, proofs) + + total := 100 + + items := make([][]byte, total) + for i := 0; i < total; i++ { + items[i] = testItem(cmtrand.Bytes(tmhash.Size)) + } + + rootHash = HashFromByteSlices(items) + + rootHash2, proofs := ProofsFromByteSlices(items) + + require.Equal(t, rootHash, rootHash2, "Unmatched root hashes: %X vs %X", rootHash, rootHash2) + + // For each item, check the trail. + for i, item := range items { + proof := proofs[i] + + // Check total/index + require.EqualValues(t, proof.Index, i, "Unmatched indicies: %d vs %d", proof.Index, i) + + require.EqualValues(t, proof.Total, total, "Unmatched totals: %d vs %d", proof.Total, total) + + // Verify success + err := proof.Verify(rootHash, item) + require.NoError(t, err, "Verification failed: %v.", err) + + // Trail too long should make it fail + origAunts := proof.Aunts + proof.Aunts = append(proof.Aunts, cmtrand.Bytes(32)) + err = proof.Verify(rootHash, item) + require.Error(t, err, "Expected verification to fail for wrong trail length") + + proof.Aunts = origAunts + + // Trail too short should make it fail + proof.Aunts = proof.Aunts[0 : len(proof.Aunts)-1] + err = proof.Verify(rootHash, item) + require.Error(t, err, "Expected verification to fail for wrong trail length") + + proof.Aunts = origAunts + + // Mutating the itemHash should make it fail. + err = proof.Verify(rootHash, MutateByteSlice(item)) + require.Error(t, err, "Expected verification to fail for mutated leaf hash") + + // Mutating the rootHash should make it fail. 
+ err = proof.Verify(MutateByteSlice(rootHash), item) + require.Error(t, err, "Expected verification to fail for mutated root hash") + } +} + +func TestHashAlternatives(t *testing.T) { + + total := 100 + + items := make([][]byte, total) + for i := 0; i < total; i++ { + items[i] = testItem(cmtrand.Bytes(tmhash.Size)) + } + + rootHash1 := HashFromByteSlicesIterative(items) + rootHash2 := HashFromByteSlices(items) + require.Equal(t, rootHash1, rootHash2, "Unmatched root hashes: %X vs %X", rootHash1, rootHash2) +} + +func BenchmarkHashAlternatives(b *testing.B) { + total := 100 + + items := make([][]byte, total) + for i := 0; i < total; i++ { + items[i] = testItem(cmtrand.Bytes(tmhash.Size)) + } + + b.ResetTimer() + b.Run("recursive", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = HashFromByteSlices(items) + } + }) + + b.Run("iterative", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = HashFromByteSlicesIterative(items) + } + }) +} + +func Test_getSplitPoint(t *testing.T) { + tests := []struct { + length int64 + want int64 + }{ + {1, 0}, + {2, 1}, + {3, 2}, + {4, 2}, + {5, 4}, + {10, 8}, + {20, 16}, + {100, 64}, + {255, 128}, + {256, 128}, + {257, 256}, + } + for _, tt := range tests { + got := getSplitPoint(tt.length) + require.EqualValues(t, tt.want, got, "getSplitPoint(%d) = %v, want %v", tt.length, got, tt.want) + } +} diff --git a/crypto/merkle/types.go b/crypto/merkle/types.go new file mode 100644 index 0000000..e4ab050 --- /dev/null +++ b/crypto/merkle/types.go @@ -0,0 +1,20 @@ +package merkle + +// Tree is a Merkle tree interface. +type Tree interface { + Size() (size int) + Height() (height int8) + Has(key []byte) (has bool) + Proof(key []byte) (value []byte, proof []byte, exists bool) // TODO make it return an index + Get(key []byte) (index int, value []byte, exists bool) + GetByIndex(index int) (key []byte, value []byte) + Set(key []byte, value []byte) (updated bool) + Remove(key []byte) (value []byte, removed bool) + HashWithCount() (hash []byte, count int) + Hash() (hash []byte) + Save() (hash []byte) + Load(hash []byte) + Copy() Tree + Iterate(func(key []byte, value []byte) (stop bool)) (stopped bool) + IterateRange(start []byte, end []byte, ascending bool, fx func(key []byte, value []byte) (stop bool)) (stopped bool) +} diff --git a/crypto/random.go b/crypto/random.go new file mode 100644 index 0000000..275fb10 --- /dev/null +++ b/crypto/random.go @@ -0,0 +1,35 @@ +package crypto + +import ( + crand "crypto/rand" + "encoding/hex" + "io" +) + +// This only uses the OS's randomness +func randBytes(numBytes int) []byte { + b := make([]byte, numBytes) + _, err := crand.Read(b) + if err != nil { + panic(err) + } + return b +} + +// This only uses the OS's randomness +func CRandBytes(numBytes int) []byte { + return randBytes(numBytes) +} + +// CRandHex returns a hex encoded string that's floor(numDigits/2) * 2 long. +// +// Note: CRandHex(24) gives 96 bits of randomness that +// are usually strong enough for most purposes. +func CRandHex(numDigits int) string { + return hex.EncodeToString(CRandBytes(numDigits / 2)) +} + +// Returns a crand.Reader. 
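+//
+// A minimal usage sketch (illustrative):
+//
+//	buf := make([]byte, 32)
+//	if _, err := io.ReadFull(CReader(), buf); err != nil {
+//		panic(err) // the OS entropy source should not fail
+//	}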
+func CReader() io.Reader {
+	return crand.Reader
+}
diff --git a/crypto/random_test.go b/crypto/random_test.go
new file mode 100644
index 0000000..2d1cb34
--- /dev/null
+++ b/crypto/random_test.go
@@ -0,0 +1,23 @@
+package crypto_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/strangelove-ventures/cometbft-client/crypto"
+)
+
+// the purpose of this test is primarily to ensure that the randomness
+// generation won't error.
+func TestRandomConsistency(t *testing.T) {
+	x1 := crypto.CRandBytes(256)
+	x2 := crypto.CRandBytes(256)
+	x3 := crypto.CRandBytes(256)
+	x4 := crypto.CRandBytes(256)
+	x5 := crypto.CRandBytes(256)
+	require.NotEqual(t, x1, x2)
+	require.NotEqual(t, x3, x4)
+	require.NotEqual(t, x4, x5)
+	require.NotEqual(t, x1, x5)
+}
diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go
new file mode 100644
index 0000000..b0d3f7f
--- /dev/null
+++ b/crypto/secp256k1/secp256k1.go
@@ -0,0 +1,227 @@
+package secp256k1
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"crypto/subtle"
+	"fmt"
+	"io"
+	"math/big"
+
+	secp256k1 "github.com/btcsuite/btcd/btcec/v2"
+	"github.com/btcsuite/btcd/btcec/v2/ecdsa"
+	"golang.org/x/crypto/ripemd160" //nolint: staticcheck // necessary for Bitcoin address format
+
+	"github.com/strangelove-ventures/cometbft-client/crypto"
+	cmtjson "github.com/strangelove-ventures/cometbft-client/libs/json"
+)
+
+// -------------------------------------
+const (
+	PrivKeyName = "tendermint/PrivKeySecp256k1"
+	PubKeyName  = "tendermint/PubKeySecp256k1"
+
+	KeyType     = "secp256k1"
+	PrivKeySize = 32
+)
+
+func init() {
+	cmtjson.RegisterType(PubKey{}, PubKeyName)
+	cmtjson.RegisterType(PrivKey{}, PrivKeyName)
+}
+
+var _ crypto.PrivKey = PrivKey{}
+
+// PrivKey implements PrivKey.
+type PrivKey []byte
+
+// Bytes marshals the private key using amino encoding.
+func (privKey PrivKey) Bytes() []byte {
+	return []byte(privKey)
+}
+
+// PubKey performs the point-scalar multiplication from the privKey on the
+// generator point to get the pubkey.
+func (privKey PrivKey) PubKey() crypto.PubKey {
+	_, pubkeyObject := secp256k1.PrivKeyFromBytes(privKey)
+
+	pk := pubkeyObject.SerializeCompressed()
+
+	return PubKey(pk)
+}
+
+// Equals - you probably don't need to use this.
+// Runs in constant time based on length of the keys.
+func (privKey PrivKey) Equals(other crypto.PrivKey) bool {
+	if otherSecp, ok := other.(PrivKey); ok {
+		return subtle.ConstantTimeCompare(privKey[:], otherSecp[:]) == 1
+	}
+	return false
+}
+
+func (privKey PrivKey) Type() string {
+	return KeyType
+}
+
+// GenPrivKey generates a new ECDSA private key on curve secp256k1.
+// It uses OS randomness to generate the private key.
+func GenPrivKey() PrivKey {
+	return genPrivKey(crypto.CReader())
+}
+
+// genPrivKey generates a new secp256k1 private key using the provided reader.
+func genPrivKey(rand io.Reader) PrivKey {
+	var privKeyBytes [PrivKeySize]byte
+	d := new(big.Int)
+
+	for {
+		privKeyBytes = [PrivKeySize]byte{}
+		_, err := io.ReadFull(rand, privKeyBytes[:])
+		if err != nil {
+			panic(err)
+		}
+
+		d.SetBytes(privKeyBytes[:])
+		// break if we found a valid field element (i.e. > 0 and < N == curve order)
+		isValidFieldElement := 0 < d.Sign() && d.Cmp(secp256k1.S256().N) < 0
+		if isValidFieldElement {
+			break
+		}
+	}
+
+	return PrivKey(privKeyBytes[:])
+}
+
+var one = new(big.Int).SetInt64(1)
+
+// GenPrivKeySecp256k1 hashes the secret with SHA2, and uses
+// that 32 byte output to create the private key.
+//
+// It makes sure the private key is a valid field element by setting:
+//
+// c = sha256(secret)
+// k = (c mod (n − 1)) + 1, where n = curve order.
+//
+// NOTE: secret should be the output of a KDF like bcrypt,
+// if it's derived from user input.
+func GenPrivKeySecp256k1(secret []byte) PrivKey {
+	secHash := sha256.Sum256(secret)
+	// to guarantee that we have a valid field element, we use the approach of:
+	// "Suite B Implementer’s Guide to FIPS 186-3", A.2.1
+	// https://apps.nsa.gov/iaarchive/library/ia-guidance/ia-solutions-for-classified/algorithm-guidance/suite-b-implementers-guide-to-fips-186-3-ecdsa.cfm
+	// see also https://github.com/golang/go/blob/0380c9ad38843d523d9c9804fe300cb7edd7cd3c/src/crypto/ecdsa/ecdsa.go#L89-L101
+	fe := new(big.Int).SetBytes(secHash[:])
+	n := new(big.Int).Sub(secp256k1.S256().N, one)
+	fe.Mod(fe, n)
+	fe.Add(fe, one)
+
+	feB := fe.Bytes()
+	privKey32 := make([]byte, PrivKeySize)
+	// copy feB over to fixed 32 byte privKey32 and pad (if necessary)
+	copy(privKey32[32-len(feB):32], feB)
+
+	return PrivKey(privKey32)
+}
+
+// Sign creates an ECDSA signature on curve Secp256k1, using SHA256 on the msg.
+// The returned signature will be of the form R || S (in lower-S form).
+func (privKey PrivKey) Sign(msg []byte) ([]byte, error) {
+	priv, _ := secp256k1.PrivKeyFromBytes(privKey)
+
+	sig, err := ecdsa.SignCompact(priv, crypto.Sha256(msg), false)
+	if err != nil {
+		return nil, err
+	}
+
+	// remove the first byte which is compactSigRecoveryCode
+	return sig[1:], nil
+}
+
+//-------------------------------------
+
+var _ crypto.PubKey = PubKey{}
+
+// PubKeySize is comprised of 32 bytes for one field element
+// (the x-coordinate), plus one byte for the parity of the y-coordinate.
+const PubKeySize = 33
+
+// PubKey implements crypto.PubKey.
+// It is the compressed form of the pubkey. The first byte is 0x02 when the
+// y-coordinate is even and 0x03 when it is odd; this prefix is followed by
+// the 32-byte x-coordinate.
+type PubKey []byte
+
+// Address returns a Bitcoin style address: RIPEMD160(SHA256(pubkey))
+func (pubKey PubKey) Address() crypto.Address {
+	if len(pubKey) != PubKeySize {
+		panic("length of pubkey is incorrect")
+	}
+	hasherSHA256 := sha256.New()
+	_, _ = hasherSHA256.Write(pubKey) // does not error
+	sha := hasherSHA256.Sum(nil)
+
+	hasherRIPEMD160 := ripemd160.New()
+	_, _ = hasherRIPEMD160.Write(sha) // does not error
+
+	return crypto.Address(hasherRIPEMD160.Sum(nil))
+}
+
+// Bytes returns the pubkey marshaled with amino encoding.
+func (pubKey PubKey) Bytes() []byte {
+	return []byte(pubKey)
+}
+
+func (pubKey PubKey) String() string {
+	return fmt.Sprintf("PubKeySecp256k1{%X}", []byte(pubKey))
+}
+
+func (pubKey PubKey) Equals(other crypto.PubKey) bool {
+	if otherSecp, ok := other.(PubKey); ok {
+		return bytes.Equal(pubKey[:], otherSecp[:])
+	}
+	return false
+}
+
+func (pubKey PubKey) Type() string {
+	return KeyType
+}
+
+// VerifySignature verifies a signature of the form R || S.
+// It rejects signatures which are not in lower-S form.
+func (pubKey PubKey) VerifySignature(msg []byte, sigStr []byte) bool {
+	if len(sigStr) != 64 {
+		return false
+	}
+
+	pub, err := secp256k1.ParsePubKey(pubKey)
+	if err != nil {
+		return false
+	}
+
+	// parse the signature:
+	signature := signatureFromBytes(sigStr)
+	// Reject malleable signatures. libsecp256k1 does this check but btcec doesn't.
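+	// (For curve order n, if (r, s) verifies then so does (r, n-s); requiring
+	// the lower of the two S values makes the accepted encoding unique.)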
+	// see: https://github.com/ethereum/go-ethereum/blob/f9401ae011ddf7f8d2d95020b7446c17f8d98dc1/crypto/signature_nocgo.go#L90-L93
+	// Serialize() would negate S value if it is over half order.
+	// Hence, if the signature is different after Serialize() it should be rejected.
+	var modifiedSignature, parseErr = ecdsa.ParseDERSignature(signature.Serialize())
+	if parseErr != nil {
+		return false
+	}
+	if !signature.IsEqual(modifiedSignature) {
+		return false
+	}
+
+	return signature.Verify(crypto.Sha256(msg), pub)
+}
+
+// Read Signature struct from R || S. Caller needs to ensure
+// that len(sigStr) == 64.
+func signatureFromBytes(sigStr []byte) *ecdsa.Signature {
+	var r secp256k1.ModNScalar
+	r.SetByteSlice(sigStr[:32])
+	var s secp256k1.ModNScalar
+	s.SetByteSlice(sigStr[32:64])
+	return ecdsa.NewSignature(&r, &s)
+}
diff --git a/crypto/secp256k1/secp256k1_internal_test.go b/crypto/secp256k1/secp256k1_internal_test.go
new file mode 100644
index 0000000..ae1f55e
--- /dev/null
+++ b/crypto/secp256k1/secp256k1_internal_test.go
@@ -0,0 +1,84 @@
+package secp256k1
+
+import (
+	"bytes"
+	"math/big"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	secp256k1 "github.com/btcsuite/btcd/btcec/v2"
+)
+
+func Test_genPrivKey(t *testing.T) {
+	empty := make([]byte, 32)
+	oneB := big.NewInt(1).Bytes()
+	onePadded := make([]byte, 32)
+	copy(onePadded[32-len(oneB):32], oneB)
+	t.Logf("one padded: %v, len=%v", onePadded, len(onePadded))
+
+	validOne := append(empty, onePadded...)
+	tests := []struct {
+		name        string
+		notSoRand   []byte
+		shouldPanic bool
+	}{
+		{"empty bytes (panics because 1st 32 bytes are zero and 0 is not a valid field element)", empty, true},
+		{"curve order: N", secp256k1.S256().N.Bytes(), true},
+		{"valid because 0 < 1 < N", validOne, false},
+	}
+	for _, tt := range tests {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			if tt.shouldPanic {
+				require.Panics(t, func() {
+					genPrivKey(bytes.NewReader(tt.notSoRand))
+				})
+				return
+			}
+			got := genPrivKey(bytes.NewReader(tt.notSoRand))
+			fe := new(big.Int).SetBytes(got[:])
+			require.True(t, fe.Cmp(secp256k1.S256().N) < 0)
+			require.True(t, fe.Sign() > 0)
+		})
+	}
+}
+
+// Ensure that signature verification works, and that
+// non-canonical signatures fail.
+// Note: run with CGO_ENABLED=0 or go test -tags !cgo.
+func TestSignatureVerificationAndRejectUpperS(t *testing.T) {
+	msg := []byte("We have lingered long enough on the shores of the cosmic ocean.")
+	for i := 0; i < 500; i++ {
+		priv := GenPrivKey()
+		sigStr, err := priv.Sign(msg)
+		require.NoError(t, err)
+		var r secp256k1.ModNScalar
+		r.SetByteSlice(sigStr[:32])
+		var s secp256k1.ModNScalar
+		s.SetByteSlice(sigStr[32:64])
+		require.False(t, s.IsOverHalfOrder())
+
+		pub := priv.PubKey()
+		require.True(t, pub.VerifySignature(msg, sigStr))
+
+		// malleate:
+		var S256 secp256k1.ModNScalar
+		S256.SetByteSlice(secp256k1.S256().N.Bytes())
+		s.Negate().Add(&S256)
+		require.True(t, s.IsOverHalfOrder())
+
+		rBytes := r.Bytes()
+		sBytes := s.Bytes()
+		malSigStr := make([]byte, 64)
+		copy(malSigStr[32-len(rBytes):32], rBytes[:])
+		copy(malSigStr[64-len(sBytes):64], sBytes[:])
+
+		require.False(t, pub.VerifySignature(msg, malSigStr),
+			"VerifyBytes incorrect with malleated & invalid S.
sig=%v, key=%v", + malSigStr, + priv, + ) + } +} diff --git a/crypto/secp256k1/secp256k1_test.go b/crypto/secp256k1/secp256k1_test.go new file mode 100644 index 0000000..e2d5986 --- /dev/null +++ b/crypto/secp256k1/secp256k1_test.go @@ -0,0 +1,116 @@ +package secp256k1_test + +import ( + "encoding/hex" + "math/big" + "testing" + + "github.com/btcsuite/btcd/btcutil/base58" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/strangelove-ventures/cometbft-client/crypto" + "github.com/strangelove-ventures/cometbft-client/crypto/secp256k1" + + underlyingSecp256k1 "github.com/btcsuite/btcd/btcec/v2" +) + +type keyData struct { + priv string + pub string + addr string +} + +var secpDataTable = []keyData{ + { + priv: "a96e62ed3955e65be32703f12d87b6b5cf26039ecfa948dc5107a495418e5330", + pub: "02950e1cdfcb133d6024109fd489f734eeb4502418e538c28481f22bce276f248c", + addr: "1CKZ9Nx4zgds8tU7nJHotKSDr4a9bYJCa3", + }, +} + +func TestPubKeySecp256k1Address(t *testing.T) { + for _, d := range secpDataTable { + privB, _ := hex.DecodeString(d.priv) + pubB, _ := hex.DecodeString(d.pub) + addrBbz, _, _ := base58.CheckDecode(d.addr) + addrB := crypto.Address(addrBbz) + + priv := secp256k1.PrivKey(privB) + + pubKey := priv.PubKey() + pubT, _ := pubKey.(secp256k1.PubKey) + pub := pubT + addr := pubKey.Address() + + assert.Equal(t, pub, secp256k1.PubKey(pubB), "Expected pub keys to match") + assert.Equal(t, addr, addrB, "Expected addresses to match") + } +} + +func TestSignAndValidateSecp256k1(t *testing.T) { + privKey := secp256k1.GenPrivKey() + pubKey := privKey.PubKey() + + msg := crypto.CRandBytes(128) + sig, err := privKey.Sign(msg) + require.Nil(t, err) + + assert.True(t, pubKey.VerifySignature(msg, sig)) + + // Mutate the signature, just one bit. + sig[3] ^= byte(0x01) + + assert.False(t, pubKey.VerifySignature(msg, sig)) +} + +// This test is intended to justify the removal of calls to the underlying library +// in creating the privkey. +func TestSecp256k1LoadPrivkeyAndSerializeIsIdentity(t *testing.T) { + numberOfTests := 256 + for i := 0; i < numberOfTests; i++ { + // Seed the test case with some random bytes + privKeyBytes := [32]byte{} + copy(privKeyBytes[:], crypto.CRandBytes(32)) + + // This function creates a private and public key in the underlying libraries format. + // The private key is basically calling new(big.Int).SetBytes(pk), which removes leading zero bytes + priv, _ := underlyingSecp256k1.PrivKeyFromBytes(privKeyBytes[:]) + // this takes the bytes returned by `(big int).Bytes()`, and if the length is less than 32 bytes, + // pads the bytes from the left with zero bytes. Therefore these two functions composed + // result in the identity function on privKeyBytes, hence the following equality check + // always returning true. 
+		serializedBytes := priv.Serialize()
+		require.Equal(t, privKeyBytes[:], serializedBytes)
+	}
+}
+
+func TestGenPrivKeySecp256k1(t *testing.T) {
+	// curve order N
+	N := underlyingSecp256k1.S256().N
+	tests := []struct {
+		name   string
+		secret []byte
+	}{
+		{"empty secret", []byte{}},
+		{
+			"some long secret",
+			[]byte("We live in a society exquisitely dependent on science and technology, " +
+				"in which hardly anyone knows anything about science and technology."),
+		},
+		{"another seed used in cosmos tests #1", []byte{0}},
+		{"another seed used in cosmos tests #2", []byte("mySecret")},
+		{"another seed used in cosmos tests #3", []byte("")},
+	}
+	for _, tt := range tests {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			gotPrivKey := secp256k1.GenPrivKeySecp256k1(tt.secret)
+			require.NotNil(t, gotPrivKey)
+			// interpret as a big.Int and make sure it is a valid field element:
+			fe := new(big.Int).SetBytes(gotPrivKey[:])
+			require.True(t, fe.Cmp(N) < 0)
+			require.True(t, fe.Sign() > 0)
+		})
+	}
+}
diff --git a/crypto/sr25519/batch.go b/crypto/sr25519/batch.go
new file mode 100644
index 0000000..91130f7
--- /dev/null
+++ b/crypto/sr25519/batch.go
@@ -0,0 +1,46 @@
+package sr25519
+
+import (
+	"fmt"
+
+	"github.com/oasisprotocol/curve25519-voi/primitives/sr25519"
+
+	"github.com/strangelove-ventures/cometbft-client/crypto"
+)
+
+var _ crypto.BatchVerifier = &BatchVerifier{}
+
+// BatchVerifier implements batch verification for sr25519.
+type BatchVerifier struct {
+	*sr25519.BatchVerifier
+}
+
+func NewBatchVerifier() crypto.BatchVerifier {
+	return &BatchVerifier{sr25519.NewBatchVerifier()}
+}
+
+func (b *BatchVerifier) Add(key crypto.PubKey, msg, signature []byte) error {
+	pk, ok := key.(PubKey)
+	if !ok {
+		return fmt.Errorf("sr25519: pubkey is not sr25519")
+	}
+
+	var srpk sr25519.PublicKey
+	if err := srpk.UnmarshalBinary(pk); err != nil {
+		return fmt.Errorf("sr25519: invalid public key: %w", err)
+	}
+
+	var sig sr25519.Signature
+	if err := sig.UnmarshalBinary(signature); err != nil {
+		return fmt.Errorf("sr25519: unable to decode signature: %w", err)
+	}
+
+	st := signingCtx.NewTranscriptBytes(msg)
+	b.BatchVerifier.Add(&srpk, st, &sig)
+
+	return nil
+}
+
+func (b *BatchVerifier) Verify() (bool, []bool) {
+	return b.BatchVerifier.Verify(crypto.CReader())
+}
diff --git a/crypto/sr25519/encoding.go b/crypto/sr25519/encoding.go
new file mode 100644
index 0000000..8bf0ebb
--- /dev/null
+++ b/crypto/sr25519/encoding.go
@@ -0,0 +1,13 @@
+package sr25519
+
+import cmtjson "github.com/strangelove-ventures/cometbft-client/libs/json"
+
+const (
+	PrivKeyName = "tendermint/PrivKeySr25519"
+	PubKeyName  = "tendermint/PubKeySr25519"
+)
+
+func init() {
+	cmtjson.RegisterType(PubKey{}, PubKeyName)
+	cmtjson.RegisterType(PrivKey{}, PrivKeyName)
+}
diff --git a/crypto/sr25519/privkey.go b/crypto/sr25519/privkey.go
new file mode 100644
index 0000000..6588284
--- /dev/null
+++ b/crypto/sr25519/privkey.go
@@ -0,0 +1,164 @@
+package sr25519
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/oasisprotocol/curve25519-voi/primitives/sr25519"
+
+	"github.com/strangelove-ventures/cometbft-client/crypto"
+)
+
+var (
+	_ crypto.PrivKey = PrivKey{}
+
+	signingCtx = sr25519.NewSigningContext([]byte{})
+)
+
+const (
+	// PrivKeySize is the number of bytes in an Sr25519 private key.
+	PrivKeySize = 32
+
+	KeyType = "sr25519"
+)
+
+// PrivKey implements crypto.PrivKey.
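+// Unlike the []byte-backed key types in this repository, it is a struct: it
+// retains the 32-byte MiniSecretKey seed (msk), which Bytes and the JSON
+// round-trip serialize, alongside the expanded keypair (kp) used for signing.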
+type PrivKey struct { + msk sr25519.MiniSecretKey + kp *sr25519.KeyPair +} + +// Bytes returns the byte representation of the PrivKey. +func (privKey PrivKey) Bytes() []byte { + if privKey.kp == nil { + return nil + } + return privKey.msk[:] +} + +// Sign produces a signature on the provided message. +func (privKey PrivKey) Sign(msg []byte) ([]byte, error) { + if privKey.kp == nil { + return nil, fmt.Errorf("sr25519: uninitialized private key") + } + + st := signingCtx.NewTranscriptBytes(msg) + + sig, err := privKey.kp.Sign(crypto.CReader(), st) + if err != nil { + return nil, fmt.Errorf("sr25519: failed to sign message: %w", err) + } + + sigBytes, err := sig.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("sr25519: failed to serialize signature: %w", err) + } + + return sigBytes, nil +} + +// PubKey gets the corresponding public key from the private key. +func (privKey PrivKey) PubKey() crypto.PubKey { + if privKey.kp == nil { + panic("sr25519: uninitialized private key") + } + + b, err := privKey.kp.PublicKey().MarshalBinary() + if err != nil { + panic("sr25519: failed to serialize public key: " + err.Error()) + } + + return PubKey(b) +} + +// Equals - you probably don't need to use this. +// Runs in constant time based on length of the keys. +func (privKey PrivKey) Equals(other crypto.PrivKey) bool { + if otherSr, ok := other.(PrivKey); ok { + return privKey.msk.Equal(&otherSr.msk) + } + return false +} + +func (privKey PrivKey) Type() string { + return KeyType +} + +func (privKey PrivKey) MarshalJSON() ([]byte, error) { + var b []byte + + // Handle uninitialized private keys gracefully. + if privKey.kp != nil { + b = privKey.Bytes() + } + + return json.Marshal(b) +} + +func (privKey *PrivKey) UnmarshalJSON(data []byte) error { + for i := range privKey.msk { + privKey.msk[i] = 0 + } + privKey.kp = nil + + var b []byte + if err := json.Unmarshal(data, &b); err != nil { + return fmt.Errorf("sr25519: failed to deserialize JSON: %w", err) + } + if len(b) == 0 { + return nil + } + + msk, err := sr25519.NewMiniSecretKeyFromBytes(b) + if err != nil { + return err + } + + sk := msk.ExpandEd25519() + + privKey.msk = *msk + privKey.kp = sk.KeyPair() + + return nil +} + +// GenPrivKey generates a new sr25519 private key. +// It uses OS randomness in conjunction with the current global random seed +// in cometbft/libs/rand to generate the private key. +func GenPrivKey() PrivKey { + return genPrivKey(crypto.CReader()) +} + +// genPrivKey generates a new sr25519 private key using the provided reader. +func genPrivKey(rng io.Reader) PrivKey { + msk, err := sr25519.GenerateMiniSecretKey(rng) + if err != nil { + panic("sr25519: failed to generate MiniSecretKey: " + err.Error()) + } + + sk := msk.ExpandEd25519() + + return PrivKey{ + msk: *msk, + kp: sk.KeyPair(), + } +} + +// GenPrivKeyFromSecret hashes the secret with SHA2, and uses +// that 32 byte output to create the private key. +// NOTE: secret should be the output of a KDF like bcrypt, +// if it's derived from user input. +func GenPrivKeyFromSecret(secret []byte) PrivKey { + seed := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes. 
+
+	var privKey PrivKey
+	if err := privKey.msk.UnmarshalBinary(seed); err != nil {
+		panic("sr25519: failed to deserialize MiniSecretKey: " + err.Error())
+	}
+
+	sk := privKey.msk.ExpandEd25519()
+	privKey.kp = sk.KeyPair()
+
+	return privKey
+}
diff --git a/crypto/sr25519/pubkey.go b/crypto/sr25519/pubkey.go
new file mode 100644
index 0000000..4f4cd29
--- /dev/null
+++ b/crypto/sr25519/pubkey.go
@@ -0,0 +1,70 @@
+package sr25519
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/oasisprotocol/curve25519-voi/primitives/sr25519"
+
+	"github.com/strangelove-ventures/cometbft-client/crypto"
+	"github.com/strangelove-ventures/cometbft-client/crypto/tmhash"
+)
+
+var _ crypto.PubKey = PubKey{}
+
+const (
+	// PubKeySize is the number of bytes in an Sr25519 public key.
+	PubKeySize = 32
+
+	// SignatureSize is the size of a Sr25519 signature in bytes.
+	SignatureSize = 64
+)
+
+// PubKey implements crypto.PubKey for the Sr25519 signature scheme.
+type PubKey []byte
+
+// Address is the SHA256-20 of the raw pubkey bytes.
+func (pubKey PubKey) Address() crypto.Address {
+	if len(pubKey) != PubKeySize {
+		panic("pubkey is incorrect size")
+	}
+	return crypto.Address(tmhash.SumTruncated(pubKey[:]))
+}
+
+// Bytes returns the byte representation of the PubKey.
+func (pubKey PubKey) Bytes() []byte {
+	return []byte(pubKey)
+}
+
+// Equals - checks that two public keys are the same.
+// Runs in constant time based on length of the keys.
+func (pubKey PubKey) Equals(other crypto.PubKey) bool {
+	if otherSr, ok := other.(PubKey); ok {
+		return bytes.Equal(pubKey[:], otherSr[:])
+	}
+
+	return false
+}
+
+func (pubKey PubKey) VerifySignature(msg []byte, sigBytes []byte) bool {
+	var srpk sr25519.PublicKey
+	if err := srpk.UnmarshalBinary(pubKey); err != nil {
+		return false
+	}
+
+	var sig sr25519.Signature
+	if err := sig.UnmarshalBinary(sigBytes); err != nil {
+		return false
+	}
+
+	st := signingCtx.NewTranscriptBytes(msg)
+	return srpk.Verify(st, &sig)
+}
+
+func (pubKey PubKey) String() string {
+	return fmt.Sprintf("PubKeySr25519{%X}", []byte(pubKey))
+}
+
+func (pubKey PubKey) Type() string {
+	return KeyType
+}
diff --git a/crypto/sr25519/sr25519_test.go b/crypto/sr25519/sr25519_test.go
new file mode 100644
index 0000000..d0cc4a4
--- /dev/null
+++ b/crypto/sr25519/sr25519_test.go
@@ -0,0 +1,98 @@
+package sr25519_test
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/strangelove-ventures/cometbft-client/crypto"
+	"github.com/strangelove-ventures/cometbft-client/crypto/sr25519"
+)
+
+func TestSignAndValidateSr25519(t *testing.T) {
+	privKey := sr25519.GenPrivKey()
+	pubKey := privKey.PubKey()
+
+	msg := crypto.CRandBytes(128)
+	sig, err := privKey.Sign(msg)
+	require.Nil(t, err)
+
+	// Test the signature
+	assert.True(t, pubKey.VerifySignature(msg, sig))
+	assert.True(t, pubKey.VerifySignature(msg, sig))
+
+	// Mutate the signature, just one bit.
+ // TODO: Replace this with a much better fuzzer, tendermint/ed25519/issues/10 + sig[7] ^= byte(0x01) + + assert.False(t, pubKey.VerifySignature(msg, sig)) +} + +func TestBatchSafe(t *testing.T) { + v := sr25519.NewBatchVerifier() + vFail := sr25519.NewBatchVerifier() + for i := 0; i <= 38; i++ { + priv := sr25519.GenPrivKey() + pub := priv.PubKey() + + var msg []byte + if i%2 == 0 { + msg = []byte("easter") + } else { + msg = []byte("egg") + } + + sig, err := priv.Sign(msg) + require.NoError(t, err) + + err = v.Add(pub, msg, sig) + require.NoError(t, err) + + switch i % 2 { + case 0: + err = vFail.Add(pub, msg, sig) + case 1: + msg[2] ^= byte(0x01) + err = vFail.Add(pub, msg, sig) + } + require.NoError(t, err) + } + + ok, valid := v.Verify() + require.True(t, ok, "failed batch verification") + for i, ok := range valid { + require.Truef(t, ok, "sig[%d] should be marked valid", i) + } + + ok, valid = vFail.Verify() + require.False(t, ok, "succeeded batch verification (invalid batch)") + for i, ok := range valid { + expected := (i % 2) == 0 + require.Equalf(t, expected, ok, "sig[%d] should be %v", i, expected) + } +} + +func TestJSON(t *testing.T) { + privKey := sr25519.GenPrivKey() + + t.Run("PrivKey", func(t *testing.T) { + b, err := json.Marshal(privKey) + require.NoError(t, err) + + // b should be the base64 encoded MiniSecretKey, enclosed by doublequotes. + b64 := base64.StdEncoding.EncodeToString(privKey.Bytes()) + b64 = "\"" + b64 + "\"" + require.Equal(t, []byte(b64), b) + + var privKey2 sr25519.PrivKey + err = json.Unmarshal(b, &privKey2) + require.NoError(t, err) + require.Len(t, privKey2.Bytes(), sr25519.PrivKeySize) + require.EqualValues(t, privKey.Bytes(), privKey2.Bytes()) + }) + + // PubKeys are just []byte, so there is no special handling. +} diff --git a/crypto/tmhash/hash.go b/crypto/tmhash/hash.go new file mode 100644 index 0000000..f9b9582 --- /dev/null +++ b/crypto/tmhash/hash.go @@ -0,0 +1,65 @@ +package tmhash + +import ( + "crypto/sha256" + "hash" +) + +const ( + Size = sha256.Size + BlockSize = sha256.BlockSize +) + +// New returns a new hash.Hash. +func New() hash.Hash { + return sha256.New() +} + +// Sum returns the SHA256 of the bz. +func Sum(bz []byte) []byte { + h := sha256.Sum256(bz) + return h[:] +} + +//------------------------------------------------------------- + +const ( + TruncatedSize = 20 +) + +type sha256trunc struct { + sha256 hash.Hash +} + +func (h sha256trunc) Write(p []byte) (n int, err error) { + return h.sha256.Write(p) +} +func (h sha256trunc) Sum(b []byte) []byte { + shasum := h.sha256.Sum(b) + return shasum[:TruncatedSize] +} + +func (h sha256trunc) Reset() { + h.sha256.Reset() +} + +func (h sha256trunc) Size() int { + return TruncatedSize +} + +func (h sha256trunc) BlockSize() int { + return h.sha256.BlockSize() +} + +// NewTruncated returns a new hash.Hash. +func NewTruncated() hash.Hash { + return sha256trunc{ + sha256: sha256.New(), + } +} + +// SumTruncated returns the first 20 bytes of SHA256 of the bz. 
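+// For example (illustrative):
+//
+//	h := SumTruncated([]byte("abc"))
+//	// len(h) == TruncatedSize, and h equals Sum([]byte("abc"))[:TruncatedSize]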
+func SumTruncated(bz []byte) []byte { + hash := sha256.Sum256(bz) + return hash[:TruncatedSize] +} diff --git a/crypto/tmhash/hash_test.go b/crypto/tmhash/hash_test.go new file mode 100644 index 0000000..271655e --- /dev/null +++ b/crypto/tmhash/hash_test.go @@ -0,0 +1,48 @@ +package tmhash_test + +import ( + "crypto/sha256" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/strangelove-ventures/cometbft-client/crypto/tmhash" +) + +func TestHash(t *testing.T) { + testVector := []byte("abc") + hasher := tmhash.New() + _, err := hasher.Write(testVector) + require.NoError(t, err) + bz := hasher.Sum(nil) + + bz2 := tmhash.Sum(testVector) + + hasher = sha256.New() + _, err = hasher.Write(testVector) + require.NoError(t, err) + bz3 := hasher.Sum(nil) + + assert.Equal(t, bz, bz2) + assert.Equal(t, bz, bz3) +} + +func TestHashTruncated(t *testing.T) { + testVector := []byte("abc") + hasher := tmhash.NewTruncated() + _, err := hasher.Write(testVector) + require.NoError(t, err) + bz := hasher.Sum(nil) + + bz2 := tmhash.SumTruncated(testVector) + + hasher = sha256.New() + _, err = hasher.Write(testVector) + require.NoError(t, err) + bz3 := hasher.Sum(nil) + bz3 = bz3[:tmhash.TruncatedSize] + + assert.Equal(t, bz, bz2) + assert.Equal(t, bz, bz3) +} diff --git a/crypto/xchacha20poly1305/vector_test.go b/crypto/xchacha20poly1305/vector_test.go new file mode 100644 index 0000000..c6ca9d8 --- /dev/null +++ b/crypto/xchacha20poly1305/vector_test.go @@ -0,0 +1,122 @@ +package xchacha20poly1305 + +import ( + "bytes" + "encoding/hex" + "testing" +) + +func toHex(bits []byte) string { + return hex.EncodeToString(bits) +} + +func fromHex(bits string) []byte { + b, err := hex.DecodeString(bits) + if err != nil { + panic(err) + } + return b +} + +func TestHChaCha20(t *testing.T) { + for i, v := range hChaCha20Vectors { + var key [32]byte + var nonce [16]byte + copy(key[:], v.key) + copy(nonce[:], v.nonce) + + HChaCha20(&key, &nonce, &key) + if !bytes.Equal(key[:], v.keystream) { + t.Errorf("test %d: keystream mismatch:\n \t got: %s\n \t want: %s", i, toHex(key[:]), toHex(v.keystream)) + } + } +} + +var hChaCha20Vectors = []struct { + key, nonce, keystream []byte +}{ + { + fromHex("0000000000000000000000000000000000000000000000000000000000000000"), + fromHex("000000000000000000000000000000000000000000000000"), + fromHex("1140704c328d1d5d0e30086cdf209dbd6a43b8f41518a11cc387b669b2ee6586"), + }, + { + fromHex("8000000000000000000000000000000000000000000000000000000000000000"), + fromHex("000000000000000000000000000000000000000000000000"), + fromHex("7d266a7fd808cae4c02a0a70dcbfbcc250dae65ce3eae7fc210f54cc8f77df86"), + }, + { + fromHex("0000000000000000000000000000000000000000000000000000000000000001"), + fromHex("000000000000000000000000000000000000000000000002"), + fromHex("e0c77ff931bb9163a5460c02ac281c2b53d792b1c43fea817e9ad275ae546963"), + }, + { + fromHex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"), + fromHex("000102030405060708090a0b0c0d0e0f1011121314151617"), + fromHex("51e3ff45a895675c4b33b46c64f4a9ace110d34df6a2ceab486372bacbd3eff6"), + }, + { + fromHex("24f11cce8a1b3d61e441561a696c1c1b7e173d084fd4812425435a8896a013dc"), + fromHex("d9660c5900ae19ddad28d6e06e45fe5e"), + fromHex("5966b3eec3bff1189f831f06afe4d4e3be97fa9235ec8c20d08acfbbb4e851e3"), + }, +} + +func TestVectors(t *testing.T) { + for i, v := range vectors { + if len(v.plaintext) == 0 { + v.plaintext = make([]byte, len(v.ciphertext)) + } + + var nonce [24]byte + 
copy(nonce[:], v.nonce) + + aead, err := New(v.key) + if err != nil { + t.Error(err) + } + + dst := aead.Seal(nil, nonce[:], v.plaintext, v.ad) + if !bytes.Equal(dst, v.ciphertext) { + t.Errorf("test %d: ciphertext mismatch:\n \t got: %s\n \t want: %s", i, toHex(dst), toHex(v.ciphertext)) + } + open, err := aead.Open(nil, nonce[:], dst, v.ad) + if err != nil { + t.Error(err) + } + if !bytes.Equal(open, v.plaintext) { + t.Errorf("test %d: plaintext mismatch:\n \t got: %s\n \t want: %s", i, string(open), string(v.plaintext)) + } + } +} + +var vectors = []struct { + key, nonce, ad, plaintext, ciphertext []byte +}{ + { + []byte{ + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, + 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, + 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + }, + []byte{0x07, 0x00, 0x00, 0x00, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b}, + []byte{0x50, 0x51, 0x52, 0x53, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7}, + []byte( + "Ladies and Gentlemen of the class of '99: If I could offer you only one tip for the future, sunscreen would be it.", + ), + []byte{ + 0x45, 0x3c, 0x06, 0x93, 0xa7, 0x40, 0x7f, 0x04, 0xff, 0x4c, 0x56, + 0xae, 0xdb, 0x17, 0xa3, 0xc0, 0xa1, 0xaf, 0xff, 0x01, 0x17, 0x49, + 0x30, 0xfc, 0x22, 0x28, 0x7c, 0x33, 0xdb, 0xcf, 0x0a, 0xc8, 0xb8, + 0x9a, 0xd9, 0x29, 0x53, 0x0a, 0x1b, 0xb3, 0xab, 0x5e, 0x69, 0xf2, + 0x4c, 0x7f, 0x60, 0x70, 0xc8, 0xf8, 0x40, 0xc9, 0xab, 0xb4, 0xf6, + 0x9f, 0xbf, 0xc8, 0xa7, 0xff, 0x51, 0x26, 0xfa, 0xee, 0xbb, 0xb5, + 0x58, 0x05, 0xee, 0x9c, 0x1c, 0xf2, 0xce, 0x5a, 0x57, 0x26, 0x32, + 0x87, 0xae, 0xc5, 0x78, 0x0f, 0x04, 0xec, 0x32, 0x4c, 0x35, 0x14, + 0x12, 0x2c, 0xfc, 0x32, 0x31, 0xfc, 0x1a, 0x8b, 0x71, 0x8a, 0x62, + 0x86, 0x37, 0x30, 0xa2, 0x70, 0x2b, 0xb7, 0x63, 0x66, 0x11, 0x6b, + 0xed, 0x09, 0xe0, 0xfd, 0x5c, 0x6d, 0x84, 0xb6, 0xb0, 0xc1, 0xab, + 0xaf, 0x24, 0x9d, 0x5d, 0xd0, 0xf7, 0xf5, 0xa7, 0xea, + }, + }, +} diff --git a/crypto/xchacha20poly1305/xchachapoly.go b/crypto/xchacha20poly1305/xchachapoly.go new file mode 100644 index 0000000..2578520 --- /dev/null +++ b/crypto/xchacha20poly1305/xchachapoly.go @@ -0,0 +1,259 @@ +// Package xchacha20poly1305 creates an AEAD using hchacha, chacha, and poly1305 +// This allows for randomized nonces to be used in conjunction with chacha. +package xchacha20poly1305 + +import ( + "crypto/cipher" + "encoding/binary" + "errors" + "fmt" + + "golang.org/x/crypto/chacha20poly1305" +) + +// Implements crypto.AEAD +type xchacha20poly1305 struct { + key [KeySize]byte +} + +const ( + // KeySize is the size of the key used by this AEAD, in bytes. + KeySize = 32 + // NonceSize is the size of the nonce used with this AEAD, in bytes. + NonceSize = 24 + // TagSize is the size added from poly1305 + TagSize = 16 + // MaxPlaintextSize is the max size that can be passed into a single call of Seal + MaxPlaintextSize = (1 << 38) - 64 + // MaxCiphertextSize is the max size that can be passed into a single call of Open, + // this differs from plaintext size due to the tag + MaxCiphertextSize = (1 << 38) - 48 + + // sigma are constants used in xchacha. + // Unrolled from a slice so that they can be inlined, as slices can't be constants. 
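+	// Read as little-endian words, the four constants spell the ASCII
+	// string "expand 32-byte k", the standard ChaCha20 initialization.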
+ sigma0 = uint32(0x61707865) + sigma1 = uint32(0x3320646e) + sigma2 = uint32(0x79622d32) + sigma3 = uint32(0x6b206574) +) + +// New returns a new xchachapoly1305 AEAD +func New(key []byte) (cipher.AEAD, error) { + if len(key) != KeySize { + return nil, errors.New("xchacha20poly1305: bad key length") + } + ret := new(xchacha20poly1305) + copy(ret.key[:], key) + return ret, nil +} + +func (c *xchacha20poly1305) NonceSize() int { + return NonceSize +} + +func (c *xchacha20poly1305) Overhead() int { + return TagSize +} + +func (c *xchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + if len(nonce) != NonceSize { + panic("xchacha20poly1305: bad nonce length passed to Seal") + } + + if uint64(len(plaintext)) > MaxPlaintextSize { + panic("xchacha20poly1305: plaintext too large") + } + + var subKey [KeySize]byte + var hNonce [16]byte + var subNonce [chacha20poly1305.NonceSize]byte + copy(hNonce[:], nonce[:16]) + + HChaCha20(&subKey, &hNonce, &c.key) + + // This can't error because we always provide a correctly sized key + chacha20poly1305, _ := chacha20poly1305.New(subKey[:]) + + copy(subNonce[4:], nonce[16:]) + + return chacha20poly1305.Seal(dst, subNonce[:], plaintext, additionalData) +} + +func (c *xchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if len(nonce) != NonceSize { + return nil, fmt.Errorf("xchacha20poly1305: bad nonce length passed to Open") + } + if uint64(len(ciphertext)) > MaxCiphertextSize { + return nil, fmt.Errorf("xchacha20poly1305: ciphertext too large") + } + var subKey [KeySize]byte + var hNonce [16]byte + var subNonce [chacha20poly1305.NonceSize]byte + copy(hNonce[:], nonce[:16]) + + HChaCha20(&subKey, &hNonce, &c.key) + + // This can't error because we always provide a correctly sized key + chacha20poly1305, _ := chacha20poly1305.New(subKey[:]) + + copy(subNonce[4:], nonce[16:]) + + return chacha20poly1305.Open(dst, subNonce[:], ciphertext, additionalData) +} + +// HChaCha exported from +// https://github.com/aead/chacha20/blob/8b13a72661dae6e9e5dea04f344f0dc95ea29547/chacha/chacha_generic.go#L194 +// TODO: Add support for the different assembly instructions used there. + +// The MIT License (MIT) + +// Copyright (c) 2016 Andreas Auernhammer + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +// HChaCha20 generates 32 pseudo-random bytes from a 128 bit nonce and a 256 bit secret key. +// It can be used as a key-derivation-function (KDF). 
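+//
+// A minimal derivation sketch (illustrative; key and nonce24 stand in for a
+// caller's 32-byte key and 24-byte XChaCha20 nonce), mirroring how Seal and
+// Open above derive their subkey:
+//
+//	var subKey [32]byte
+//	var hNonce [16]byte
+//	copy(hNonce[:], nonce24[:16])
+//	HChaCha20(&subKey, &hNonce, &key)
+//	// subKey then keys a standard ChaCha20-Poly1305; the remaining 8 nonce
+//	// bytes fill the tail of its 12-byte nonce.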
+func HChaCha20(out *[32]byte, nonce *[16]byte, key *[32]byte) { hChaCha20Generic(out, nonce, key) } + +func hChaCha20Generic(out *[32]byte, nonce *[16]byte, key *[32]byte) { + v00 := sigma0 + v01 := sigma1 + v02 := sigma2 + v03 := sigma3 + v04 := binary.LittleEndian.Uint32(key[0:]) + v05 := binary.LittleEndian.Uint32(key[4:]) + v06 := binary.LittleEndian.Uint32(key[8:]) + v07 := binary.LittleEndian.Uint32(key[12:]) + v08 := binary.LittleEndian.Uint32(key[16:]) + v09 := binary.LittleEndian.Uint32(key[20:]) + v10 := binary.LittleEndian.Uint32(key[24:]) + v11 := binary.LittleEndian.Uint32(key[28:]) + v12 := binary.LittleEndian.Uint32(nonce[0:]) + v13 := binary.LittleEndian.Uint32(nonce[4:]) + v14 := binary.LittleEndian.Uint32(nonce[8:]) + v15 := binary.LittleEndian.Uint32(nonce[12:]) + + for i := 0; i < 20; i += 2 { + v00 += v04 + v12 ^= v00 + v12 = (v12 << 16) | (v12 >> 16) + v08 += v12 + v04 ^= v08 + v04 = (v04 << 12) | (v04 >> 20) + v00 += v04 + v12 ^= v00 + v12 = (v12 << 8) | (v12 >> 24) + v08 += v12 + v04 ^= v08 + v04 = (v04 << 7) | (v04 >> 25) + v01 += v05 + v13 ^= v01 + v13 = (v13 << 16) | (v13 >> 16) + v09 += v13 + v05 ^= v09 + v05 = (v05 << 12) | (v05 >> 20) + v01 += v05 + v13 ^= v01 + v13 = (v13 << 8) | (v13 >> 24) + v09 += v13 + v05 ^= v09 + v05 = (v05 << 7) | (v05 >> 25) + v02 += v06 + v14 ^= v02 + v14 = (v14 << 16) | (v14 >> 16) + v10 += v14 + v06 ^= v10 + v06 = (v06 << 12) | (v06 >> 20) + v02 += v06 + v14 ^= v02 + v14 = (v14 << 8) | (v14 >> 24) + v10 += v14 + v06 ^= v10 + v06 = (v06 << 7) | (v06 >> 25) + v03 += v07 + v15 ^= v03 + v15 = (v15 << 16) | (v15 >> 16) + v11 += v15 + v07 ^= v11 + v07 = (v07 << 12) | (v07 >> 20) + v03 += v07 + v15 ^= v03 + v15 = (v15 << 8) | (v15 >> 24) + v11 += v15 + v07 ^= v11 + v07 = (v07 << 7) | (v07 >> 25) + v00 += v05 + v15 ^= v00 + v15 = (v15 << 16) | (v15 >> 16) + v10 += v15 + v05 ^= v10 + v05 = (v05 << 12) | (v05 >> 20) + v00 += v05 + v15 ^= v00 + v15 = (v15 << 8) | (v15 >> 24) + v10 += v15 + v05 ^= v10 + v05 = (v05 << 7) | (v05 >> 25) + v01 += v06 + v12 ^= v01 + v12 = (v12 << 16) | (v12 >> 16) + v11 += v12 + v06 ^= v11 + v06 = (v06 << 12) | (v06 >> 20) + v01 += v06 + v12 ^= v01 + v12 = (v12 << 8) | (v12 >> 24) + v11 += v12 + v06 ^= v11 + v06 = (v06 << 7) | (v06 >> 25) + v02 += v07 + v13 ^= v02 + v13 = (v13 << 16) | (v13 >> 16) + v08 += v13 + v07 ^= v08 + v07 = (v07 << 12) | (v07 >> 20) + v02 += v07 + v13 ^= v02 + v13 = (v13 << 8) | (v13 >> 24) + v08 += v13 + v07 ^= v08 + v07 = (v07 << 7) | (v07 >> 25) + v03 += v04 + v14 ^= v03 + v14 = (v14 << 16) | (v14 >> 16) + v09 += v14 + v04 ^= v09 + v04 = (v04 << 12) | (v04 >> 20) + v03 += v04 + v14 ^= v03 + v14 = (v14 << 8) | (v14 >> 24) + v09 += v14 + v04 ^= v09 + v04 = (v04 << 7) | (v04 >> 25) + } + + binary.LittleEndian.PutUint32(out[0:], v00) + binary.LittleEndian.PutUint32(out[4:], v01) + binary.LittleEndian.PutUint32(out[8:], v02) + binary.LittleEndian.PutUint32(out[12:], v03) + binary.LittleEndian.PutUint32(out[16:], v12) + binary.LittleEndian.PutUint32(out[20:], v13) + binary.LittleEndian.PutUint32(out[24:], v14) + binary.LittleEndian.PutUint32(out[28:], v15) +} diff --git a/crypto/xchacha20poly1305/xchachapoly_test.go b/crypto/xchacha20poly1305/xchachapoly_test.go new file mode 100644 index 0000000..6844f74 --- /dev/null +++ b/crypto/xchacha20poly1305/xchachapoly_test.go @@ -0,0 +1,113 @@ +package xchacha20poly1305 + +import ( + "bytes" + cr "crypto/rand" + mr "math/rand" + "testing" +) + +// The following test is taken from +// 
https://github.com/golang/crypto/blob/master/chacha20poly1305/chacha20poly1305_test.go#L69 +// It requires the below copyright notice, where "this source code" refers to the following function. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found at the bottom of this file. +func TestRandom(t *testing.T) { + // Some random tests to verify Open(Seal) == Plaintext + for i := 0; i < 256; i++ { + var nonce [24]byte + var key [32]byte + + al := mr.Intn(128) + pl := mr.Intn(16384) + ad := make([]byte, al) + plaintext := make([]byte, pl) + _, err := cr.Read(key[:]) + if err != nil { + t.Errorf("error on read: %v", err) + } + _, err = cr.Read(nonce[:]) + if err != nil { + t.Errorf("error on read: %v", err) + } + _, err = cr.Read(ad) + if err != nil { + t.Errorf("error on read: %v", err) + } + _, err = cr.Read(plaintext) + if err != nil { + t.Errorf("error on read: %v", err) + } + + aead, err := New(key[:]) + if err != nil { + t.Fatal(err) + } + + ct := aead.Seal(nil, nonce[:], plaintext, ad) + + plaintext2, err := aead.Open(nil, nonce[:], ct, ad) + if err != nil { + t.Errorf("random #%d: Open failed", i) + continue + } + + if !bytes.Equal(plaintext, plaintext2) { + t.Errorf("random #%d: plaintext's don't match: got %x vs %x", i, plaintext2, plaintext) + continue + } + + if len(ad) > 0 { + alterAdIdx := mr.Intn(len(ad)) + ad[alterAdIdx] ^= 0x80 + if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { + t.Errorf("random #%d: Open was successful after altering additional data", i) + } + ad[alterAdIdx] ^= 0x80 + } + + alterNonceIdx := mr.Intn(aead.NonceSize()) + nonce[alterNonceIdx] ^= 0x80 + if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { + t.Errorf("random #%d: Open was successful after altering nonce", i) + } + nonce[alterNonceIdx] ^= 0x80 + + alterCtIdx := mr.Intn(len(ct)) + ct[alterCtIdx] ^= 0x80 + if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { + t.Errorf("random #%d: Open was successful after altering ciphertext", i) + } + ct[alterCtIdx] ^= 0x80 + } +} + +// AFOREMENTIONED LICENSE +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/crypto/xsalsa20symmetric/symmetric.go b/crypto/xsalsa20symmetric/symmetric.go new file mode 100644 index 0000000..c9fdb3d --- /dev/null +++ b/crypto/xsalsa20symmetric/symmetric.go @@ -0,0 +1,54 @@ +package xsalsa20symmetric + +import ( + "errors" + "fmt" + + "golang.org/x/crypto/nacl/secretbox" + + "github.com/strangelove-ventures/cometbft-client/crypto" +) + +// TODO, make this into a struct that implements crypto.Symmetric. + +const nonceLen = 24 +const secretLen = 32 + +// secret must be 32 bytes long. Use something like Sha256(Bcrypt(passphrase)) +// The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext. +func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) { + if len(secret) != secretLen { + panic(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) + } + nonce := crypto.CRandBytes(nonceLen) + nonceArr := [nonceLen]byte{} + copy(nonceArr[:], nonce) + secretArr := [secretLen]byte{} + copy(secretArr[:], secret) + ciphertext = make([]byte, nonceLen+secretbox.Overhead+len(plaintext)) + copy(ciphertext, nonce) + secretbox.Seal(ciphertext[nonceLen:nonceLen], plaintext, &nonceArr, &secretArr) + return ciphertext +} + +// secret must be 32 bytes long. Use something like Sha256(Bcrypt(passphrase)) +// The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext. 
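+//
+// A round-trip sketch (illustrative; the secret is an assumed KDF output):
+//
+//	secret := crypto.Sha256([]byte("bcrypt output goes here"))
+//	ct := EncryptSymmetric([]byte("payload"), secret)
+//	pt, err := DecryptSymmetric(ct, secret)
+//	// err == nil && string(pt) == "payload"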
+func DecryptSymmetric(ciphertext []byte, secret []byte) (plaintext []byte, err error) { + if len(secret) != secretLen { + panic(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) + } + if len(ciphertext) <= secretbox.Overhead+nonceLen { + return nil, errors.New("ciphertext is too short") + } + nonce := ciphertext[:nonceLen] + nonceArr := [nonceLen]byte{} + copy(nonceArr[:], nonce) + secretArr := [secretLen]byte{} + copy(secretArr[:], secret) + plaintext = make([]byte, len(ciphertext)-nonceLen-secretbox.Overhead) + _, ok := secretbox.Open(plaintext[:0], ciphertext[nonceLen:], &nonceArr, &secretArr) + if !ok { + return nil, errors.New("ciphertext decryption failed") + } + return plaintext, nil +} diff --git a/crypto/xsalsa20symmetric/symmetric_test.go b/crypto/xsalsa20symmetric/symmetric_test.go new file mode 100644 index 0000000..3cf34ba --- /dev/null +++ b/crypto/xsalsa20symmetric/symmetric_test.go @@ -0,0 +1,40 @@ +package xsalsa20symmetric + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "golang.org/x/crypto/bcrypt" + + "github.com/strangelove-ventures/cometbft-client/crypto" +) + +func TestSimple(t *testing.T) { + + plaintext := []byte("sometext") + secret := []byte("somesecretoflengththirtytwo===32") + ciphertext := EncryptSymmetric(plaintext, secret) + plaintext2, err := DecryptSymmetric(ciphertext, secret) + + require.Nil(t, err, "%+v", err) + assert.Equal(t, plaintext, plaintext2) +} + +func TestSimpleWithKDF(t *testing.T) { + + plaintext := []byte("sometext") + secretPass := []byte("somesecret") + secret, err := bcrypt.GenerateFromPassword(secretPass, 12) + if err != nil { + t.Error(err) + } + secret = crypto.Sha256(secret) + + ciphertext := EncryptSymmetric(plaintext, secret) + plaintext2, err := DecryptSymmetric(ciphertext, secret) + + require.Nil(t, err, "%+v", err) + assert.Equal(t, plaintext, plaintext2) +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..60d577a --- /dev/null +++ b/go.mod @@ -0,0 +1,100 @@ +module github.com/strangelove-ventures/cometbft-client + +go 1.21 + +require ( + github.com/btcsuite/btcd/btcec/v2 v2.3.2 + github.com/btcsuite/btcd/btcutil v1.1.5 + github.com/cosmos/cosmos-sdk v0.50.3 + github.com/cosmos/gogoproto v1.4.11 + github.com/go-kit/log v0.2.1 + github.com/go-logfmt/logfmt v0.6.0 + github.com/gorilla/websocket v1.5.1 + github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a + github.com/pkg/errors v0.9.1 + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 + github.com/sasha-s/go-deadlock v0.3.1 + github.com/stretchr/testify v1.8.4 + golang.org/x/crypto v0.18.0 + golang.org/x/net v0.20.0 +) + +require ( + cosmossdk.io/api v0.7.2 // indirect + cosmossdk.io/collections v0.4.0 // indirect + cosmossdk.io/core v0.11.0 // indirect + cosmossdk.io/depinject v1.0.0-alpha.4 // indirect + cosmossdk.io/errors v1.0.1 // indirect + cosmossdk.io/log v1.3.0 // indirect + cosmossdk.io/math v1.2.0 // indirect + cosmossdk.io/store v1.0.2 // indirect + cosmossdk.io/x/tx v0.13.0 // indirect + github.com/DataDog/zstd v1.5.5 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cockroachdb/errors v1.11.1 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/pebble v0.0.0-20231101195458-481da04154d6 // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + 
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect + github.com/cometbft/cometbft v0.38.2 // indirect + github.com/cometbft/cometbft-db v0.9.1 // indirect + github.com/cosmos/btcutil v1.0.5 // indirect + github.com/cosmos/cosmos-db v1.0.0 // indirect + github.com/cosmos/cosmos-proto v1.0.0-beta.3 // indirect + github.com/cosmos/ics23/go v0.10.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect + github.com/dgraph-io/badger/v2 v2.2007.4 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect + github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/getsentry/sentry-go v0.25.0 // indirect + github.com/go-kit/kit v0.12.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/glog v1.2.0 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-metrics v0.5.1 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/iancoleman/strcase v0.3.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jmhodges/levigo v1.0.0 // indirect + github.com/klauspost/compress v1.17.4 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/linxGnu/grocksdb v1.8.6 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/petermattis/goid v0.0.0-20230904192822-1876fd5063bc // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.17.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/rs/zerolog v1.31.0 // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/cobra v1.7.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/tendermint/go-amino v0.16.0 // indirect + go.etcd.io/bbolt v1.3.8 // indirect + golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect + golang.org/x/sys v0.16.0 // indirect + golang.org/x/text v0.14.0 // indirect + google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 // indirect + google.golang.org/grpc v1.60.1 // indirect + google.golang.org/protobuf v1.32.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..faf3cb5 --- /dev/null +++ b/go.sum @@ -0,0 +1,589 @@ +cosmossdk.io/api v0.7.2 h1:BO3i5fvKMKvfaUiMkCznxViuBEfyWA/k6w2eAF6q1C4= +cosmossdk.io/api v0.7.2/go.mod h1:IcxpYS5fMemZGqyYtErK7OqvdM0C8kdW3dq8Q/XIG38= +cosmossdk.io/collections v0.4.0 h1:PFmwj2W8szgpD5nOd8GWH6AbYNi1f2J6akWXJ7P5t9s= 
+cosmossdk.io/collections v0.4.0/go.mod h1:oa5lUING2dP+gdDquow+QjlF45eL1t4TJDypgGd+tv0= +cosmossdk.io/core v0.11.0 h1:vtIafqUi+1ZNAE/oxLOQQ7Oek2n4S48SWLG8h/+wdbo= +cosmossdk.io/core v0.11.0/go.mod h1:LaTtayWBSoacF5xNzoF8tmLhehqlA9z1SWiPuNC6X1w= +cosmossdk.io/depinject v1.0.0-alpha.4 h1:PLNp8ZYAMPTUKyG9IK2hsbciDWqna2z1Wsl98okJopc= +cosmossdk.io/depinject v1.0.0-alpha.4/go.mod h1:HeDk7IkR5ckZ3lMGs/o91AVUc7E596vMaOmslGFM3yU= +cosmossdk.io/errors v1.0.1 h1:bzu+Kcr0kS/1DuPBtUFdWjzLqyUuCiyHjyJB6srBV/0= +cosmossdk.io/errors v1.0.1/go.mod h1:MeelVSZThMi4bEakzhhhE/CKqVv3nOJDA25bIqRDu/U= +cosmossdk.io/log v1.3.0 h1:L0Z0XstClo2kOU4h3V1iDoE5Ji64sg5HLOogzGg67Oo= +cosmossdk.io/log v1.3.0/go.mod h1:HIDyvWLqZe2ovlWabsDN4aPMpY/nUEquAhgfTf2ZzB8= +cosmossdk.io/math v1.2.0 h1:8gudhTkkD3NxOP2YyyJIYYmt6dQ55ZfJkDOaxXpy7Ig= +cosmossdk.io/math v1.2.0/go.mod h1:l2Gnda87F0su8a/7FEKJfFdJrM0JZRXQaohlgJeyQh0= +cosmossdk.io/store v1.0.2 h1:lSg5BTvJBHUDwswNNyeh4K/CbqiHER73VU4nDNb8uk0= +cosmossdk.io/store v1.0.2/go.mod h1:EFtENTqVTuWwitGW1VwaBct+yDagk7oG/axBMPH+FXs= +cosmossdk.io/x/tx v0.13.0 h1:8lzyOh3zONPpZv2uTcUmsv0WTXy6T1/aCVDCqShmpzU= +cosmossdk.io/x/tx v0.13.0/go.mod h1:CpNQtmoqbXa33/DVxWQNx5Dcnbkv2xGUhL7tYQ5wUsY= +filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek= +filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.1 h1:tYLp1ULvO7i3fI5vE21ReQuj99QFSs7lGm0xWyJo87o= +github.com/99designs/keyring v1.2.1/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= +github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 
h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 h1:41iFGWnSlI2gVpmOtVTJZNodLdLQLn/KsJqFvXwnd/s= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= +github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd h1:js1gPwhcFflTZ7Nzl7WHaOTlTr5hIrR4n1NM4v9n4Kw= +github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= +github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= +github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= +github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= +github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8= +github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= +github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/pebble v0.0.0-20231101195458-481da04154d6 h1:g+Y6IAf28JinY3zNdXwpw71SBGhLEb72kGQgiR5XKZM= +github.com/cockroachdb/pebble v0.0.0-20231101195458-481da04154d6/go.mod h1:acMRUGd/BK8AUmQNK3spUCCGzFLZU2bSST3NMXSq2Kc= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= +github.com/cometbft/cometbft v0.38.2 h1:io0JCh5EPxINKN5ZMI5hCdpW3QVZRy+o8qWe3mlJa/8= +github.com/cometbft/cometbft v0.38.2/go.mod h1:PIi48BpzwlHqtV3mzwPyQgOyOnU94BNBimLS2ebBHOg= +github.com/cometbft/cometbft-db v0.9.1 h1:MIhVX5ja5bXNHF8EYrThkG9F7r9kSfv8BX4LWaxWJ4M= +github.com/cometbft/cometbft-db v0.9.1/go.mod h1:iliyWaoV0mRwBJoizElCwwRA9Tf7jZJOURcRZF9m60U= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= +github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= +github.com/cosmos/cosmos-db v1.0.0 h1:EVcQZ+qYag7W6uorBKFPvX6gRjw6Uq2hIh4hCWjuQ0E= +github.com/cosmos/cosmos-db v1.0.0/go.mod h1:iBvi1TtqaedwLdcrZVYRSSCb6eSy61NLj4UNmdIgs0U= +github.com/cosmos/cosmos-proto v1.0.0-beta.3 h1:VitvZ1lPORTVxkmF2fAp3IiA61xVwArQYKXTdEcpW6o= +github.com/cosmos/cosmos-proto v1.0.0-beta.3/go.mod h1:t8IASdLaAq+bbHbjq4p960BvcTqtwuAxid3b/2rOD6I= +github.com/cosmos/cosmos-sdk v0.50.3 h1:zP0AXm54ws2t2qVWvcQhEYVafhOAREU2QL0gnbwjvXw= +github.com/cosmos/cosmos-sdk v0.50.3/go.mod h1:tlrkY1sntOt1q0OX/rqF0zRJtmXNoffAS6VFTcky+w8= +github.com/cosmos/go-bip39 v1.0.0 
h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= +github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= +github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= +github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ4GUkT+tbFI= +github.com/cosmos/gogoproto v1.4.11 h1:LZcMHrx4FjUgrqQSWeaGC1v/TeuVFqSLa43CC6aWR2g= +github.com/cosmos/gogoproto v1.4.11/go.mod h1:/g39Mh8m17X8Q/GDEs5zYTSNaNnInBSohtaxzQnYq1Y= +github.com/cosmos/iavl v1.0.0 h1:bw6t0Mv/mVCJvlMTOPHWLs5uUE3BRBfVWCRelOzl+so= +github.com/cosmos/iavl v1.0.0/go.mod h1:CmTGqMnRnucjxbjduneZXT+0vPgNElYvdefjX2q9tYc= +github.com/cosmos/ics23/go v0.10.0 h1:iXqLLgp2Lp+EdpIuwXTYIQU+AiHj9mOC2X9ab++bZDM= +github.com/cosmos/ics23/go v0.10.0/go.mod h1:ZfJSmng/TBNTBkFemHHHj5YY7VAU/MBU980F4VU1NG0= +github.com/cosmos/ledger-cosmos-go v0.13.3 h1:7ehuBGuyIytsXbd4MP43mLeoN2LTOEnk5nvue4rK+yM= +github.com/cosmos/ledger-cosmos-go v0.13.3/go.mod h1:HENcEP+VtahZFw38HZ3+LS3Iv5XV6svsnkk9vdJtLr8= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= +github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= +github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm 
v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= +github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/emicklei/dot v1.6.0 h1:vUzuoVE8ipzS7QkES4UfxdpCwdU2U97m2Pb2tQCoYRY= +github.com/emicklei/dot v1.6.0/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o= +github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/getsentry/sentry-go v0.25.0 h1:q6Eo+hS+yoJlTO3uu/azhQadsD8V+jQn2D8VvX1eOyI= +github.com/getsentry/sentry-go v0.25.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.4/go.mod 
h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= +github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-metrics v0.5.1 h1:rfPwUqFU6uZXNvGl4hzjY8LEBsqFVU4si1H9/Hqck/U= +github.com/hashicorp/go-metrics v0.5.1/go.mod h1:KEjodfebIOuBYSAe/bHTm+HChmKSxAOXPBieMLYozDE= +github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y= +github.com/hashicorp/go-plugin v1.5.2/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/hdevalence/ed25519consensus v0.1.0 h1:jtBwzzcHuTmFrQN6xQZn6CQEO/V9f7HsjsjeEZ6auqU= +github.com/hdevalence/ed25519consensus v0.1.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= 
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/skiplist v1.2.0 h1:gox56QD77HzSC0w+Ws3MH3iie755GBJU1OER3h5VsYw= +github.com/huandu/skiplist v1.2.0/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= +github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= +github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= +github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/linxGnu/grocksdb v1.8.6 h1:O7I6SIGPrypf3f/gmrrLUBQDKfO8uOoYdWf4gLS06tc= +github.com/linxGnu/grocksdb v1.8.6/go.mod h1:xZCIb5Muw+nhbDK4Y5UJuOrin5MceOuiXkVUR7vp4WY= 
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a h1:dlRvE5fWabOchtH7znfiFCcOvmIYgOeAS5ifBXBlh9Q= +github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a/go.mod h1:hVoHR2EVESiICEMbg137etN/Lx+lSrHPTD39Z/uE+2s= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod 
h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.20.0 h1:8W0cWlwFkflGPLltQvLRB7ZVD5HuP6ng320w2IS245Q= +github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= +github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/petermattis/goid v0.0.0-20230904192822-1876fd5063bc h1:8bQZVK1X6BJR/6nYUPxQEP+ReTsceJTKizeuwjWOPUA= +github.com/petermattis/goid v0.0.0-20230904192822-1876fd5063bc/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 
h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rs/cors v1.8.3 h1:O+qNyWn7Z+F9M0ILBHgMVPuB1xTOucVd5gtaYyXBpRo= +github.com/rs/cors v1.8.3/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A= +github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= +github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman 
v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= +github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= +github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= +github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= +github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= +github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= +github.com/zondax/ledger-go v0.14.3/go.mod h1:IKKaoxupuB43g4NxeQmbLXv7T9AlQyie1UpHb342ycI= +go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= +go.etcd.io/bbolt v1.3.8/go.mod 
h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 
+golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 h1:1hfbdAfFbkmpg41000wDVqr7jUpK/Yo+LPnIxxGzmkg= +google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic= +google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f h1:2yNACc1O40tTnrsbk9Cv6oxiW8pxI/pXj0wRtdlYmgY= +google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f/go.mod h1:Uy9bTZJqmfrw2rIBxgGLnamc78euZULUBrLZ9XTITKI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 h1:/jFB8jK5R3Sq3i/lmeZO0cATSzFfZaJq1J2Euan3XKU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0/go.mod h1:FUoWkonphQm3RhTS+kOEhF8h0iDpm4tdXolVCeZ9KKA= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf 
v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
+google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
+gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
+gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
+nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k=
+nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0=
+pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw=
+pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
+sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/golangci.yml b/golangci.yml
new file mode 100644
index 0000000..48f1ec8
--- /dev/null
+++ b/golangci.yml
@@ -0,0 +1,111 @@
+run:
+  tests: true
+  timeout: 10m
+
+linters:
+  disable-all: true
+  enable:
+    - exportloopref
+    - errcheck
+    - gci
+    - goconst
+    - gocritic
+    - gofumpt
+    - gosec
+    - gosimple
+    - govet
+    - ineffassign
+    - misspell
+    - nakedret
+    - staticcheck
+    - thelper
+    - typecheck
+    - stylecheck
+    - revive
+    - tenv
+    - unconvert
+    # Prefer unparam over revive's unused-parameter rule. It is more thorough in its checking.
+    - unparam
+    - unused
+
+issues:
+  exclude-rules:
+    - text: 'differs only by capitalization to method'
+      linters:
+        - revive
+    - text: 'Use of weak random number generator'
+      linters:
+        - gosec
+    - linters:
+        - staticcheck
+      text: "SA1019:" # silence errors on usage of deprecated funcs
+
+  max-issues-per-linter: 10000
+  max-same-issues: 10000
+
+linters-settings:
+  gci:
+    sections:
+      - standard # Standard section: captures all standard packages.
+      - default # Default section: contains all imports that could not be matched to another section type.
+      - blank # blank imports
+      - dot # dot imports
+      - prefix(cosmossdk.io)
+      - prefix(github.com/cosmos/cosmos-sdk)
+      - prefix(github.com/cometbft/cometbft)
+      - prefix(github.com/cosmos/ibc-go)
+    custom-order: true
+  revive:
+    enable-all-rules: true
+    # Do NOT whine about the following, full explanation found in:
+    # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#description-of-available-rules
+    rules:
+      - name: use-any
+        disabled: true
+      - name: if-return
+        disabled: true
+      - name: max-public-structs
+        disabled: true
+      - name: cognitive-complexity
+        disabled: true
+      - name: argument-limit
+        disabled: true
+      - name: cyclomatic
+        disabled: true
+      - name: file-header
+        disabled: true
+      - name: function-length
+        disabled: true
+      - name: function-result-limit
+        disabled: true
+      - name: line-length-limit
+        disabled: true
+      - name: flag-parameter
+        disabled: true
+      - name: add-constant
+        disabled: true
+      - name: empty-lines
+        disabled: true
+      - name: banned-characters
+        disabled: true
+      - name: deep-exit
+        disabled: true
+      - name: confusing-results
+        disabled: true
+      - name: modifies-value-receiver
+        disabled: true
+      - name: early-return
+        disabled: true
+      - name: confusing-naming
+        disabled: true
+      - name: defer
+        disabled: true
+      # Disabled in favour of unparam.
+      - name: unused-parameter
+        disabled: true
+      - name: unhandled-error
+        disabled: false
+        arguments:
+          - 'fmt.Printf'
+          - 'fmt.Print'
+          - 'fmt.Println'
\ No newline at end of file
diff --git a/libs/bytes/bytes.go b/libs/bytes/bytes.go
new file mode 100644
index 0000000..95b4cc3
--- /dev/null
+++ b/libs/bytes/bytes.go
@@ -0,0 +1,65 @@
+package bytes
+
+import (
+	"encoding/hex"
+	"fmt"
+	"strings"
+)
+
+// HexBytes enables HEX-encoding for json/encoding.
+type HexBytes []byte
+
+// Marshal needed for protobuf compatibility
+func (bz HexBytes) Marshal() ([]byte, error) {
+	return bz, nil
+}
+
+// Unmarshal needed for protobuf compatibility
+func (bz *HexBytes) Unmarshal(data []byte) error {
+	*bz = data
+	return nil
+}
+
+// MarshalJSON encodes the bytes as an upper-case, double-quoted hex string.
+func (bz HexBytes) MarshalJSON() ([]byte, error) {
+	s := strings.ToUpper(hex.EncodeToString(bz))
+	jbz := make([]byte, len(s)+2)
+	jbz[0] = '"'
+	copy(jbz[1:], s)
+	jbz[len(jbz)-1] = '"'
+	return jbz, nil
+}
+
+// UnmarshalJSON decodes a double-quoted hex string back into the receiver.
+func (bz *HexBytes) UnmarshalJSON(data []byte) error {
+	if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
+		return fmt.Errorf("invalid hex string: %s", data)
+	}
+	bz2, err := hex.DecodeString(string(data[1 : len(data)-1]))
+	if err != nil {
+		return err
+	}
+	*bz = bz2
+	return nil
+}
+
+// Bytes fulfills various interfaces in light-client, etc...
+func (bz HexBytes) Bytes() []byte { + return bz +} + +func (bz HexBytes) String() string { + return strings.ToUpper(hex.EncodeToString(bz)) +} + +// Format writes either address of 0th element in a slice in base 16 notation, +// with leading 0x (%p), or casts HexBytes to bytes and writes as hexadecimal +// string to s. +func (bz HexBytes) Format(s fmt.State, verb rune) { + switch verb { + case 'p': + s.Write([]byte(fmt.Sprintf("%p", bz))) + default: + s.Write([]byte(fmt.Sprintf("%X", []byte(bz)))) + } +} diff --git a/libs/bytes/bytes_test.go b/libs/bytes/bytes_test.go new file mode 100644 index 0000000..db882f1 --- /dev/null +++ b/libs/bytes/bytes_test.go @@ -0,0 +1,73 @@ +package bytes + +import ( + "encoding/json" + "fmt" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" +) + +// This is a trivial test for protobuf compatibility. +func TestMarshal(t *testing.T) { + bz := []byte("hello world") + dataB := HexBytes(bz) + bz2, err := dataB.Marshal() + assert.Nil(t, err) + assert.Equal(t, bz, bz2) + + var dataB2 HexBytes + err = (&dataB2).Unmarshal(bz) + assert.Nil(t, err) + assert.Equal(t, dataB, dataB2) +} + +// Test that the hex encoding works. +func TestJSONMarshal(t *testing.T) { + type TestStruct struct { + B1 []byte + B2 HexBytes + } + + cases := []struct { + input []byte + expected string + }{ + {[]byte(``), `{"B1":"","B2":""}`}, + {[]byte(`a`), `{"B1":"YQ==","B2":"61"}`}, + {[]byte(`abc`), `{"B1":"YWJj","B2":"616263"}`}, + } + + for i, tc := range cases { + tc := tc + t.Run(fmt.Sprintf("Case %d", i), func(t *testing.T) { + ts := TestStruct{B1: tc.input, B2: tc.input} + + // Test that it marshals correctly to JSON. + jsonBytes, err := json.Marshal(ts) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, string(jsonBytes), tc.expected) + + // TODO do fuzz testing to ensure that unmarshal fails + + // Test that unmarshaling works correctly. + ts2 := TestStruct{} + err = json.Unmarshal(jsonBytes, &ts2) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, ts2.B1, tc.input) + assert.Equal(t, ts2.B2, HexBytes(tc.input)) + }) + } +} + +func TestHexBytes_String(t *testing.T) { + hs := HexBytes([]byte("test me")) + if _, err := strconv.ParseInt(hs.String(), 16, 64); err != nil { + t.Fatal(err) + } +} diff --git a/libs/bytes/byteslice.go b/libs/bytes/byteslice.go new file mode 100644 index 0000000..1d535eb --- /dev/null +++ b/libs/bytes/byteslice.go @@ -0,0 +1,10 @@ +package bytes + +// Fingerprint returns the first 6 bytes of a byte slice. +// If the slice is less than 6 bytes, the fingerprint +// contains trailing zeroes. +func Fingerprint(slice []byte) []byte { + fingerprint := make([]byte, 6) + copy(fingerprint, slice) + return fingerprint +} diff --git a/libs/flowrate/README.md b/libs/flowrate/README.md new file mode 100644 index 0000000..caed79a --- /dev/null +++ b/libs/flowrate/README.md @@ -0,0 +1,10 @@ +Data Flow Rate Control +====================== + +To download and install this package run: + +go get github.com/mxk/go-flowrate/flowrate + +The documentation is available at: + + diff --git a/libs/flowrate/flowrate.go b/libs/flowrate/flowrate.go new file mode 100644 index 0000000..b027a86 --- /dev/null +++ b/libs/flowrate/flowrate.go @@ -0,0 +1,276 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +// Package flowrate provides the tools for monitoring and limiting the flow rate +// of an arbitrary data stream. 
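+//
+// A minimal usage sketch (src and dst are illustrative io.Reader/io.Writer values):
+//
+//	r := flowrate.NewReader(src, 1024) // limit reads to 1024 bytes per second
+//	n, _ := io.Copy(dst, r)
+//	fmt.Println(n, r.Status().AvgRate)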
+package flowrate + +import ( + "math" + "time" + + cmtsync "github.com/strangelove-ventures/cometbft-client/libs/sync" +) + +// Monitor monitors and limits the transfer rate of a data stream. +type Monitor struct { + mu cmtsync.Mutex // Mutex guarding access to all internal fields + active bool // Flag indicating an active transfer + start time.Duration // Transfer start time (clock() value) + bytes int64 // Total number of bytes transferred + samples int64 // Total number of samples taken + + rSample float64 // Most recent transfer rate sample (bytes per second) + rEMA float64 // Exponential moving average of rSample + rPeak float64 // Peak transfer rate (max of all rSamples) + rWindow float64 // rEMA window (seconds) + + sBytes int64 // Number of bytes transferred since sLast + sLast time.Duration // Most recent sample time (stop time when inactive) + sRate time.Duration // Sampling rate + + tBytes int64 // Number of bytes expected in the current transfer + tLast time.Duration // Time of the most recent transfer of at least 1 byte +} + +// New creates a new flow control monitor. Instantaneous transfer rate is +// measured and updated for each sampleRate interval. windowSize determines the +// weight of each sample in the exponential moving average (EMA) calculation. +// The exact formulas are: +// +// sampleTime = currentTime - prevSampleTime +// sampleRate = byteCount / sampleTime +// weight = 1 - exp(-sampleTime/windowSize) +// newRate = weight*sampleRate + (1-weight)*oldRate +// +// The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s, +// respectively. +func New(sampleRate, windowSize time.Duration) *Monitor { + if sampleRate = clockRound(sampleRate); sampleRate <= 0 { + sampleRate = 5 * clockRate + } + if windowSize <= 0 { + windowSize = 1 * time.Second + } + now := clock() + return &Monitor{ + active: true, + start: now, + rWindow: windowSize.Seconds(), + sLast: now, + sRate: sampleRate, + tLast: now, + } +} + +// Update records the transfer of n bytes and returns n. It should be called +// after each Read/Write operation, even if n is 0. +func (m *Monitor) Update(n int) int { + m.mu.Lock() + m.update(n) + m.mu.Unlock() + return n +} + +// Hack to set the current rEMA. +func (m *Monitor) SetREMA(rEMA float64) { + m.mu.Lock() + m.rEMA = rEMA + m.samples++ + m.mu.Unlock() +} + +// IO is a convenience method intended to wrap io.Reader and io.Writer method +// execution. It calls m.Update(n) and then returns (n, err) unmodified. +func (m *Monitor) IO(n int, err error) (int, error) { + return m.Update(n), err +} + +// Done marks the transfer as finished and prevents any further updates or +// limiting. Instantaneous and current transfer rates drop to 0. Update, IO, and +// Limit methods become NOOPs. It returns the total number of bytes transferred. +func (m *Monitor) Done() int64 { + m.mu.Lock() + if now := m.update(0); m.sBytes > 0 { + m.reset(now) + } + m.active = false + m.tLast = 0 + n := m.bytes + m.mu.Unlock() + return n +} + +// timeRemLimit is the maximum Status.TimeRem value. +const timeRemLimit = 999*time.Hour + 59*time.Minute + 59*time.Second + +// Status represents the current Monitor status. All transfer rates are in bytes +// per second rounded to the nearest byte. 
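+//
+// For example (sketch, given a *Monitor m):
+//
+//	s := m.Status()
+//	fmt.Printf("cur=%d avg=%d peak=%d\n", s.CurRate, s.AvgRate, s.PeakRate)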
+type Status struct { + Start time.Time // Transfer start time + Bytes int64 // Total number of bytes transferred + Samples int64 // Total number of samples taken + InstRate int64 // Instantaneous transfer rate + CurRate int64 // Current transfer rate (EMA of InstRate) + AvgRate int64 // Average transfer rate (Bytes / Duration) + PeakRate int64 // Maximum instantaneous transfer rate + BytesRem int64 // Number of bytes remaining in the transfer + Duration time.Duration // Time period covered by the statistics + Idle time.Duration // Time since the last transfer of at least 1 byte + TimeRem time.Duration // Estimated time to completion + Progress Percent // Overall transfer progress + Active bool // Flag indicating an active transfer +} + +// Status returns current transfer status information. The returned value +// becomes static after a call to Done. +func (m *Monitor) Status() Status { + m.mu.Lock() + now := m.update(0) + s := Status{ + Active: m.active, + Start: clockToTime(m.start), + Duration: m.sLast - m.start, + Idle: now - m.tLast, + Bytes: m.bytes, + Samples: m.samples, + PeakRate: round(m.rPeak), + BytesRem: m.tBytes - m.bytes, + Progress: percentOf(float64(m.bytes), float64(m.tBytes)), + } + if s.BytesRem < 0 { + s.BytesRem = 0 + } + if s.Duration > 0 { + rAvg := float64(s.Bytes) / s.Duration.Seconds() + s.AvgRate = round(rAvg) + if s.Active { + s.InstRate = round(m.rSample) + s.CurRate = round(m.rEMA) + if s.BytesRem > 0 { + if tRate := 0.8*m.rEMA + 0.2*rAvg; tRate > 0 { + ns := float64(s.BytesRem) / tRate * 1e9 + if ns > float64(timeRemLimit) { + ns = float64(timeRemLimit) + } + s.TimeRem = clockRound(time.Duration(ns)) + } + } + } + } + m.mu.Unlock() + return s +} + +// Limit restricts the instantaneous (per-sample) data flow to rate bytes per +// second. It returns the maximum number of bytes (0 <= n <= want) that may be +// transferred immediately without exceeding the limit. If block == true, the +// call blocks until n > 0. want is returned unmodified if want < 1, rate < 1, +// or the transfer is inactive (after a call to Done). +// +// At least one byte is always allowed to be transferred in any given sampling +// period. Thus, if the sampling rate is 100ms, the lowest achievable flow rate +// is 10 bytes per second. +// +// For usage examples, see the implementation of Reader and Writer in io.go. +func (m *Monitor) Limit(want int, rate int64, block bool) (n int) { + if want < 1 || rate < 1 { + return want + } + m.mu.Lock() + + // Determine the maximum number of bytes that can be sent in one sample + limit := round(float64(rate) * m.sRate.Seconds()) + if limit <= 0 { + limit = 1 + } + + // If block == true, wait until m.sBytes < limit + if now := m.update(0); block { + for m.sBytes >= limit && m.active { + now = m.waitNextSample(now) + } + } + + // Make limit <= want (unlimited if the transfer is no longer active) + if limit -= m.sBytes; limit > int64(want) || !m.active { + limit = int64(want) + } + m.mu.Unlock() + + if limit < 0 { + limit = 0 + } + return int(limit) +} + +// SetTransferSize specifies the total size of the data transfer, which allows +// the Monitor to calculate the overall progress and time to completion. +func (m *Monitor) SetTransferSize(bytes int64) { + if bytes < 0 { + bytes = 0 + } + m.mu.Lock() + m.tBytes = bytes + m.mu.Unlock() +} + +// update accumulates the transferred byte count for the current sample until +// clock() - m.sLast >= m.sRate. The monitor status is updated once the current +// sample is done. 
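+//
+// For instance, with the default 100ms sample rate, 50 bytes accumulated over a
+// full sample yields rSample = 50/0.1 = 500 bytes per second.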
+func (m *Monitor) update(n int) (now time.Duration) { + if !m.active { + return + } + if now = clock(); n > 0 { + m.tLast = now + } + m.sBytes += int64(n) + if sTime := now - m.sLast; sTime >= m.sRate { + t := sTime.Seconds() + if m.rSample = float64(m.sBytes) / t; m.rSample > m.rPeak { + m.rPeak = m.rSample + } + + // Exponential moving average using a method similar to *nix load + // average calculation. Longer sampling periods carry greater weight. + if m.samples > 0 { + w := math.Exp(-t / m.rWindow) + m.rEMA = m.rSample + w*(m.rEMA-m.rSample) + } else { + m.rEMA = m.rSample + } + m.reset(now) + } + return +} + +// reset clears the current sample state in preparation for the next sample. +func (m *Monitor) reset(sampleTime time.Duration) { + m.bytes += m.sBytes + m.samples++ + m.sBytes = 0 + m.sLast = sampleTime +} + +// waitNextSample sleeps for the remainder of the current sample. The lock is +// released and reacquired during the actual sleep period, so it's possible for +// the transfer to be inactive when this method returns. +func (m *Monitor) waitNextSample(now time.Duration) time.Duration { + const minWait = 5 * time.Millisecond + current := m.sLast + + // sleep until the last sample time changes (ideally, just one iteration) + for m.sLast == current && m.active { + d := current + m.sRate - now + m.mu.Unlock() + if d < minWait { + d = minWait + } + time.Sleep(d) + m.mu.Lock() + now = m.update(0) + } + return now +} diff --git a/libs/flowrate/io.go b/libs/flowrate/io.go new file mode 100644 index 0000000..fbe0909 --- /dev/null +++ b/libs/flowrate/io.go @@ -0,0 +1,133 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "errors" + "io" +) + +// ErrLimit is returned by the Writer when a non-blocking write is short due to +// the transfer rate limit. +var ErrLimit = errors.New("flowrate: flow rate limit exceeded") + +// Limiter is implemented by the Reader and Writer to provide a consistent +// interface for monitoring and controlling data transfer. +type Limiter interface { + Done() int64 + Status() Status + SetTransferSize(bytes int64) + SetLimit(new int64) (old int64) + SetBlocking(new bool) (old bool) +} + +// Reader implements io.ReadCloser with a restriction on the rate of data +// transfer. +type Reader struct { + io.Reader // Data source + *Monitor // Flow control monitor + + limit int64 // Rate limit in bytes per second (unlimited when <= 0) + block bool // What to do when no new bytes can be read due to the limit +} + +// NewReader restricts all Read operations on r to limit bytes per second. +func NewReader(r io.Reader, limit int64) *Reader { + return &Reader{r, New(0, 0), limit, true} +} + +// Read reads up to len(p) bytes into p without exceeding the current transfer +// rate limit. It returns (0, nil) immediately if r is non-blocking and no new +// bytes can be read at this time. +func (r *Reader) Read(p []byte) (n int, err error) { + p = p[:r.Limit(len(p), r.limit, r.block)] + if len(p) > 0 { + n, err = r.IO(r.Reader.Read(p)) + } + return +} + +// SetLimit changes the transfer rate limit to new bytes per second and returns +// the previous setting. +func (r *Reader) SetLimit(new int64) (old int64) { + old, r.limit = r.limit, new + return +} + +// SetBlocking changes the blocking behavior and returns the previous setting. A +// Read call on a non-blocking reader returns immediately if no additional bytes +// may be read at this time due to the rate limit. 
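+//
+// For example (sketch):
+//
+//	r.SetBlocking(false)
+//	n, err := r.Read(buf) // may be (0, nil) while the current sample's quota is spent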
+func (r *Reader) SetBlocking(new bool) (old bool) { + old, r.block = r.block, new + return +} + +// Close closes the underlying reader if it implements the io.Closer interface. +func (r *Reader) Close() error { + defer r.Done() + if c, ok := r.Reader.(io.Closer); ok { + return c.Close() + } + return nil +} + +// Writer implements io.WriteCloser with a restriction on the rate of data +// transfer. +type Writer struct { + io.Writer // Data destination + *Monitor // Flow control monitor + + limit int64 // Rate limit in bytes per second (unlimited when <= 0) + block bool // What to do when no new bytes can be written due to the limit +} + +// NewWriter restricts all Write operations on w to limit bytes per second. The +// transfer rate and the default blocking behavior (true) can be changed +// directly on the returned *Writer. +func NewWriter(w io.Writer, limit int64) *Writer { + return &Writer{w, New(0, 0), limit, true} +} + +// Write writes len(p) bytes from p to the underlying data stream without +// exceeding the current transfer rate limit. It returns (n, ErrLimit) if w is +// non-blocking and no additional bytes can be written at this time. +func (w *Writer) Write(p []byte) (n int, err error) { + var c int + for len(p) > 0 && err == nil { + s := p[:w.Limit(len(p), w.limit, w.block)] + if len(s) > 0 { + c, err = w.IO(w.Writer.Write(s)) + } else { + return n, ErrLimit + } + p = p[c:] + n += c + } + return +} + +// SetLimit changes the transfer rate limit to new bytes per second and returns +// the previous setting. +func (w *Writer) SetLimit(new int64) (old int64) { + old, w.limit = w.limit, new + return +} + +// SetBlocking changes the blocking behavior and returns the previous setting. A +// Write call on a non-blocking writer returns as soon as no additional bytes +// may be written at this time due to the rate limit. +func (w *Writer) SetBlocking(new bool) (old bool) { + old, w.block = w.block, new + return +} + +// Close closes the underlying writer if it implements the io.Closer interface. 
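+//
+// Typical cleanup (sketch):
+//
+//	w := NewWriter(dst, 4096)
+//	defer w.Close() // also closes dst if it implements io.Closer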
+func (w *Writer) Close() error { + defer w.Done() + if c, ok := w.Writer.(io.Closer); ok { + return c.Close() + } + return nil +} diff --git a/libs/flowrate/io_test.go b/libs/flowrate/io_test.go new file mode 100644 index 0000000..4d7de41 --- /dev/null +++ b/libs/flowrate/io_test.go @@ -0,0 +1,197 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "bytes" + "testing" + "time" +) + +const ( + _50ms = 50 * time.Millisecond + _100ms = 100 * time.Millisecond + _200ms = 200 * time.Millisecond + _300ms = 300 * time.Millisecond + _400ms = 400 * time.Millisecond + _500ms = 500 * time.Millisecond +) + +func nextStatus(m *Monitor) Status { + samples := m.samples + for i := 0; i < 30; i++ { + if s := m.Status(); s.Samples != samples { + return s + } + time.Sleep(5 * time.Millisecond) + } + return m.Status() +} + +func TestReader(t *testing.T) { + in := make([]byte, 100) + for i := range in { + in[i] = byte(i) + } + b := make([]byte, 100) + r := NewReader(bytes.NewReader(in), 100) + start := time.Now() + + // Make sure r implements Limiter + _ = Limiter(r) + + // 1st read of 10 bytes is performed immediately + if n, err := r.Read(b); n != 10 || err != nil { + t.Fatalf("r.Read(b) expected 10 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("r.Read(b) took too long (%v)", rt) + } + + // No new Reads allowed in the current sample + r.SetBlocking(false) + if n, err := r.Read(b); n != 0 || err != nil { + t.Fatalf("r.Read(b) expected 0 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("r.Read(b) took too long (%v)", rt) + } + + status := [6]Status{0: r.Status()} // No samples in the first status + + // 2nd read of 10 bytes blocks until the next sample + r.SetBlocking(true) + if n, err := r.Read(b[10:]); n != 10 || err != nil { + t.Fatalf("r.Read(b[10:]) expected 10 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt < _100ms { + t.Fatalf("r.Read(b[10:]) returned ahead of time (%v)", rt) + } + + status[1] = r.Status() // 1st sample + status[2] = nextStatus(r.Monitor) // 2nd sample + status[3] = nextStatus(r.Monitor) // No activity for the 3rd sample + + if n := r.Done(); n != 20 { + t.Fatalf("r.Done() expected 20; got %v", n) + } + + status[4] = r.Status() + status[5] = nextStatus(r.Monitor) // Timeout + start = status[0].Start + + // Active, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, Start, Duration, Idle, TimeRem, Progress + want := []Status{ + {start, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true}, + {start, 10, 1, 100, 100, 100, 100, 0, _100ms, 0, 0, 0, true}, + {start, 20, 2, 100, 100, 100, 100, 0, _200ms, _100ms, 0, 0, true}, + {start, 20, 3, 0, 90, 67, 100, 0, _300ms, _200ms, 0, 0, true}, + {start, 20, 3, 0, 0, 67, 100, 0, _300ms, 0, 0, 0, false}, + {start, 20, 3, 0, 0, 67, 100, 0, _300ms, 0, 0, 0, false}, + } + for i, s := range status { + s := s + if !statusesAreEqual(&s, &want[i]) { + t.Errorf("r.Status(%v)\nexpected: %v\ngot : %v", i, want[i], s) + } + } + if !bytes.Equal(b[:20], in[:20]) { + t.Errorf("r.Read() input doesn't match output") + } +} + +func TestWriter(t *testing.T) { + b := make([]byte, 100) + for i := range b { + b[i] = byte(i) + } + w := NewWriter(&bytes.Buffer{}, 200) + start := time.Now() + + // Make sure w implements Limiter + _ = Limiter(w) + + // Non-blocking 20-byte write for the first sample returns ErrLimit + w.SetBlocking(false) + if n, err := w.Write(b); n != 20 || err != ErrLimit { + t.Fatalf("w.Write(b) expected 20 (ErrLimit); got 
%v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("w.Write(b) took too long (%v)", rt) + } + + // Blocking 80-byte write + w.SetBlocking(true) + if n, err := w.Write(b[20:]); n != 80 || err != nil { + t.Fatalf("w.Write(b[20:]) expected 80 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt < _300ms { + // Explanation for `rt < _300ms` (as opposed to `< _400ms`) + // + // |<-- start | | + // epochs: -----0ms|---100ms|---200ms|---300ms|---400ms + // sends: 20|20 |20 |20 |20# + // + // NOTE: The '#' symbol can thus happen before 400ms is up. + // Thus, we can only panic if rt < _300ms. + t.Fatalf("w.Write(b[20:]) returned ahead of time (%v)", rt) + } + + w.SetTransferSize(100) + status := []Status{w.Status(), nextStatus(w.Monitor)} + start = status[0].Start + + // Active, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, Start, Duration, Idle, TimeRem, Progress + want := []Status{ + {start, 80, 4, 200, 200, 200, 200, 20, _400ms, 0, _100ms, 80000, true}, + {start, 100, 5, 200, 200, 200, 200, 0, _500ms, _100ms, 0, 100000, true}, + } + + for i, s := range status { + s := s + if !statusesAreEqual(&s, &want[i]) { + t.Errorf("w.Status(%v)\nexpected: %v\ngot : %v\n", i, want[i], s) + } + } + if !bytes.Equal(b, w.Writer.(*bytes.Buffer).Bytes()) { + t.Errorf("w.Write() input doesn't match output") + } +} + +const maxDeviationForDuration = 50 * time.Millisecond +const maxDeviationForRate int64 = 50 + +// statusesAreEqual returns true if s1 is equal to s2. Equality here means +// general equality of fields except for the duration and rates, which can +// drift due to unpredictable delays (e.g. thread wakes up 25ms after +// `time.Sleep` has ended). +func statusesAreEqual(s1 *Status, s2 *Status) bool { + if s1.Active == s2.Active && + s1.Start == s2.Start && + durationsAreEqual(s1.Duration, s2.Duration, maxDeviationForDuration) && + s1.Idle == s2.Idle && + s1.Bytes == s2.Bytes && + s1.Samples == s2.Samples && + ratesAreEqual(s1.InstRate, s2.InstRate, maxDeviationForRate) && + ratesAreEqual(s1.CurRate, s2.CurRate, maxDeviationForRate) && + ratesAreEqual(s1.AvgRate, s2.AvgRate, maxDeviationForRate) && + ratesAreEqual(s1.PeakRate, s2.PeakRate, maxDeviationForRate) && + s1.BytesRem == s2.BytesRem && + durationsAreEqual(s1.TimeRem, s2.TimeRem, maxDeviationForDuration) && + s1.Progress == s2.Progress { + return true + } + return false +} + +func durationsAreEqual(d1 time.Duration, d2 time.Duration, maxDeviation time.Duration) bool { + return d2-d1 <= maxDeviation +} + +func ratesAreEqual(r1 int64, r2 int64, maxDeviation int64) bool { + sub := r1 - r2 + if sub < 0 { + sub = -sub + } + if sub <= maxDeviation { + return true + } + return false +} diff --git a/libs/flowrate/util.go b/libs/flowrate/util.go new file mode 100644 index 0000000..b33ddc7 --- /dev/null +++ b/libs/flowrate/util.go @@ -0,0 +1,67 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "math" + "strconv" + "time" +) + +// clockRate is the resolution and precision of clock(). +const clockRate = 20 * time.Millisecond + +// czero is the process start time rounded down to the nearest clockRate +// increment. +var czero = time.Now().Round(clockRate) + +// clock returns a low resolution timestamp relative to the process start time. +func clock() time.Duration { + return time.Now().Round(clockRate).Sub(czero) +} + +// clockToTime converts a clock() timestamp to an absolute time.Time value. 
+func clockToTime(c time.Duration) time.Time {
+	return czero.Add(c)
+}
+
+// clockRound returns d rounded to the nearest clockRate increment.
+func clockRound(d time.Duration) time.Duration {
+	return (d + clockRate>>1) / clockRate * clockRate
+}
+
+// round returns x rounded to the nearest int64 (non-negative values only).
+func round(x float64) int64 {
+	if _, frac := math.Modf(x); frac >= 0.5 {
+		return int64(math.Ceil(x))
+	}
+	return int64(math.Floor(x))
+}
+
+// Percent represents a percentage in increments of 1/1000th of a percent.
+type Percent uint32
+
+// percentOf calculates what percent of the total is x.
+func percentOf(x, total float64) Percent {
+	if x < 0 || total <= 0 {
+		return 0
+	} else if p := round(x / total * 1e5); p <= math.MaxUint32 {
+		return Percent(p)
+	}
+	return Percent(math.MaxUint32)
+}
+
+func (p Percent) Float() float64 {
+	return float64(p) * 1e-3
+}
+
+func (p Percent) String() string {
+	var buf [12]byte
+	b := strconv.AppendUint(buf[:0], uint64(p)/1000, 10)
+	n := len(b)
+	b = strconv.AppendUint(b, 1000+uint64(p)%1000, 10)
+	b[n] = '.'
+	return string(append(b, '%'))
+}
diff --git a/libs/json/decoder.go b/libs/json/decoder.go
new file mode 100644
index 0000000..86ff27d
--- /dev/null
+++ b/libs/json/decoder.go
@@ -0,0 +1,278 @@
+package json
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+)
+
+// Unmarshal unmarshals JSON into the given value, using Amino-compatible JSON encoding (strings
+// for 64-bit numbers, and type wrappers for registered types).
+func Unmarshal(bz []byte, v interface{}) error {
+	return decode(bz, v)
+}
+
+func decode(bz []byte, v interface{}) error {
+	if len(bz) == 0 {
+		return errors.New("cannot decode empty bytes")
+	}
+
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr {
+		return errors.New("must decode into a pointer")
+	}
+	rv = rv.Elem()
+
+	// If this is a registered type, defer to interface decoder regardless of whether the input is
+	// an interface or a bare value. This retains Amino's behavior, but is inconsistent with
+	// behavior in structs where an interface field will get the type wrapper while a bare value
+	// field will not.
+	if typeRegistry.name(rv.Type()) != "" {
+		return decodeReflectInterface(bz, rv)
+	}
+
+	return decodeReflect(bz, rv)
+}
+
+func decodeReflect(bz []byte, rv reflect.Value) error {
+	if !rv.CanAddr() {
+		return errors.New("value is not addressable")
+	}
+
+	// Handle null for slices, interfaces, and pointers
+	if bytes.Equal(bz, []byte("null")) {
+		rv.Set(reflect.Zero(rv.Type()))
+		return nil
+	}
+
+	// Dereference-and-construct pointers, to handle nested pointers.
+	for rv.Kind() == reflect.Ptr {
+		if rv.IsNil() {
+			rv.Set(reflect.New(rv.Type().Elem()))
+		}
+		rv = rv.Elem()
+	}
+
+	// Times must be UTC and end with Z
+	if rv.Type() == timeType {
+		switch {
+		case len(bz) < 2 || bz[0] != '"' || bz[len(bz)-1] != '"':
+			return fmt.Errorf("JSON time must be an RFC3339 string, but got %q", bz)
+		case bz[len(bz)-2] != 'Z':
+			return fmt.Errorf("JSON time must be UTC and end with 'Z', but got %q", bz)
+		}
+	}
+
+	// If value implements json.Unmarshaler, call it.
+	if rv.Addr().Type().Implements(jsonUnmarshalerType) {
+		return rv.Addr().Interface().(json.Unmarshaler).UnmarshalJSON(bz)
+	}
+
+	switch rv.Type().Kind() {
+	// Decode complex types recursively.
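+	// For instance, []int64 arrives as ["1","2"] and int64-valued maps as
+	// {"a":"1"} (see doc.go); each element is routed back through decodeReflect.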
+ case reflect.Slice, reflect.Array: + return decodeReflectList(bz, rv) + + case reflect.Map: + return decodeReflectMap(bz, rv) + + case reflect.Struct: + return decodeReflectStruct(bz, rv) + + case reflect.Interface: + return decodeReflectInterface(bz, rv) + + // For 64-bit integers, unwrap expected string and defer to stdlib for integer decoding. + case reflect.Int64, reflect.Int, reflect.Uint64, reflect.Uint: + if bz[0] != '"' || bz[len(bz)-1] != '"' { + return fmt.Errorf("invalid 64-bit integer encoding %q, expected string", string(bz)) + } + bz = bz[1 : len(bz)-1] + fallthrough + + // Anything else we defer to the stdlib. + default: + return decodeStdlib(bz, rv) + } +} + +func decodeReflectList(bz []byte, rv reflect.Value) error { + if !rv.CanAddr() { + return errors.New("list value is not addressable") + } + + switch rv.Type().Elem().Kind() { + // Decode base64-encoded bytes using stdlib decoder, via byte slice for arrays. + case reflect.Uint8: + if rv.Type().Kind() == reflect.Array { + var buf []byte + if err := json.Unmarshal(bz, &buf); err != nil { + return err + } + if len(buf) != rv.Len() { + return fmt.Errorf("got %v bytes, expected %v", len(buf), rv.Len()) + } + reflect.Copy(rv, reflect.ValueOf(buf)) + + } else if err := decodeStdlib(bz, rv); err != nil { + return err + } + + // Decode anything else into a raw JSON slice, and decode values recursively. + default: + var rawSlice []json.RawMessage + if err := json.Unmarshal(bz, &rawSlice); err != nil { + return err + } + if rv.Type().Kind() == reflect.Slice { + rv.Set(reflect.MakeSlice(reflect.SliceOf(rv.Type().Elem()), len(rawSlice), len(rawSlice))) + } + if rv.Len() != len(rawSlice) { // arrays of wrong size + return fmt.Errorf("got list of %v elements, expected %v", len(rawSlice), rv.Len()) + } + for i, bz := range rawSlice { + if err := decodeReflect(bz, rv.Index(i)); err != nil { + return err + } + } + } + + // Replace empty slices with nil slices, for Amino compatibility + if rv.Type().Kind() == reflect.Slice && rv.Len() == 0 { + rv.Set(reflect.Zero(rv.Type())) + } + + return nil +} + +func decodeReflectMap(bz []byte, rv reflect.Value) error { + if !rv.CanAddr() { + return errors.New("map value is not addressable") + } + + // Decode into a raw JSON map, using string keys. + rawMap := make(map[string]json.RawMessage) + if err := json.Unmarshal(bz, &rawMap); err != nil { + return err + } + if rv.Type().Key().Kind() != reflect.String { + return fmt.Errorf("map keys must be strings, got %v", rv.Type().Key().String()) + } + + // Recursively decode values. + rv.Set(reflect.MakeMapWithSize(rv.Type(), len(rawMap))) + for key, bz := range rawMap { + value := reflect.New(rv.Type().Elem()).Elem() + if err := decodeReflect(bz, value); err != nil { + return err + } + rv.SetMapIndex(reflect.ValueOf(key), value) + } + return nil +} + +func decodeReflectStruct(bz []byte, rv reflect.Value) error { + if !rv.CanAddr() { + return errors.New("struct value is not addressable") + } + sInfo := makeStructInfo(rv.Type()) + + // Decode raw JSON values into a string-keyed map. 
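+	// (Fields are then matched below by their JSON name from the struct tags,
+	// e.g. a field tagged `json:"name"` is looked up under the key "name".)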
+ rawMap := make(map[string]json.RawMessage) + if err := json.Unmarshal(bz, &rawMap); err != nil { + return err + } + for i, fInfo := range sInfo.fields { + if !fInfo.hidden { + frv := rv.Field(i) + bz := rawMap[fInfo.jsonName] + if len(bz) > 0 { + if err := decodeReflect(bz, frv); err != nil { + return err + } + } else if !fInfo.omitEmpty { + frv.Set(reflect.Zero(frv.Type())) + } + } + } + + return nil +} + +func decodeReflectInterface(bz []byte, rv reflect.Value) error { + if !rv.CanAddr() { + return errors.New("interface value not addressable") + } + + // Decode the interface wrapper. + wrapper := interfaceWrapper{} + if err := json.Unmarshal(bz, &wrapper); err != nil { + return err + } + if wrapper.Type == "" { + return errors.New("interface type cannot be empty") + } + if len(wrapper.Value) == 0 { + return errors.New("interface value cannot be empty") + } + + // Dereference-and-construct pointers, to handle nested pointers. + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + + // Look up the interface type, and construct a concrete value. + rt, returnPtr := typeRegistry.lookup(wrapper.Type) + if rt == nil { + return fmt.Errorf("unknown type %q", wrapper.Type) + } + + cptr := reflect.New(rt) + crv := cptr.Elem() + if err := decodeReflect(wrapper.Value, crv); err != nil { + return err + } + + // This makes sure interface implementations with pointer receivers (e.g. func (c *Car)) are + // constructed as pointers behind the interface. The types must be registered as pointers with + // RegisterType(). + if rv.Type().Kind() == reflect.Interface && returnPtr { + if !cptr.Type().AssignableTo(rv.Type()) { + return fmt.Errorf("invalid type %q for this value", wrapper.Type) + } + rv.Set(cptr) + } else { + if !crv.Type().AssignableTo(rv.Type()) { + return fmt.Errorf("invalid type %q for this value", wrapper.Type) + } + rv.Set(crv) + } + return nil +} + +func decodeStdlib(bz []byte, rv reflect.Value) error { + if !rv.CanAddr() && rv.Kind() != reflect.Ptr { + return errors.New("value must be addressable or pointer") + } + + // Make sure we are unmarshaling into a pointer. 
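+	// (json.Unmarshal needs a pointer, so for an addressable non-pointer value we
+	// allocate a temporary, decode into it, and copy the result back below.)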
+ target := rv + if rv.Kind() != reflect.Ptr { + target = reflect.New(rv.Type()) + } + if err := json.Unmarshal(bz, target.Interface()); err != nil { + return err + } + rv.Set(target.Elem()) + return nil +} + +type interfaceWrapper struct { + Type string `json:"type"` + Value json.RawMessage `json:"value"` +} diff --git a/libs/json/decoder_test.go b/libs/json/decoder_test.go new file mode 100644 index 0000000..9507c0a --- /dev/null +++ b/libs/json/decoder_test.go @@ -0,0 +1,151 @@ +package json_test + +import ( + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/strangelove-ventures/cometbft-client/libs/json" +) + +func TestUnmarshal(t *testing.T) { + i64Nil := (*int64)(nil) + str := "string" + strPtr := &str + structNil := (*Struct)(nil) + i32 := int32(32) + i64 := int64(64) + + testcases := map[string]struct { + json string + value interface{} + err bool + }{ + "bool true": {"true", true, false}, + "bool false": {"false", false, false}, + "float32": {"3.14", float32(3.14), false}, + "float64": {"3.14", float64(3.14), false}, + "int32": {`32`, int32(32), false}, + "int32 string": {`"32"`, int32(32), true}, + "int32 ptr": {`32`, &i32, false}, + "int64": {`"64"`, int64(64), false}, + "int64 noend": {`"64`, int64(64), true}, + "int64 number": {`64`, int64(64), true}, + "int64 ptr": {`"64"`, &i64, false}, + "int64 ptr nil": {`null`, i64Nil, false}, + "string": {`"foo"`, "foo", false}, + "string noend": {`"foo`, "foo", true}, + "string ptr": {`"string"`, &str, false}, + "slice byte": {`"AQID"`, []byte{1, 2, 3}, false}, + "slice bytes": {`["AQID"]`, [][]byte{{1, 2, 3}}, false}, + "slice int32": {`[1,2,3]`, []int32{1, 2, 3}, false}, + "slice int64": {`["1","2","3"]`, []int64{1, 2, 3}, false}, + "slice int64 number": {`[1,2,3]`, []int64{1, 2, 3}, true}, + "slice int64 ptr": {`["64"]`, []*int64{&i64}, false}, + "slice int64 empty": {`[]`, []int64(nil), false}, + "slice int64 null": {`null`, []int64(nil), false}, + "array byte": {`"AQID"`, [3]byte{1, 2, 3}, false}, + "array byte large": {`"AQID"`, [4]byte{1, 2, 3, 4}, true}, + "array byte small": {`"AQID"`, [2]byte{1, 2}, true}, + "array int32": {`[1,2,3]`, [3]int32{1, 2, 3}, false}, + "array int64": {`["1","2","3"]`, [3]int64{1, 2, 3}, false}, + "array int64 number": {`[1,2,3]`, [3]int64{1, 2, 3}, true}, + "array int64 large": {`["1","2","3"]`, [4]int64{1, 2, 3, 4}, true}, + "array int64 small": {`["1","2","3"]`, [2]int64{1, 2}, true}, + "map bytes": {`{"b":"AQID"}`, map[string][]byte{"b": {1, 2, 3}}, false}, + "map int32": {`{"a":1,"b":2}`, map[string]int32{"a": 1, "b": 2}, false}, + "map int64": {`{"a":"1","b":"2"}`, map[string]int64{"a": 1, "b": 2}, false}, + "map int64 empty": {`{}`, map[string]int64{}, false}, + "map int64 null": {`null`, map[string]int64(nil), false}, + "map int key": {`{}`, map[int]int{}, true}, + "time": {`"2020-06-03T17:35:30Z"`, time.Date(2020, 6, 3, 17, 35, 30, 0, time.UTC), false}, + "time non-utc": {`"2020-06-03T17:35:30+02:00"`, time.Time{}, true}, + "time nozone": {`"2020-06-03T17:35:30"`, time.Time{}, true}, + "car": {`{"type":"vehicle/car","value":{"Wheels":4}}`, Car{Wheels: 4}, false}, + "car ptr": {`{"type":"vehicle/car","value":{"Wheels":4}}`, &Car{Wheels: 4}, false}, + "car iface": {`{"type":"vehicle/car","value":{"Wheels":4}}`, Vehicle(&Car{Wheels: 4}), false}, + "boat": {`{"type":"vehicle/boat","value":{"Sail":true}}`, Boat{Sail: true}, false}, + "boat ptr": {`{"type":"vehicle/boat","value":{"Sail":true}}`, &Boat{Sail: true}, 
false}, + "boat iface": {`{"type":"vehicle/boat","value":{"Sail":true}}`, Vehicle(Boat{Sail: true}), false}, + "boat into car": {`{"type":"vehicle/boat","value":{"Sail":true}}`, Car{}, true}, + "boat into car iface": {`{"type":"vehicle/boat","value":{"Sail":true}}`, Vehicle(&Car{}), true}, + "shoes": {`{"type":"vehicle/shoes","value":{"Soles":"rubber"}}`, Car{}, true}, + "shoes ptr": {`{"type":"vehicle/shoes","value":{"Soles":"rubber"}}`, &Car{}, true}, + "shoes iface": {`{"type":"vehicle/shoes","value":{"Soles":"rubbes"}}`, Vehicle(&Car{}), true}, + "key public": {`{"type":"key/public","value":"AQIDBAUGBwg="}`, PublicKey{1, 2, 3, 4, 5, 6, 7, 8}, false}, + "key wrong": {`{"type":"key/public","value":"AQIDBAUGBwg="}`, PrivateKey{1, 2, 3, 4, 5, 6, 7, 8}, true}, + "key into car": {`{"type":"key/public","value":"AQIDBAUGBwg="}`, Vehicle(&Car{}), true}, + "tags": { + `{"name":"name","OmitEmpty":"foo","Hidden":"bar","tags":{"name":"child"}}`, + Tags{JSONName: "name", OmitEmpty: "foo", Tags: &Tags{JSONName: "child"}}, + false, + }, + "tags ptr": { + `{"name":"name","OmitEmpty":"foo","tags":null}`, + &Tags{JSONName: "name", OmitEmpty: "foo"}, + false, + }, + "tags real name": {`{"JSONName":"name"}`, Tags{}, false}, + "struct": { + `{ + "Bool":true, "Float64":3.14, "Int32":32, "Int64":"64", "Int64Ptr":"64", + "String":"foo", "StringPtrPtr": "string", "Bytes":"AQID", + "Time":"2020-06-02T16:05:13.004346374Z", + "Car":{"Wheels":4}, + "Boat":{"Sail":true}, + "Vehicles":[ + {"type":"vehicle/car","value":{"Wheels":4}}, + {"type":"vehicle/boat","value":{"Sail":true}} + ], + "Child":{ + "Bool":false, "Float64":0, "Int32":0, "Int64":"0", "Int64Ptr":null, + "String":"child", "StringPtrPtr":null, "Bytes":null, + "Time":"0001-01-01T00:00:00Z", + "Car":null, "Boat":{"Sail":false}, "Vehicles":null, "Child":null + }, + "private": "foo", "unknown": "bar" + }`, + Struct{ + Bool: true, Float64: 3.14, Int32: 32, Int64: 64, Int64Ptr: &i64, + String: "foo", StringPtrPtr: &strPtr, Bytes: []byte{1, 2, 3}, + Time: time.Date(2020, 6, 2, 16, 5, 13, 4346374, time.UTC), + Car: &Car{Wheels: 4}, Boat: Boat{Sail: true}, Vehicles: []Vehicle{ + Vehicle(&Car{Wheels: 4}), + Vehicle(Boat{Sail: true}), + }, + Child: &Struct{Bool: false, String: "child"}, + }, + false, + }, + "struct key into vehicle": {`{"Vehicles":[ + {"type":"vehicle/car","value":{"Wheels":4}}, + {"type":"key/public","value":"MTIzNDU2Nzg="} + ]}`, Struct{}, true}, + "struct ptr null": {`null`, structNil, false}, + "custom value": {`{"Value":"foo"}`, CustomValue{}, false}, + "custom ptr": {`"foo"`, &CustomPtr{Value: "custom"}, false}, + "custom ptr value": {`"foo"`, CustomPtr{Value: "custom"}, false}, + "invalid type": {`"foo"`, Struct{}, true}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + // Create a target variable as a pointer to the zero value of the tc.value type, + // and wrap it in an empty interface. Decode into that interface. + target := reflect.New(reflect.TypeOf(tc.value)).Interface() + err := json.Unmarshal([]byte(tc.json), target) + if tc.err { + require.Error(t, err) + return + } + require.NoError(t, err) + + // Unwrap the target pointer and get the value behind the interface. 
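+			// (e.g. for a Car test case, target is a *Car, and
+			// Elem().Interface() recovers the Car value for comparison)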
+ actual := reflect.ValueOf(target).Elem().Interface() + assert.Equal(t, tc.value, actual) + }) + } +} diff --git a/libs/json/doc.go b/libs/json/doc.go new file mode 100644 index 0000000..a4fb461 --- /dev/null +++ b/libs/json/doc.go @@ -0,0 +1,98 @@ +// Package json provides functions for marshaling and unmarshaling JSON in a format that is +// backwards-compatible with Amino JSON encoding. This mostly differs from encoding/json in +// encoding of integers (64-bit integers are encoded as strings, not numbers), and handling +// of interfaces (wrapped in an interface object with type/value keys). +// +// JSON tags (e.g. `json:"name,omitempty"`) are supported in the same way as encoding/json, as is +// custom marshaling overrides via the json.Marshaler and json.Unmarshaler interfaces. +// +// Note that not all JSON emitted by CometBFT is generated by this library; some is generated by +// encoding/json instead, and kept like that for backwards compatibility. +// +// Encoding of numbers uses strings for 64-bit integers (including unspecified ints), to improve +// compatibility with e.g. Javascript (which uses 64-bit floats for numbers, having 53-bit +// precision): +// +// int32(32) // Output: 32 +// uint32(32) // Output: 32 +// int64(64) // Output: "64" +// uint64(64) // Output: "64" +// int(64) // Output: "64" +// uint(64) // Output: "64" +// +// Encoding of other scalars follows encoding/json: +// +// nil // Output: null +// true // Output: true +// "foo" // Output: "foo" +// "" // Output: "" +// +// Slices and arrays are encoded as encoding/json, including base64-encoding of byte slices +// with additional base64-encoding of byte arrays as well: +// +// []int64(nil) // Output: null +// []int64{} // Output: [] +// []int64{1, 2, 3} // Output: ["1", "2", "3"] +// []int32{1, 2, 3} // Output: [1, 2, 3] +// []byte{1, 2, 3} // Output: "AQID" +// [3]int64{1, 2, 3} // Output: ["1", "2", "3"] +// [3]byte{1, 2, 3} // Output: "AQID" +// +// Maps are encoded as encoding/json, but only strings are allowed as map keys (nil maps are not +// emitted as null, to retain Amino backwards-compatibility): +// +// map[string]int64(nil) // Output: {} +// map[string]int64{} // Output: {} +// map[string]int64{"a":1,"b":2} // Output: {"a":"1","b":"2"} +// map[string]int32{"a":1,"b":2} // Output: {"a":1,"b":2} +// map[bool]int{true:1} // Errors +// +// Times are encoded as encoding/json, in RFC3339Nano format, but requiring UTC time zone (with zero +// times emitted as "0001-01-01T00:00:00Z" as with encoding/json): +// +// time.Date(2020, 6, 8, 16, 21, 28, 123, time.FixedZone("UTC+2", 2*60*60)) +// // Output: "2020-06-08T14:21:28.000000123Z" +// time.Time{} // Output: "0001-01-01T00:00:00Z" +// (*time.Time)(nil) // Output: null +// +// Structs are encoded as encoding/json, supporting JSON tags and ignoring private fields: +// +// type Struct struct{ +// Name string +// Value int32 `json:"value,omitempty"` +// private bool +// } +// +// Struct{Name: "foo", Value: 7, private: true} // Output: {"Name":"foo","value":7} +// Struct{} // Output: {"Name":""} +// +// Registered types are encoded with type wrapper, regardless of whether they are given as interface +// or bare struct, but inside structs they are only emitted with type wrapper for interface fields +// (this follows Amino behavior): +// +// type Vehicle interface { +// Drive() error +// } +// +// type Car struct { +// Wheels int8 +// } +// +// func (c *Car) Drive() error { return nil } +// +// RegisterType(&Car{}, "vehicle/car") +// +// Car{Wheels: 4} // Output: 
{"type":"vehicle/car","value":{"Wheels":4}} +// &Car{Wheels: 4} // Output: {"type":"vehicle/car","value":{"Wheels":4}} +// (*Car)(nil) // Output: null +// Vehicle(Car{Wheels: 4}) // Output: {"type":"vehicle/car","value":{"Wheels":4}} +// Vehicle(nil) // Output: null +// +// type Struct struct { +// Car *Car +// Vehicle Vehicle +// } +// +// Struct{Car: &Car{Wheels: 4}, Vehicle: &Car{Wheels: 4}} +// // Output: {"Car": {"Wheels: 4"}, "Vehicle": {"type":"vehicle/car","value":{"Wheels":4}}} +package json diff --git a/libs/json/encoder.go b/libs/json/encoder.go new file mode 100644 index 0000000..11990e2 --- /dev/null +++ b/libs/json/encoder.go @@ -0,0 +1,254 @@ +package json + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "time" +) + +var ( + timeType = reflect.TypeOf(time.Time{}) + jsonMarshalerType = reflect.TypeOf(new(json.Marshaler)).Elem() + jsonUnmarshalerType = reflect.TypeOf(new(json.Unmarshaler)).Elem() +) + +// Marshal marshals the value as JSON, using Amino-compatible JSON encoding (strings for +// 64-bit numbers, and type wrappers for registered types). +func Marshal(v interface{}) ([]byte, error) { + buf := new(bytes.Buffer) + err := encode(buf, v) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalIndent marshals the value as JSON, using the given prefix and indentation. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + bz, err := Marshal(v) + if err != nil { + return nil, err + } + buf := new(bytes.Buffer) + err = json.Indent(buf, bz, prefix, indent) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func encode(w io.Writer, v interface{}) error { + // Bare nil values can't be reflected, so we must handle them here. + if v == nil { + return writeStr(w, "null") + } + rv := reflect.ValueOf(v) + + // If this is a registered type, defer to interface encoder regardless of whether the input is + // an interface or a bare value. This retains Amino's behavior, but is inconsistent with + // behavior in structs where an interface field will get the type wrapper while a bare value + // field will not. + if typeRegistry.name(rv.Type()) != "" { + return encodeReflectInterface(w, rv) + } + + return encodeReflect(w, rv) +} + +func encodeReflect(w io.Writer, rv reflect.Value) error { + if !rv.IsValid() { + return errors.New("invalid reflect value") + } + + // Recursively dereference if pointer. + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return writeStr(w, "null") + } + rv = rv.Elem() + } + + // Convert times to UTC. + if rv.Type() == timeType { + rv = reflect.ValueOf(rv.Interface().(time.Time).Round(0).UTC()) + } + + // If the value implements json.Marshaler, defer to stdlib directly. Since we've already + // dereferenced, we try implementations with both value receiver and pointer receiver. We must + // do this after the time normalization above, and thus after dereferencing. + if rv.Type().Implements(jsonMarshalerType) { + return encodeStdlib(w, rv.Interface()) + } else if rv.CanAddr() && rv.Addr().Type().Implements(jsonMarshalerType) { + return encodeStdlib(w, rv.Addr().Interface()) + } + + switch rv.Type().Kind() { + // Complex types must be recursively encoded. 
+ case reflect.Interface: + return encodeReflectInterface(w, rv) + + case reflect.Array, reflect.Slice: + return encodeReflectList(w, rv) + + case reflect.Map: + return encodeReflectMap(w, rv) + + case reflect.Struct: + return encodeReflectStruct(w, rv) + + // 64-bit integers are emitted as strings, to avoid precision problems with e.g. + // Javascript which uses 64-bit floats (having 53-bit precision). + case reflect.Int64, reflect.Int: + return writeStr(w, `"`+strconv.FormatInt(rv.Int(), 10)+`"`) + + case reflect.Uint64, reflect.Uint: + return writeStr(w, `"`+strconv.FormatUint(rv.Uint(), 10)+`"`) + + // For everything else, defer to the stdlib encoding/json encoder + default: + return encodeStdlib(w, rv.Interface()) + } +} + +func encodeReflectList(w io.Writer, rv reflect.Value) error { + // Emit nil slices as null. + if rv.Kind() == reflect.Slice && rv.IsNil() { + return writeStr(w, "null") + } + + // Encode byte slices as base64 with the stdlib encoder. + if rv.Type().Elem().Kind() == reflect.Uint8 { + // Stdlib does not base64-encode byte arrays, only slices, so we copy to slice. + if rv.Type().Kind() == reflect.Array { + slice := reflect.MakeSlice(reflect.SliceOf(rv.Type().Elem()), rv.Len(), rv.Len()) + reflect.Copy(slice, rv) + rv = slice + } + return encodeStdlib(w, rv.Interface()) + } + + // Anything else we recursively encode ourselves. + length := rv.Len() + if err := writeStr(w, "["); err != nil { + return err + } + for i := 0; i < length; i++ { + if err := encodeReflect(w, rv.Index(i)); err != nil { + return err + } + if i < length-1 { + if err := writeStr(w, ","); err != nil { + return err + } + } + } + return writeStr(w, "]") +} + +func encodeReflectMap(w io.Writer, rv reflect.Value) error { + if rv.Type().Key().Kind() != reflect.String { + return errors.New("map key must be string") + } + + // nil maps are not emitted as nil, to retain Amino compatibility. + + if err := writeStr(w, "{"); err != nil { + return err + } + writeComma := false + for _, keyrv := range rv.MapKeys() { + if writeComma { + if err := writeStr(w, ","); err != nil { + return err + } + } + if err := encodeStdlib(w, keyrv.Interface()); err != nil { + return err + } + if err := writeStr(w, ":"); err != nil { + return err + } + if err := encodeReflect(w, rv.MapIndex(keyrv)); err != nil { + return err + } + writeComma = true + } + return writeStr(w, "}") +} + +func encodeReflectStruct(w io.Writer, rv reflect.Value) error { + sInfo := makeStructInfo(rv.Type()) + if err := writeStr(w, "{"); err != nil { + return err + } + writeComma := false + for i, fInfo := range sInfo.fields { + frv := rv.Field(i) + if fInfo.hidden || (fInfo.omitEmpty && frv.IsZero()) { + continue + } + + if writeComma { + if err := writeStr(w, ","); err != nil { + return err + } + } + if err := encodeStdlib(w, fInfo.jsonName); err != nil { + return err + } + if err := writeStr(w, ":"); err != nil { + return err + } + if err := encodeReflect(w, frv); err != nil { + return err + } + writeComma = true + } + return writeStr(w, "}") +} + +func encodeReflectInterface(w io.Writer, rv reflect.Value) error { + // Get concrete value and dereference pointers. 
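+	// (For a registered pointer type such as *Car, this unwraps to Car; the
+	// registry's name lookup below unwraps pointers the same way.)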
+ for rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Interface { + if rv.IsNil() { + return writeStr(w, "null") + } + rv = rv.Elem() + } + + // Look up the name of the concrete type + name := typeRegistry.name(rv.Type()) + if name == "" { + return fmt.Errorf("cannot encode unregistered type %v", rv.Type()) + } + + // Write value wrapped in interface envelope + if err := writeStr(w, fmt.Sprintf(`{"type":%q,"value":`, name)); err != nil { + return err + } + if err := encodeReflect(w, rv); err != nil { + return err + } + return writeStr(w, "}") +} + +func encodeStdlib(w io.Writer, v interface{}) error { + // Doesn't stream the output because that adds a newline, as per: + // https://golang.org/pkg/encoding/json/#Encoder.Encode + blob, err := json.Marshal(v) + if err != nil { + return err + } + _, err = w.Write(blob) + return err +} + +func writeStr(w io.Writer, s string) error { + _, err := w.Write([]byte(s)) + return err +} diff --git a/libs/json/encoder_test.go b/libs/json/encoder_test.go new file mode 100644 index 0000000..67f386f --- /dev/null +++ b/libs/json/encoder_test.go @@ -0,0 +1,104 @@ +package json_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/strangelove-ventures/cometbft-client/libs/json" +) + +func TestMarshal(t *testing.T) { + s := "string" + sPtr := &s + i64 := int64(64) + ti := time.Date(2020, 6, 2, 18, 5, 13, 4346374, time.FixedZone("UTC+2", 2*60*60)) + car := &Car{Wheels: 4} + boat := Boat{Sail: true} + + testcases := map[string]struct { + value interface{} + output string + }{ + "nil": {nil, `null`}, + "string": {"foo", `"foo"`}, + "float32": {float32(3.14), `3.14`}, + "float32 neg": {float32(-3.14), `-3.14`}, + "float64": {float64(3.14), `3.14`}, + "float64 neg": {float64(-3.14), `-3.14`}, + "int32": {int32(32), `32`}, + "int64": {int64(64), `"64"`}, + "int64 neg": {int64(-64), `"-64"`}, + "int64 ptr": {&i64, `"64"`}, + "uint64": {uint64(64), `"64"`}, + "time": {ti, `"2020-06-02T16:05:13.004346374Z"`}, + "time empty": {time.Time{}, `"0001-01-01T00:00:00Z"`}, + "time ptr": {&ti, `"2020-06-02T16:05:13.004346374Z"`}, + "customptr": {CustomPtr{Value: "x"}, `{"Value":"x"}`}, // same as encoding/json + "customptr ptr": {&CustomPtr{Value: "x"}, `"custom"`}, + "customvalue": {CustomValue{Value: "x"}, `"custom"`}, + "customvalue ptr": {&CustomValue{Value: "x"}, `"custom"`}, + "slice nil": {[]int(nil), `null`}, + "slice empty": {[]int{}, `[]`}, + "slice bytes": {[]byte{1, 2, 3}, `"AQID"`}, + "slice int64": {[]int64{1, 2, 3}, `["1","2","3"]`}, + "slice int64 ptr": {[]*int64{&i64, nil}, `["64",null]`}, + "array bytes": {[3]byte{1, 2, 3}, `"AQID"`}, + "array int64": {[3]int64{1, 2, 3}, `["1","2","3"]`}, + "map nil": {map[string]int64(nil), `{}`}, // retain Amino compatibility + "map empty": {map[string]int64{}, `{}`}, + "map int64": {map[string]int64{"a": 1, "b": 2, "c": 3}, `{"a":"1","b":"2","c":"3"}`}, + "car": {car, `{"type":"vehicle/car","value":{"Wheels":4}}`}, + "car value": {*car, `{"type":"vehicle/car","value":{"Wheels":4}}`}, + "car iface": {Vehicle(car), `{"type":"vehicle/car","value":{"Wheels":4}}`}, + "car nil": {(*Car)(nil), `null`}, + "boat": {boat, `{"type":"vehicle/boat","value":{"Sail":true}}`}, + "boat ptr": {&boat, `{"type":"vehicle/boat","value":{"Sail":true}}`}, + "boat iface": {Vehicle(boat), `{"type":"vehicle/boat","value":{"Sail":true}}`}, + "key public": {PublicKey{1, 2, 3, 4, 5, 6, 7, 8}, `{"type":"key/public","value":"AQIDBAUGBwg="}`}, + "tags": { + Tags{JSONName: 
"name", OmitEmpty: "foo", Hidden: "bar", Tags: &Tags{JSONName: "child"}}, + `{"name":"name","OmitEmpty":"foo","tags":{"name":"child"}}`, + }, + "tags empty": {Tags{}, `{"name":""}`}, + // The encoding of the Car and Boat fields do not have type wrappers, even though they get + // type wrappers when encoded directly (see "car" and "boat" tests). This is to retain the + // same behavior as Amino. If the field was a Vehicle interface instead, it would get + // type wrappers, as seen in the Vehicles field. + "struct": { + Struct{ + Bool: true, Float64: 3.14, Int32: 32, Int64: 64, Int64Ptr: &i64, + String: "foo", StringPtrPtr: &sPtr, Bytes: []byte{1, 2, 3}, + Time: ti, Car: car, Boat: boat, Vehicles: []Vehicle{car, boat}, + Child: &Struct{Bool: false, String: "child"}, private: "private", + }, + `{ + "Bool":true, "Float64":3.14, "Int32":32, "Int64":"64", "Int64Ptr":"64", + "String":"foo", "StringPtrPtr": "string", "Bytes":"AQID", + "Time":"2020-06-02T16:05:13.004346374Z", + "Car":{"Wheels":4}, + "Boat":{"Sail":true}, + "Vehicles":[ + {"type":"vehicle/car","value":{"Wheels":4}}, + {"type":"vehicle/boat","value":{"Sail":true}} + ], + "Child":{ + "Bool":false, "Float64":0, "Int32":0, "Int64":"0", "Int64Ptr":null, + "String":"child", "StringPtrPtr":null, "Bytes":null, + "Time":"0001-01-01T00:00:00Z", + "Car":null, "Boat":{"Sail":false}, "Vehicles":null, "Child":null + } + }`, + }, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + bz, err := json.Marshal(tc.value) + require.NoError(t, err) + assert.JSONEq(t, tc.output, string(bz)) + }) + } +} diff --git a/libs/json/helpers_test.go b/libs/json/helpers_test.go new file mode 100644 index 0000000..de2bf43 --- /dev/null +++ b/libs/json/helpers_test.go @@ -0,0 +1,93 @@ +package json_test + +import ( + "time" + + "github.com/strangelove-ventures/cometbft-client/libs/json" +) + +// Register Car, an instance of the Vehicle interface. +func init() { + json.RegisterType(&Car{}, "vehicle/car") + json.RegisterType(Boat{}, "vehicle/boat") + json.RegisterType(PublicKey{}, "key/public") + json.RegisterType(PrivateKey{}, "key/private") +} + +type Vehicle interface { + Drive() error +} + +// Car is a pointer implementation of Vehicle. +type Car struct { + Wheels int32 +} + +func (c *Car) Drive() error { return nil } + +// Boat is a value implementation of Vehicle. +type Boat struct { + Sail bool +} + +func (b Boat) Drive() error { return nil } + +// These are public and private encryption keys. +type ( + PublicKey [8]byte + PrivateKey [8]byte +) + +// Custom has custom marshalers and unmarshalers, taking pointer receivers. +type CustomPtr struct { + Value string +} + +func (c *CustomPtr) MarshalJSON() ([]byte, error) { + return []byte("\"custom\""), nil +} + +func (c *CustomPtr) UnmarshalJSON(_ []byte) error { + c.Value = "custom" + return nil +} + +// CustomValue has custom marshalers and unmarshalers, taking value receivers (which usually doesn't +// make much sense since the unmarshaler can't change anything). +type CustomValue struct { + Value string +} + +func (c CustomValue) MarshalJSON() ([]byte, error) { + return []byte("\"custom\""), nil +} + +func (c CustomValue) UnmarshalJSON(_ []byte) error { + return nil +} + +// Tags tests JSON tags. +type Tags struct { + JSONName string `json:"name"` + OmitEmpty string `json:",omitempty"` + Hidden string `json:"-"` + Tags *Tags `json:"tags,omitempty"` +} + +// Struct tests structs with lots of contents. 
+type Struct struct {
+	Bool         bool
+	Float64      float64
+	Int32        int32
+	Int64        int64
+	Int64Ptr     *int64
+	String       string
+	StringPtrPtr **string
+	Bytes        []byte
+	Time         time.Time
+	Car          *Car
+	Boat         Boat
+	Vehicles     []Vehicle
+	Child        *Struct
+	private      string
+}
diff --git a/libs/json/structs.go b/libs/json/structs.go
new file mode 100644
index 0000000..8c0d1ca
--- /dev/null
+++ b/libs/json/structs.go
@@ -0,0 +1,88 @@
+package json
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"unicode"
+
+	cmtsync "github.com/strangelove-ventures/cometbft-client/libs/sync"
+)
+
+var (
+	// cache caches struct info.
+	cache = newStructInfoCache()
+)
+
+// structInfoCache is a cache of struct info.
+type structInfoCache struct {
+	cmtsync.RWMutex
+	structInfos map[reflect.Type]*structInfo
+}
+
+func newStructInfoCache() *structInfoCache {
+	return &structInfoCache{
+		structInfos: make(map[reflect.Type]*structInfo),
+	}
+}
+
+func (c *structInfoCache) get(rt reflect.Type) *structInfo {
+	c.RLock()
+	defer c.RUnlock()
+	return c.structInfos[rt]
+}
+
+func (c *structInfoCache) set(rt reflect.Type, sInfo *structInfo) {
+	c.Lock()
+	defer c.Unlock()
+	c.structInfos[rt] = sInfo
+}
+
+// structInfo contains JSON info for a struct.
+type structInfo struct {
+	fields []*fieldInfo
+}
+
+// fieldInfo contains JSON info for a struct field.
+type fieldInfo struct {
+	jsonName  string
+	omitEmpty bool
+	hidden    bool
+}
+
+// makeStructInfo generates structInfo for a struct given as a reflect.Type.
+func makeStructInfo(rt reflect.Type) *structInfo {
+	if rt.Kind() != reflect.Struct {
+		panic(fmt.Sprintf("can't make struct info for non-struct value %v", rt))
+	}
+	if sInfo := cache.get(rt); sInfo != nil {
+		return sInfo
+	}
+	fields := make([]*fieldInfo, 0, rt.NumField())
+	for i := 0; i < cap(fields); i++ {
+		frt := rt.Field(i)
+		fInfo := &fieldInfo{
+			jsonName:  frt.Name,
+			omitEmpty: false,
+			hidden:    frt.Name == "" || !unicode.IsUpper(rune(frt.Name[0])),
+		}
+		o := frt.Tag.Get("json")
+		if o == "-" {
+			fInfo.hidden = true
+		} else if o != "" {
+			opts := strings.Split(o, ",")
+			if opts[0] != "" {
+				fInfo.jsonName = opts[0]
+			}
+			for _, o := range opts[1:] {
+				if o == "omitempty" {
+					fInfo.omitEmpty = true
+				}
+			}
+		}
+		fields = append(fields, fInfo)
+	}
+	sInfo := &structInfo{fields: fields}
+	cache.set(rt, sInfo)
+	return sInfo
+}
diff --git a/libs/json/types.go b/libs/json/types.go
new file mode 100644
index 0000000..b028812
--- /dev/null
+++ b/libs/json/types.go
@@ -0,0 +1,109 @@
+package json
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+
+	cmtsync "github.com/strangelove-ventures/cometbft-client/libs/sync"
+)
+
+var (
+	// typeRegistry contains globally registered types for JSON encoding/decoding.
+	typeRegistry = newTypes()
+)
+
+// RegisterType registers a type for Amino-compatible interface encoding in the global type
+// registry. These types will be encoded with a type wrapper `{"type":"<name>","value":<value>}`
+// regardless of which interface they are wrapped in (if any). If the type is a pointer, it will
+// still be valid both for value and pointer types, but decoding into an interface will generate
+// a value or pointer based on the registered type.
+//
+// Should only be called in init() functions, as it panics on error.
+func RegisterType(_type interface{}, name string) {
+	if _type == nil {
+		panic("cannot register nil type")
+	}
+	err := typeRegistry.register(name, reflect.ValueOf(_type).Type())
+	if err != nil {
+		panic(err)
+	}
+}
+
+// typeInfo contains type information.
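+// For example, RegisterType(&Car{}, "vehicle/car") (see helpers_test.go) yields
+// a typeInfo with name "vehicle/car", rt Car (pointer unwrapped), and returnPtr true.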
+type typeInfo struct { + name string + rt reflect.Type + returnPtr bool +} + +// types is a type registry. It is safe for concurrent use. +type types struct { + cmtsync.RWMutex + byType map[reflect.Type]*typeInfo + byName map[string]*typeInfo +} + +// newTypes creates a new type registry. +func newTypes() types { + return types{ + byType: map[reflect.Type]*typeInfo{}, + byName: map[string]*typeInfo{}, + } +} + +// registers the given type with the given name. The name and type must not be registered already. +func (t *types) register(name string, rt reflect.Type) error { + if name == "" { + return errors.New("name cannot be empty") + } + // If this is a pointer type, we recursively resolve until we get a bare type, but register that + // we should return pointers. + returnPtr := false + for rt.Kind() == reflect.Ptr { + returnPtr = true + rt = rt.Elem() + } + tInfo := &typeInfo{ + name: name, + rt: rt, + returnPtr: returnPtr, + } + + t.Lock() + defer t.Unlock() + if _, ok := t.byName[tInfo.name]; ok { + return fmt.Errorf("a type with name %q is already registered", name) + } + if _, ok := t.byType[tInfo.rt]; ok { + return fmt.Errorf("the type %v is already registered", rt) + } + t.byName[name] = tInfo + t.byType[rt] = tInfo + return nil +} + +// lookup looks up a type from a name, or nil if not registered. +func (t *types) lookup(name string) (reflect.Type, bool) { + t.RLock() + defer t.RUnlock() + tInfo := t.byName[name] + if tInfo == nil { + return nil, false + } + return tInfo.rt, tInfo.returnPtr +} + +// name looks up the name of a type, or empty if not registered. Unwraps pointers as necessary. +func (t *types) name(rt reflect.Type) string { + for rt.Kind() == reflect.Ptr { + rt = rt.Elem() + } + t.RLock() + defer t.RUnlock() + tInfo := t.byType[rt] + if tInfo == nil { + return "" + } + return tInfo.name +} diff --git a/libs/log/filter.go b/libs/log/filter.go new file mode 100644 index 0000000..4b7ed98 --- /dev/null +++ b/libs/log/filter.go @@ -0,0 +1,196 @@ +package log + +import "fmt" + +type level byte + +const ( + levelDebug level = 1 << iota + levelInfo + levelError +) + +type filter struct { + next Logger + allowed level // XOR'd levels for default case + initiallyAllowed level // XOR'd levels for initial case + allowedKeyvals map[keyval]level // When key-value match, use this level +} + +type keyval struct { + key interface{} + value interface{} +} + +// NewFilter wraps next and implements filtering. See the commentary on the +// Option functions for a detailed description of how to configure levels. If +// no options are provided, all leveled log events created with Debug, Info or +// Error helper methods are squelched. +func NewFilter(next Logger, options ...Option) Logger { + l := &filter{ + next: next, + allowedKeyvals: make(map[keyval]level), + } + for _, option := range options { + option(l) + } + l.initiallyAllowed = l.allowed + return l +} + +func (l *filter) Info(msg string, keyvals ...interface{}) { + levelAllowed := l.allowed&levelInfo != 0 + if !levelAllowed { + return + } + l.next.Info(msg, keyvals...) +} + +func (l *filter) Debug(msg string, keyvals ...interface{}) { + levelAllowed := l.allowed&levelDebug != 0 + if !levelAllowed { + return + } + l.next.Debug(msg, keyvals...) +} + +func (l *filter) Error(msg string, keyvals ...interface{}) { + levelAllowed := l.allowed&levelError != 0 + if !levelAllowed { + return + } + l.next.Error(msg, keyvals...) +} + +// With implements Logger by constructing a new filter with a keyvals appended +// to the logger. 
+//
+// If a custom level was set for a keyval pair using one of the
+// Allow*With methods, it is used as the logger's level.
+//
+// Examples:
+//
+//	logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"))
+//	logger.With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto"
+//
+//	logger = log.NewFilter(logger, log.AllowError(),
+//		log.AllowInfoWith("module", "crypto"),
+//		log.AllowNoneWith("user", "Sam"))
+//	logger.With("module", "crypto", "user", "Sam").Info("Hello") # produces no output
+//
+//	logger = log.NewFilter(logger,
+//		log.AllowError(),
+//		log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam"))
+//	logger.With("user", "Sam").With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto user=Sam"
+func (l *filter) With(keyvals ...interface{}) Logger {
+	keyInAllowedKeyvals := false
+
+	for i := len(keyvals) - 2; i >= 0; i -= 2 {
+		for kv, allowed := range l.allowedKeyvals {
+			if keyvals[i] == kv.key {
+				keyInAllowedKeyvals = true
+				// Example:
+				//   logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"))
+				//   logger.With("module", "crypto")
+				if keyvals[i+1] == kv.value {
+					return &filter{
+						next:             l.next.With(keyvals...),
+						allowed:          allowed, // set the desired level
+						allowedKeyvals:   l.allowedKeyvals,
+						initiallyAllowed: l.initiallyAllowed,
+					}
+				}
+			}
+		}
+	}
+
+	// Example:
+	//   logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"))
+	//   logger.With("module", "main")
+	if keyInAllowedKeyvals {
+		return &filter{
+			next:             l.next.With(keyvals...),
+			allowed:          l.initiallyAllowed, // return back to initially allowed
+			allowedKeyvals:   l.allowedKeyvals,
+			initiallyAllowed: l.initiallyAllowed,
+		}
+	}
+
+	return &filter{
+		next:             l.next.With(keyvals...),
+		allowed:          l.allowed, // simply continue with the current level
+		allowedKeyvals:   l.allowedKeyvals,
+		initiallyAllowed: l.initiallyAllowed,
+	}
+}
+
+//--------------------------------------------------------------------------------
+
+// Option sets a parameter for the filter.
+type Option func(*filter)
+
+// AllowLevel returns an option for the given level, or an error if no option
+// exists for that level.
+func AllowLevel(lvl string) (Option, error) {
+	switch lvl {
+	case "debug":
+		return AllowDebug(), nil
+	case "info":
+		return AllowInfo(), nil
+	case "error":
+		return AllowError(), nil
+	case "none":
+		return AllowNone(), nil
+	default:
+		return nil, fmt.Errorf("expected either \"info\", \"debug\", \"error\" or \"none\" level, given %s", lvl)
+	}
+}
+
+// AllowAll is an alias for AllowDebug.
+func AllowAll() Option {
+	return AllowDebug()
+}
+
+// AllowDebug allows error, info and debug level log events to pass.
+func AllowDebug() Option {
+	return allowed(levelError | levelInfo | levelDebug)
+}
+
+// AllowInfo allows error and info level log events to pass.
+func AllowInfo() Option {
+	return allowed(levelError | levelInfo)
+}
+
+// AllowError allows only error level log events to pass.
+func AllowError() Option {
+	return allowed(levelError)
+}
+
+// AllowNone allows no leveled log events to pass.
+func AllowNone() Option {
+	return allowed(0)
+}
+
+func allowed(allowed level) Option {
+	return func(l *filter) { l.allowed = allowed }
+}
+
+// AllowDebugWith allows error, info and debug level log events to pass for a specific key value pair.
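+//
+// For example (an illustrative sketch; the "module"/"mempool" pair is hypothetical):
+//
+//	logger = log.NewFilter(logger, log.AllowError(), log.AllowDebugWith("module", "mempool"))
+//	logger.With("module", "mempool").Debug("Added tx") # produces "D... Added tx module=mempool"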
+func AllowDebugWith(key interface{}, value interface{}) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError | levelInfo | levelDebug } +} + +// AllowInfoWith allows error and info level log events to pass for a specific key value pair. +func AllowInfoWith(key interface{}, value interface{}) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError | levelInfo } +} + +// AllowErrorWith allows only error level log events to pass for a specific key value pair. +func AllowErrorWith(key interface{}, value interface{}) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = levelError } +} + +// AllowNoneWith allows no leveled log events to pass for a specific key value pair. +func AllowNoneWith(key interface{}, value interface{}) Option { + return func(l *filter) { l.allowedKeyvals[keyval{key, value}] = 0 } +} diff --git a/libs/log/filter_test.go b/libs/log/filter_test.go new file mode 100644 index 0000000..a401ee6 --- /dev/null +++ b/libs/log/filter_test.go @@ -0,0 +1,140 @@ +package log_test + +import ( + "bytes" + "strings" + "testing" + + "github.com/strangelove-ventures/cometbft-client/libs/log" +) + +func TestVariousLevels(t *testing.T) { + testCases := []struct { + name string + allowed log.Option + want string + }{ + { + "AllowAll", + log.AllowAll(), + strings.Join([]string{ + `{"_msg":"here","level":"debug","this is":"debug log"}`, + `{"_msg":"here","level":"info","this is":"info log"}`, + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowDebug", + log.AllowDebug(), + strings.Join([]string{ + `{"_msg":"here","level":"debug","this is":"debug log"}`, + `{"_msg":"here","level":"info","this is":"info log"}`, + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowInfo", + log.AllowInfo(), + strings.Join([]string{ + `{"_msg":"here","level":"info","this is":"info log"}`, + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowError", + log.AllowError(), + strings.Join([]string{ + `{"_msg":"here","level":"error","this is":"error log"}`, + }, "\n"), + }, + { + "AllowNone", + log.AllowNone(), + ``, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + var buf bytes.Buffer + logger := log.NewFilter(log.NewTMJSONLoggerNoTS(&buf), tc.allowed) + + logger.Debug("here", "this is", "debug log") + logger.Info("here", "this is", "info log") + logger.Error("here", "this is", "error log") + + if want, have := tc.want, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant:\n%s\nhave:\n%s", want, have) + } + }) + } +} + +func TestLevelContext(t *testing.T) { + var buf bytes.Buffer + + logger := log.NewTMJSONLoggerNoTS(&buf) + logger = log.NewFilter(logger, log.AllowError()) + logger = logger.With("context", "value") + + logger.Error("foo", "bar", "baz") + + want := `{"_msg":"foo","bar":"baz","context":"value","level":"error"}` + have := strings.TrimSpace(buf.String()) + if want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } + + buf.Reset() + logger.Info("foo", "bar", "baz") + if want, have := ``, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } +} + +func TestVariousAllowWith(t *testing.T) { + var buf bytes.Buffer + + logger := log.NewTMJSONLoggerNoTS(&buf) + + logger1 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value")) + logger1.With("context", "value").Info("foo", "bar", "baz") 
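+	// The pair ("context", "value") is allowed at info level via AllowInfoWith
+	// above, so this Info call should pass through the filter.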
+ + want := `{"_msg":"foo","bar":"baz","context":"value","level":"info"}` + have := strings.TrimSpace(buf.String()) + if want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } + + buf.Reset() + + logger2 := log.NewFilter( + logger, + log.AllowError(), + log.AllowInfoWith("context", "value"), + log.AllowNoneWith("user", "Sam"), + ) + + logger2.With("context", "value", "user", "Sam").Info("foo", "bar", "baz") + if want, have := ``, strings.TrimSpace(buf.String()); want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } + + buf.Reset() + + logger3 := log.NewFilter( + logger, + log.AllowError(), + log.AllowInfoWith("context", "value"), + log.AllowNoneWith("user", "Sam"), + ) + + logger3.With("user", "Sam").With("context", "value").Info("foo", "bar", "baz") + + want = `{"_msg":"foo","bar":"baz","context":"value","level":"info","user":"Sam"}` + have = strings.TrimSpace(buf.String()) + if want != have { + t.Errorf("\nwant '%s'\nhave '%s'", want, have) + } +} diff --git a/libs/log/lazy.go b/libs/log/lazy.go new file mode 100644 index 0000000..892323a --- /dev/null +++ b/libs/log/lazy.go @@ -0,0 +1,42 @@ +package log + +import ( + "fmt" + + cmtbytes "github.com/strangelove-ventures/cometbft-client/libs/bytes" +) + +type LazySprintf struct { + format string + args []interface{} +} + +// NewLazySprintf defers fmt.Sprintf until the Stringer interface is invoked. +// This is particularly useful for avoiding calling Sprintf when debugging is not +// active. +func NewLazySprintf(format string, args ...interface{}) *LazySprintf { + return &LazySprintf{format, args} +} + +func (l *LazySprintf) String() string { + return fmt.Sprintf(l.format, l.args...) +} + +type LazyBlockHash struct { + block hashable +} + +type hashable interface { + Hash() cmtbytes.HexBytes +} + +// NewLazyBlockHash defers block Hash until the Stringer interface is invoked. +// This is particularly useful for avoiding calling Sprintf when debugging is not +// active. +func NewLazyBlockHash(block hashable) *LazyBlockHash { + return &LazyBlockHash{block} +} + +func (l *LazyBlockHash) String() string { + return l.block.Hash().String() +} diff --git a/libs/log/logger.go b/libs/log/logger.go new file mode 100644 index 0000000..22ed68f --- /dev/null +++ b/libs/log/logger.go @@ -0,0 +1,30 @@ +package log + +import ( + "io" + + kitlog "github.com/go-kit/log" +) + +// Logger is what any CometBFT library should take. +type Logger interface { + Debug(msg string, keyvals ...interface{}) + Info(msg string, keyvals ...interface{}) + Error(msg string, keyvals ...interface{}) + + With(keyvals ...interface{}) Logger +} + +// NewSyncWriter returns a new writer that is safe for concurrent use by +// multiple goroutines. Writes to the returned writer are passed on to w. If +// another write is already in progress, the calling goroutine blocks until +// the writer is available. +// +// If w implements the following interface, so does the returned writer. +// +// interface { +// Fd() uintptr +// } +func NewSyncWriter(w io.Writer) io.Writer { + return kitlog.NewSyncWriter(w) +} diff --git a/libs/log/nop_logger.go b/libs/log/nop_logger.go new file mode 100644 index 0000000..12d75ab --- /dev/null +++ b/libs/log/nop_logger.go @@ -0,0 +1,17 @@ +package log + +type nopLogger struct{} + +// Interface assertions +var _ Logger = (*nopLogger)(nil) + +// NewNopLogger returns a logger that doesn't do anything. 
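+//
+// It is useful as a default or in tests where log output is unwanted.
+// For example (an illustrative sketch):
+//
+//	var logger log.Logger = log.NewNopLogger()
+//	logger.Info("this line is silently dropped")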
+func NewNopLogger() Logger { return &nopLogger{} }
+
+func (nopLogger) Info(string, ...interface{})  {}
+func (nopLogger) Debug(string, ...interface{}) {}
+func (nopLogger) Error(string, ...interface{}) {}
+
+func (l *nopLogger) With(...interface{}) Logger {
+	return l
+}
diff --git a/libs/log/testing_logger.go b/libs/log/testing_logger.go
new file mode 100644
index 0000000..7c6f661
--- /dev/null
+++ b/libs/log/testing_logger.go
@@ -0,0 +1,60 @@
+package log
+
+import (
+	"io"
+	"os"
+	"testing"
+
+	"github.com/go-kit/log/term"
+)
+
+var (
+	// reuse the same logger across all tests
+	_testingLogger Logger
+)
+
+// TestingLogger returns a TMLogger which writes to STDOUT if tests are being
+// run with the verbose (-v) flag, and a NopLogger otherwise.
+//
+// Note that the call to TestingLogger() must be made
+// inside a test (not in the init func) because the
+// verbose flag is only set at the time of testing.
+func TestingLogger() Logger {
+	return TestingLoggerWithOutput(os.Stdout)
+}
+
+// TestingLoggerWithOutput returns a TMLogger which writes to w if tests are
+// being run with the verbose (-v) flag, and a NopLogger otherwise.
+//
+// Note that the call to TestingLoggerWithOutput(w io.Writer) must be made
+// inside a test (not in the init func) because the
+// verbose flag is only set at the time of testing.
+func TestingLoggerWithOutput(w io.Writer) Logger {
+	if _testingLogger != nil {
+		return _testingLogger
+	}
+
+	if testing.Verbose() {
+		_testingLogger = NewTMLogger(NewSyncWriter(w))
+	} else {
+		_testingLogger = NewNopLogger()
+	}
+
+	return _testingLogger
+}
+
+// TestingLoggerWithColorFn allows you to provide your own color function. See
+// TestingLogger for documentation.
+func TestingLoggerWithColorFn(colorFn func(keyvals ...interface{}) term.FgBgColor) Logger {
+	if _testingLogger != nil {
+		return _testingLogger
+	}
+
+	if testing.Verbose() {
+		_testingLogger = NewTMLoggerWithColorFn(NewSyncWriter(os.Stdout), colorFn)
+	} else {
+		_testingLogger = NewNopLogger()
+	}
+
+	return _testingLogger
+}
diff --git a/libs/log/tm_json_logger.go b/libs/log/tm_json_logger.go
new file mode 100644
index 0000000..786b618
--- /dev/null
+++ b/libs/log/tm_json_logger.go
@@ -0,0 +1,24 @@
+package log
+
+import (
+	"io"
+
+	kitlog "github.com/go-kit/log"
+)
+
+// NewTMJSONLogger returns a Logger that encodes keyvals to the Writer as a
+// single JSON object. Each log event produces no more than one call to
+// w.Write. The passed Writer must be safe for concurrent use by multiple
+// goroutines if the returned Logger will be used concurrently.
+func NewTMJSONLogger(w io.Writer) Logger {
+	logger := kitlog.NewJSONLogger(w)
+	logger = kitlog.With(logger, "ts", kitlog.DefaultTimestampUTC)
+	return &tmLogger{logger}
+}
+
+// NewTMJSONLoggerNoTS is the same as NewTMJSONLogger, but without the
+// timestamp.
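+//
+// For example (illustrative; this is the shape the tests in this package
+// assert against):
+//
+//	logger := log.NewTMJSONLoggerNoTS(os.Stdout)
+//	logger.Info("foo", "bar", "baz")
+//	// writes: {"_msg":"foo","bar":"baz","level":"info"}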
+func NewTMJSONLoggerNoTS(w io.Writer) Logger { + logger := kitlog.NewJSONLogger(w) + return &tmLogger{logger} +} diff --git a/libs/log/tm_logger.go b/libs/log/tm_logger.go new file mode 100644 index 0000000..ac0d08a --- /dev/null +++ b/libs/log/tm_logger.go @@ -0,0 +1,86 @@ +package log + +import ( + "fmt" + "io" + + kitlog "github.com/go-kit/log" + kitlevel "github.com/go-kit/log/level" + "github.com/go-kit/log/term" +) + +const ( + msgKey = "_msg" // "_" prefixed to avoid collisions + moduleKey = "module" +) + +type tmLogger struct { + srcLogger kitlog.Logger +} + +// Interface assertions +var _ Logger = (*tmLogger)(nil) + +// NewTMLogger returns a logger that encodes msg and keyvals to the Writer +// using go-kit's log as an underlying logger and our custom formatter. Note +// that underlying logger could be swapped with something else. +func NewTMLogger(w io.Writer) Logger { + // Color by level value + colorFn := func(keyvals ...interface{}) term.FgBgColor { + if keyvals[0] != kitlevel.Key() { + panic(fmt.Sprintf("expected level key to be first, got %v", keyvals[0])) + } + switch keyvals[1].(kitlevel.Value).String() { + case "debug": + return term.FgBgColor{Fg: term.DarkGray} + case "error": + return term.FgBgColor{Fg: term.Red} + default: + return term.FgBgColor{} + } + } + + return &tmLogger{term.NewLogger(w, NewTMFmtLogger, colorFn)} +} + +// NewTMLoggerWithColorFn allows you to provide your own color function. See +// NewTMLogger for documentation. +func NewTMLoggerWithColorFn(w io.Writer, colorFn func(keyvals ...interface{}) term.FgBgColor) Logger { + return &tmLogger{term.NewLogger(w, NewTMFmtLogger, colorFn)} +} + +// Info logs a message at level Info. +func (l *tmLogger) Info(msg string, keyvals ...interface{}) { + lWithLevel := kitlevel.Info(l.srcLogger) + + if err := kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...); err != nil { + errLogger := kitlevel.Error(l.srcLogger) + kitlog.With(errLogger, msgKey, msg).Log("err", err) //nolint:errcheck // no need to check error again + } +} + +// Debug logs a message at level Debug. +func (l *tmLogger) Debug(msg string, keyvals ...interface{}) { + lWithLevel := kitlevel.Debug(l.srcLogger) + + if err := kitlog.With(lWithLevel, msgKey, msg).Log(keyvals...); err != nil { + errLogger := kitlevel.Error(l.srcLogger) + kitlog.With(errLogger, msgKey, msg).Log("err", err) //nolint:errcheck // no need to check error again + } +} + +// Error logs a message at level Error. +func (l *tmLogger) Error(msg string, keyvals ...interface{}) { + lWithLevel := kitlevel.Error(l.srcLogger) + + lWithMsg := kitlog.With(lWithLevel, msgKey, msg) + if err := lWithMsg.Log(keyvals...); err != nil { + lWithMsg.Log("err", err) //nolint:errcheck // no need to check error again + } +} + +// With returns a new contextual logger with keyvals prepended to those passed +// to calls to Info, Debug or Error. 
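+//
+// For example (an illustrative sketch):
+//
+//	logger := log.NewTMLogger(os.Stdout).With("module", "consensus")
+//	logger.Info("started") // every line now carries module=consensus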
+func (l *tmLogger) With(keyvals ...interface{}) Logger { + return &tmLogger{kitlog.With(l.srcLogger, keyvals...)} +} diff --git a/libs/log/tm_logger_test.go b/libs/log/tm_logger_test.go new file mode 100644 index 0000000..505c087 --- /dev/null +++ b/libs/log/tm_logger_test.go @@ -0,0 +1,112 @@ +package log_test + +import ( + "bytes" + "io" + "strings" + "testing" + + "github.com/strangelove-ventures/cometbft-client/libs/log" +) + +func TestLoggerLogsItsErrors(t *testing.T) { + var buf bytes.Buffer + + logger := log.NewTMLogger(&buf) + logger.Info("foo", "baz baz", "bar") + msg := strings.TrimSpace(buf.String()) + if !strings.Contains(msg, "foo") { + t.Errorf("expected logger msg to contain ErrInvalidKey, got %s", msg) + } +} + +func TestInfo(t *testing.T) { + var bufInfo bytes.Buffer + + l := log.NewTMLogger(&bufInfo) + l.Info("Client initialized with old header (trusted is more recent)", + "old", 42, + "trustedHeight", "forty two", + "trustedHash", []byte("test me")) + + msg := strings.TrimSpace(bufInfo.String()) + + // Remove the timestamp information to allow + // us to test against the expected message. + receivedmsg := strings.Split(msg, "] ")[1] + + const expectedmsg = `Client initialized with old header + (trusted is more recent) old=42 trustedHeight="forty two" + trustedHash=74657374206D65` + if strings.EqualFold(receivedmsg, expectedmsg) { + t.Fatalf("received %s, expected %s", receivedmsg, expectedmsg) + } +} + +func TestDebug(t *testing.T) { + var bufDebug bytes.Buffer + + ld := log.NewTMLogger(&bufDebug) + ld.Debug("Client initialized with old header (trusted is more recent)", + "old", 42, + "trustedHeight", "forty two", + "trustedHash", []byte("test me")) + + msg := strings.TrimSpace(bufDebug.String()) + + // Remove the timestamp information to allow + // us to test against the expected message. + receivedmsg := strings.Split(msg, "] ")[1] + + const expectedmsg = `Client initialized with old header + (trusted is more recent) old=42 trustedHeight="forty two" + trustedHash=74657374206D65` + if strings.EqualFold(receivedmsg, expectedmsg) { + t.Fatalf("received %s, expected %s", receivedmsg, expectedmsg) + } +} + +func TestError(t *testing.T) { + var bufErr bytes.Buffer + + le := log.NewTMLogger(&bufErr) + le.Error("Client initialized with old header (trusted is more recent)", + "old", 42, + "trustedHeight", "forty two", + "trustedHash", []byte("test me")) + + msg := strings.TrimSpace(bufErr.String()) + + // Remove the timestamp information to allow + // us to test against the expected message. 
+ receivedmsg := strings.Split(msg, "] ")[1] + + const expectedmsg = `Client initialized with old header + (trusted is more recent) old=42 trustedHeight="forty two" + trustedHash=74657374206D65` + if strings.EqualFold(receivedmsg, expectedmsg) { + t.Fatalf("received %s, expected %s", receivedmsg, expectedmsg) + } +} + +func BenchmarkTMLoggerSimple(b *testing.B) { + benchmarkRunner(b, log.NewTMLogger(io.Discard), baseInfoMessage) +} + +func BenchmarkTMLoggerContextual(b *testing.B) { + benchmarkRunner(b, log.NewTMLogger(io.Discard), withInfoMessage) +} + +func benchmarkRunner(b *testing.B, logger log.Logger, f func(log.Logger)) { + lc := logger.With("common_key", "common_value") + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + f(lc) + } +} + +var ( + baseInfoMessage = func(logger log.Logger) { logger.Info("foo_message", "foo_key", "foo_value") } + withInfoMessage = func(logger log.Logger) { logger.With("a", "b").Info("c", "d", "f") } +) diff --git a/libs/log/tmfmt_logger.go b/libs/log/tmfmt_logger.go new file mode 100644 index 0000000..1d8cb80 --- /dev/null +++ b/libs/log/tmfmt_logger.go @@ -0,0 +1,141 @@ +package log + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "strings" + "sync" + "time" + + kitlog "github.com/go-kit/log" + kitlevel "github.com/go-kit/log/level" + "github.com/go-logfmt/logfmt" +) + +type tmfmtEncoder struct { + *logfmt.Encoder + buf bytes.Buffer +} + +func (l *tmfmtEncoder) Reset() { + l.Encoder.Reset() + l.buf.Reset() +} + +var tmfmtEncoderPool = sync.Pool{ + New: func() interface{} { + var enc tmfmtEncoder + enc.Encoder = logfmt.NewEncoder(&enc.buf) + return &enc + }, +} + +type tmfmtLogger struct { + w io.Writer +} + +// NewTMFmtLogger returns a logger that encodes keyvals to the Writer in +// CometBFT custom format. Note complex types (structs, maps, slices) +// formatted as "%+v". +// +// Each log event produces no more than one call to w.Write. +// The passed Writer must be safe for concurrent use by multiple goroutines if +// the returned Logger will be used concurrently. 
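+//
+// An illustrative output line (the format is described in detail in Log below;
+// the message and keyvals here are hypothetical):
+//
+//	I[2016-05-02|11:06:44.322] Started node                                 module=main height=1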
+func NewTMFmtLogger(w io.Writer) kitlog.Logger { + return &tmfmtLogger{w} +} + +func (l tmfmtLogger) Log(keyvals ...interface{}) error { + enc := tmfmtEncoderPool.Get().(*tmfmtEncoder) + enc.Reset() + defer tmfmtEncoderPool.Put(enc) + + const unknown = "unknown" + lvl := "none" + msg := unknown + module := unknown + + // indexes of keys to skip while encoding later + excludeIndexes := make([]int, 0) + + for i := 0; i < len(keyvals)-1; i += 2 { + // Extract level + switch keyvals[i] { + case kitlevel.Key(): + excludeIndexes = append(excludeIndexes, i) + switch keyvals[i+1].(type) { //nolint:gocritic + case string: + lvl = keyvals[i+1].(string) + case kitlevel.Value: + lvl = keyvals[i+1].(kitlevel.Value).String() + default: + panic(fmt.Sprintf("level value of unknown type %T", keyvals[i+1])) + } + // and message + case msgKey: + excludeIndexes = append(excludeIndexes, i) + msg = keyvals[i+1].(string) + // and module (could be multiple keyvals; if such case last keyvalue wins) + case moduleKey: + excludeIndexes = append(excludeIndexes, i) + module = keyvals[i+1].(string) + } + + // Print []byte as a hexadecimal string (uppercased) + if b, ok := keyvals[i+1].([]byte); ok { + keyvals[i+1] = strings.ToUpper(hex.EncodeToString(b)) + } + + // Realize stringers + if s, ok := keyvals[i+1].(fmt.Stringer); ok { + keyvals[i+1] = s.String() + } + + } + + // Form a custom CometBFT line + // + // Example: + // D[2016-05-02|11:06:44.322] Stopping AddrBook (ignoring: already stopped) + // + // Description: + // D - first character of the level, uppercase (ASCII only) + // [2016-05-02|11:06:44.322] - our time format (see https://golang.org/src/time/format.go) + // Stopping ... - message + enc.buf.WriteString(fmt.Sprintf("%c[%s] %-44s ", lvl[0]-32, time.Now().Format("2006-01-02|15:04:05.000"), msg)) + + if module != unknown { + enc.buf.WriteString("module=" + module + " ") + } + +KeyvalueLoop: + for i := 0; i < len(keyvals)-1; i += 2 { + for _, j := range excludeIndexes { + if i == j { + continue KeyvalueLoop + } + } + + err := enc.EncodeKeyval(keyvals[i], keyvals[i+1]) + if err == logfmt.ErrUnsupportedValueType { + enc.EncodeKeyval(keyvals[i], fmt.Sprintf("%+v", keyvals[i+1])) //nolint:errcheck // no need to check error again + } else if err != nil { + return err + } + } + + // Add newline to the end of the buffer + if err := enc.EndRecord(); err != nil { + return err + } + + // The Logger interface requires implementations to be safe for concurrent + // use by multiple goroutines. For this implementation that means making + // only one call to l.w.Write() for each call to Log. 
+ if _, err := l.w.Write(enc.buf.Bytes()); err != nil { + return err + } + return nil +} diff --git a/libs/log/tmfmt_logger_test.go b/libs/log/tmfmt_logger_test.go new file mode 100644 index 0000000..3e216f3 --- /dev/null +++ b/libs/log/tmfmt_logger_test.go @@ -0,0 +1,125 @@ +package log_test + +import ( + "bytes" + "errors" + "io" + "math" + "regexp" + "testing" + + kitlog "github.com/go-kit/log" + "github.com/stretchr/testify/assert" + + "github.com/strangelove-ventures/cometbft-client/libs/log" +) + +func TestTMFmtLogger(t *testing.T) { + t.Parallel() + buf := &bytes.Buffer{} + logger := log.NewTMFmtLogger(buf) + + if err := logger.Log("hello", "world"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ hello=world\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("a", 1, "err", errors.New("error")); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ a=1 err=error\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("std_map", map[int]int{1: 2}, "my_map", mymap{0: 0}); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ std_map=map\[1:2\] my_map=special_behavior\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("level", "error"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`E\[.+\] unknown \s+\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("_msg", "Hello"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] Hello \s+\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("module", "main", "module", "crypto", "module", "wire"); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+module=wire\s+\n$`), buf.String()) + + buf.Reset() + if err := logger.Log("hash", []byte("test me")); err != nil { + t.Fatal(err) + } + assert.Regexp(t, regexp.MustCompile(`N\[.+\] unknown \s+ hash=74657374206D65\n$`), buf.String()) +} + +func BenchmarkTMFmtLoggerSimple(b *testing.B) { + benchmarkRunnerKitlog(b, log.NewTMFmtLogger(io.Discard), baseMessage) +} + +func BenchmarkTMFmtLoggerContextual(b *testing.B) { + benchmarkRunnerKitlog(b, log.NewTMFmtLogger(io.Discard), withMessage) +} + +func TestTMFmtLoggerConcurrency(t *testing.T) { + t.Parallel() + testConcurrency(t, log.NewTMFmtLogger(io.Discard), 10000) +} + +func benchmarkRunnerKitlog(b *testing.B, logger kitlog.Logger, f func(kitlog.Logger)) { + lc := kitlog.With(logger, "common_key", "common_value") + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + f(lc) + } +} + +var ( + baseMessage = func(logger kitlog.Logger) { logger.Log("foo_key", "foo_value") } //nolint:errcheck + withMessage = func(logger kitlog.Logger) { kitlog.With(logger, "a", "b").Log("d", "f") } //nolint:errcheck +) + +// These test are designed to be run with the race detector. 
+ +func testConcurrency(t *testing.T, logger kitlog.Logger, total int) { + n := int(math.Sqrt(float64(total))) + share := total / n + + errC := make(chan error, n) + + for i := 0; i < n; i++ { + go func() { + errC <- spam(logger, share) + }() + } + + for i := 0; i < n; i++ { + err := <-errC + if err != nil { + t.Fatalf("concurrent logging error: %v", err) + } + } +} + +func spam(logger kitlog.Logger, count int) error { + for i := 0; i < count; i++ { + err := logger.Log("key", i) + if err != nil { + return err + } + } + return nil +} + +type mymap map[int]int + +func (m mymap) String() string { return "special_behavior" } diff --git a/libs/log/tracing_logger.go b/libs/log/tracing_logger.go new file mode 100644 index 0000000..d2a6ff4 --- /dev/null +++ b/libs/log/tracing_logger.go @@ -0,0 +1,76 @@ +package log + +import ( + "fmt" + + "github.com/pkg/errors" +) + +// NewTracingLogger enables tracing by wrapping all errors (if they +// implement stackTracer interface) in tracedError. +// +// All errors returned by https://github.com/pkg/errors implement stackTracer +// interface. +// +// For debugging purposes only as it doubles the amount of allocations. +func NewTracingLogger(next Logger) Logger { + return &tracingLogger{ + next: next, + } +} + +type stackTracer interface { + error + StackTrace() errors.StackTrace +} + +type tracingLogger struct { + next Logger +} + +func (l *tracingLogger) Info(msg string, keyvals ...interface{}) { + l.next.Info(msg, formatErrors(keyvals)...) +} + +func (l *tracingLogger) Debug(msg string, keyvals ...interface{}) { + l.next.Debug(msg, formatErrors(keyvals)...) +} + +func (l *tracingLogger) Error(msg string, keyvals ...interface{}) { + l.next.Error(msg, formatErrors(keyvals)...) +} + +func (l *tracingLogger) With(keyvals ...interface{}) Logger { + return &tracingLogger{next: l.next.With(formatErrors(keyvals)...)} +} + +func formatErrors(keyvals []interface{}) []interface{} { + newKeyvals := make([]interface{}, len(keyvals)) + copy(newKeyvals, keyvals) + for i := 0; i < len(newKeyvals)-1; i += 2 { + if err, ok := newKeyvals[i+1].(stackTracer); ok { + newKeyvals[i+1] = tracedError{err} + } + } + return newKeyvals +} + +// tracedError wraps a stackTracer and just makes the Error() result +// always return a full stack trace. 
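+//
+// For example (an illustrative sketch; the error must come from
+// github.com/pkg/errors so that it carries a stack trace):
+//
+//	err := errors.New("boom").(stackTracer)
+//	tracedError{err}.Error() // "boom" followed by the full recorded stack trace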
+type tracedError struct {
+	wrapped stackTracer
+}
+
+var _ stackTracer = tracedError{}
+
+func (t tracedError) StackTrace() errors.StackTrace {
+	return t.wrapped.StackTrace()
+}
+
+func (t tracedError) Cause() error {
+	return t.wrapped
+}
+
+func (t tracedError) Error() string {
+	return fmt.Sprintf("%+v", t.wrapped)
+}
diff --git a/libs/log/tracing_logger_test.go b/libs/log/tracing_logger_test.go
new file mode 100644
index 0000000..b9dee0f
--- /dev/null
+++ b/libs/log/tracing_logger_test.go
@@ -0,0 +1,65 @@
+package log_test
+
+import (
+	"bytes"
+	stderr "errors"
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/pkg/errors"
+
+	"github.com/strangelove-ventures/cometbft-client/libs/log"
+)
+
+func TestTracingLogger(t *testing.T) {
+	var buf bytes.Buffer
+
+	logger := log.NewTMJSONLoggerNoTS(&buf)
+
+	logger1 := log.NewTracingLogger(logger)
+	err1 := errors.New("courage is grace under pressure")
+	err2 := errors.New("it does not matter how slowly you go, so long as you do not stop")
+	logger1.With("err1", err1).Info("foo", "err2", err2)
+
+	want := strings.ReplaceAll(
+		strings.ReplaceAll(
+			`{"_msg":"foo","err1":"`+
+				fmt.Sprintf("%+v", err1)+
+				`","err2":"`+
+				fmt.Sprintf("%+v", err2)+
+				`","level":"info"}`,
+			"\t", "",
+		), "\n", "")
+	have := strings.ReplaceAll(strings.ReplaceAll(strings.TrimSpace(buf.String()), "\\n", ""), "\\t", "")
+	if want != have {
+		t.Errorf("\nwant '%s'\nhave '%s'", want, have)
+	}
+
+	buf.Reset()
+
+	logger.With(
+		"err1", stderr.New("opportunities don't happen. You create them"),
+	).Info(
+		"foo", "err2", stderr.New("once you choose hope, anything's possible"),
+	)
+
+	want = `{"_msg":"foo",` +
+		`"err1":"opportunities don't happen. You create them",` +
+		`"err2":"once you choose hope, anything's possible",` +
+		`"level":"info"}`
+	have = strings.TrimSpace(buf.String())
+	if want != have {
+		t.Errorf("\nwant '%s'\nhave '%s'", want, have)
+	}
+
+	buf.Reset()
+
+	logger.With("user", "Sam").With("context", "value").Info("foo", "bar", "baz")
+
+	want = `{"_msg":"foo","bar":"baz","context":"value","level":"info","user":"Sam"}`
+	have = strings.TrimSpace(buf.String())
+	if want != have {
+		t.Errorf("\nwant '%s'\nhave '%s'", want, have)
+	}
+}
diff --git a/libs/math/fraction.go b/libs/math/fraction.go
new file mode 100644
index 0000000..a8d2855
--- /dev/null
+++ b/libs/math/fraction.go
@@ -0,0 +1,48 @@
+package math
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+)
+
+// Fraction is defined in terms of a numerator divided by a denominator, both
+// in uint64 format. Fraction must be positive.
+type Fraction struct {
+	// The numerator of the fraction, e.g. 2 in 2/3.
+	Numerator uint64 `json:"numerator"`
+	// The value by which the numerator is divided, e.g. 3 in 2/3.
+	Denominator uint64 `json:"denominator"`
+}
+
+func (fr Fraction) String() string {
+	return fmt.Sprintf("%d/%d", fr.Numerator, fr.Denominator)
+}
+
+// ParseFraction takes the string representation of a fraction, e.g. "2/3",
+// and converts it to the equivalent Fraction, or returns an error. The format
+// of the string must be one number followed by a slash (/) and then another number.
+func ParseFraction(f string) (Fraction, error) {
+	o := strings.Split(f, "/")
+	if len(o) != 2 {
+		return Fraction{}, errors.New("incorrect formatting: should have a single slash e.g. 
\"1/3\"") + } + numerator, err := strconv.ParseUint(o[0], 10, 64) + if err != nil { + return Fraction{}, fmt.Errorf("incorrect formatting, err: %w", err) + } + + denominator, err := strconv.ParseUint(o[1], 10, 64) + if err != nil { + return Fraction{}, fmt.Errorf("incorrect formatting, err: %w", err) + } + if denominator == 0 { + return Fraction{}, errors.New("denominator can't be 0") + } + if numerator > math.MaxInt64 || denominator > math.MaxInt64 { + return Fraction{}, fmt.Errorf("value overflow, numerator and denominator must be less than %d", int64(math.MaxInt64)) + } + return Fraction{Numerator: numerator, Denominator: denominator}, nil +} diff --git a/libs/math/fraction_test.go b/libs/math/fraction_test.go new file mode 100644 index 0000000..73ca0f6 --- /dev/null +++ b/libs/math/fraction_test.go @@ -0,0 +1,86 @@ +package math + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseFraction(t *testing.T) { + + testCases := []struct { + f string + exp Fraction + err bool + }{ + { + f: "2/3", + exp: Fraction{2, 3}, + err: false, + }, + { + f: "15/5", + exp: Fraction{15, 5}, + err: false, + }, + // test divide by zero error + { + f: "2/0", + exp: Fraction{}, + err: true, + }, + // test negative + { + f: "-1/2", + exp: Fraction{}, + err: true, + }, + { + f: "1/-2", + exp: Fraction{}, + err: true, + }, + // test overflow + { + f: "9223372036854775808/2", + exp: Fraction{}, + err: true, + }, + { + f: "2/9223372036854775808", + exp: Fraction{}, + err: true, + }, + { + f: "2/3/4", + exp: Fraction{}, + err: true, + }, + { + f: "123", + exp: Fraction{}, + err: true, + }, + { + f: "1a2/4", + exp: Fraction{}, + err: true, + }, + { + f: "1/3bc4", + exp: Fraction{}, + err: true, + }, + } + + for idx, tc := range testCases { + output, err := ParseFraction(tc.f) + if tc.err { + assert.Error(t, err, idx) + } else { + assert.NoError(t, err, idx) + } + assert.Equal(t, tc.exp, output, idx) + } + +} diff --git a/libs/math/math.go b/libs/math/math.go new file mode 100644 index 0000000..cf567a9 --- /dev/null +++ b/libs/math/math.go @@ -0,0 +1,31 @@ +package math + +func MaxInt64(a, b int64) int64 { + if a > b { + return a + } + return b +} + +func MaxInt(a, b int) int { + if a > b { + return a + } + return b +} + +//----------------------------------------------------------------------------- + +func MinInt64(a, b int64) int64 { + if a < b { + return a + } + return b +} + +func MinInt(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/libs/math/safemath.go b/libs/math/safemath.go new file mode 100644 index 0000000..ff7f090 --- /dev/null +++ b/libs/math/safemath.go @@ -0,0 +1,65 @@ +package math + +import ( + "errors" + "math" +) + +var ErrOverflowInt32 = errors.New("int32 overflow") +var ErrOverflowUint8 = errors.New("uint8 overflow") +var ErrOverflowInt8 = errors.New("int8 overflow") + +// SafeAddInt32 adds two int32 integers +// If there is an overflow this will panic +func SafeAddInt32(a, b int32) int32 { + if b > 0 && (a > math.MaxInt32-b) { + panic(ErrOverflowInt32) + } else if b < 0 && (a < math.MinInt32-b) { + panic(ErrOverflowInt32) + } + return a + b +} + +// SafeSubInt32 subtracts two int32 integers +// If there is an overflow this will panic +func SafeSubInt32(a, b int32) int32 { + if b > 0 && (a < math.MinInt32+b) { + panic(ErrOverflowInt32) + } else if b < 0 && (a > math.MaxInt32+b) { + panic(ErrOverflowInt32) + } + return a - b +} + +// SafeConvertInt32 takes a int and checks if it overflows +// If there is an overflow this will panic +func 
SafeConvertInt32(a int64) int32 { + if a > math.MaxInt32 { + panic(ErrOverflowInt32) + } else if a < math.MinInt32 { + panic(ErrOverflowInt32) + } + return int32(a) +} + +// SafeConvertUint8 takes an int64 and checks if it overflows +// If there is an overflow it returns an error +func SafeConvertUint8(a int64) (uint8, error) { + if a > math.MaxUint8 { + return 0, ErrOverflowUint8 + } else if a < 0 { + return 0, ErrOverflowUint8 + } + return uint8(a), nil +} + +// SafeConvertInt8 takes an int64 and checks if it overflows +// If there is an overflow it returns an error +func SafeConvertInt8(a int64) (int8, error) { + if a > math.MaxInt8 { + return 0, ErrOverflowInt8 + } else if a < math.MinInt8 { + return 0, ErrOverflowInt8 + } + return int8(a), nil +} diff --git a/libs/net/net.go b/libs/net/net.go new file mode 100644 index 0000000..fa85256 --- /dev/null +++ b/libs/net/net.go @@ -0,0 +1,43 @@ +package net + +import ( + "net" + "strings" +) + +// Connect dials the given address and returns a net.Conn. The protoAddr argument should be prefixed with the protocol, +// eg. "tcp://127.0.0.1:8080" or "unix:///tmp/test.sock" +func Connect(protoAddr string) (net.Conn, error) { + proto, address := ProtocolAndAddress(protoAddr) + conn, err := net.Dial(proto, address) + return conn, err +} + +// ProtocolAndAddress splits an address into the protocol and address components. +// For instance, "tcp://127.0.0.1:8080" will be split into "tcp" and "127.0.0.1:8080". +// If the address has no protocol prefix, the default is "tcp". +func ProtocolAndAddress(listenAddr string) (string, string) { + protocol, address := "tcp", listenAddr + parts := strings.SplitN(address, "://", 2) + if len(parts) == 2 { + protocol, address = parts[0], parts[1] + } + return protocol, address +} + +// GetFreePort gets a free port from the operating system. +// Ripped from https://github.com/phayes/freeport. +// BSD-licensed. +func GetFreePort() (int, error) { + addr, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + return 0, err + } + + l, err := net.ListenTCP("tcp", addr) + if err != nil { + return 0, err + } + defer l.Close() + return l.Addr().(*net.TCPAddr).Port, nil +} diff --git a/libs/net/net_test.go b/libs/net/net_test.go new file mode 100644 index 0000000..38cd58f --- /dev/null +++ b/libs/net/net_test.go @@ -0,0 +1,38 @@ +package net + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestProtocolAndAddress(t *testing.T) { + + cases := []struct { + fullAddr string + proto string + addr string + }{ + { + "tcp://mydomain:80", + "tcp", + "mydomain:80", + }, + { + "mydomain:80", + "tcp", + "mydomain:80", + }, + { + "unix://mydomain:80", + "unix", + "mydomain:80", + }, + } + + for _, c := range cases { + proto, addr := ProtocolAndAddress(c.fullAddr) + assert.Equal(t, proto, c.proto) + assert.Equal(t, addr, c.addr) + } +} diff --git a/libs/os/os.go b/libs/os/os.go new file mode 100644 index 0000000..fb64b85 --- /dev/null +++ b/libs/os/os.go @@ -0,0 +1,112 @@ +package os + +import ( + "errors" + "fmt" + "io" + "os" + "os/signal" + "syscall" + + "github.com/strangelove-ventures/cometbft-client/libs/log" +) + +type logger interface { + Info(msg string, keyvals ...interface{}) +} + +// TrapSignal catches the SIGTERM/SIGINT and executes cb function. After that it exits +// with code 0. 
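+//
+// Typical usage (an illustrative sketch; the cleanup body is hypothetical):
+//
+//	TrapSignal(logger, func() {
+//		// flush buffers, close files, etc. before the process exits
+//	})
+//	select {} // block so the trap goroutine can handle the signal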
+func TrapSignal(logger logger, cb func()) { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + for sig := range c { + logger.Info("signal trapped", "msg", log.NewLazySprintf("captured %v, exiting...", sig)) + if cb != nil { + cb() + } + os.Exit(0) + } + }() +} + +// Kill the running process by sending itself SIGTERM. +func Kill() error { + p, err := os.FindProcess(os.Getpid()) + if err != nil { + return err + } + return p.Signal(syscall.SIGTERM) +} + +func Exit(s string) { + fmt.Printf(s + "\n") + os.Exit(1) +} + +// EnsureDir ensures the given directory exists, creating it if necessary. +// Errors if the path already exists as a non-directory. +func EnsureDir(dir string, mode os.FileMode) error { + err := os.MkdirAll(dir, mode) + if err != nil { + return fmt.Errorf("could not create directory %q: %w", dir, err) + } + return nil +} + +func FileExists(filePath string) bool { + _, err := os.Stat(filePath) + return !os.IsNotExist(err) +} + +func ReadFile(filePath string) ([]byte, error) { + return os.ReadFile(filePath) +} + +func MustReadFile(filePath string) []byte { + fileBytes, err := os.ReadFile(filePath) + if err != nil { + Exit(fmt.Sprintf("MustReadFile failed: %v", err)) + return nil + } + return fileBytes +} + +func WriteFile(filePath string, contents []byte, mode os.FileMode) error { + return os.WriteFile(filePath, contents, mode) +} + +func MustWriteFile(filePath string, contents []byte, mode os.FileMode) { + err := WriteFile(filePath, contents, mode) + if err != nil { + Exit(fmt.Sprintf("MustWriteFile failed: %v", err)) + } +} + +// CopyFile copies a file. It truncates the destination file if it exists. +func CopyFile(src, dst string) error { + srcfile, err := os.Open(src) + if err != nil { + return err + } + defer srcfile.Close() + + info, err := srcfile.Stat() + if err != nil { + return err + } + if info.IsDir() { + return errors.New("cannot read from directories") + } + + // create new file, truncate if exists and apply same permissions as the original one + dstfile, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, info.Mode().Perm()) + if err != nil { + return err + } + defer dstfile.Close() + + _, err = io.Copy(dstfile, srcfile) + return err +} diff --git a/libs/os/os_test.go b/libs/os/os_test.go new file mode 100644 index 0000000..4de5184 --- /dev/null +++ b/libs/os/os_test.go @@ -0,0 +1,113 @@ +package os + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCopyFile(t *testing.T) { + tmpfile, err := os.CreateTemp("", "example") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpfile.Name()) + content := []byte("hello world") + if _, err := tmpfile.Write(content); err != nil { + t.Fatal(err) + } + + copyfile := fmt.Sprintf("%s.copy", tmpfile.Name()) + if err := CopyFile(tmpfile.Name(), copyfile); err != nil { + t.Fatal(err) + } + if _, err := os.Stat(copyfile); os.IsNotExist(err) { + t.Fatal("copy should exist") + } + data, err := os.ReadFile(copyfile) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(data, content) { + t.Fatalf("copy file content differs: expected %v, got %v", content, data) + } + os.Remove(copyfile) +} + +func TestEnsureDir(t *testing.T) { + tmp, err := os.MkdirTemp("", "ensure-dir") + require.NoError(t, err) + defer os.RemoveAll(tmp) + + // Should be possible to create a new directory. 
+ err = EnsureDir(filepath.Join(tmp, "dir"), 0755) + require.NoError(t, err) + require.DirExists(t, filepath.Join(tmp, "dir")) + + // Should succeed on existing directory. + err = EnsureDir(filepath.Join(tmp, "dir"), 0755) + require.NoError(t, err) + + // Should fail on file. + err = os.WriteFile(filepath.Join(tmp, "file"), []byte{}, 0644) + require.NoError(t, err) + err = EnsureDir(filepath.Join(tmp, "file"), 0755) + require.Error(t, err) + + // Should allow symlink to dir. + err = os.Symlink(filepath.Join(tmp, "dir"), filepath.Join(tmp, "linkdir")) + require.NoError(t, err) + err = EnsureDir(filepath.Join(tmp, "linkdir"), 0755) + require.NoError(t, err) + + // Should error on symlink to file. + err = os.Symlink(filepath.Join(tmp, "file"), filepath.Join(tmp, "linkfile")) + require.NoError(t, err) + err = EnsureDir(filepath.Join(tmp, "linkfile"), 0755) + require.Error(t, err) +} + +// Ensure that using CopyFile does not truncate the destination file before +// the origin is positively a non-directory and that it is ready for copying. +// See https://github.com/tendermint/tendermint/issues/6427 +func TestTrickedTruncation(t *testing.T) { + tmpDir, err := os.MkdirTemp(os.TempDir(), "pwn_truncate") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpDir) + + originalWALPath := filepath.Join(tmpDir, "wal") + originalWALContent := []byte("I AM BECOME DEATH, DESTROYER OF ALL WORLDS!") + if err := os.WriteFile(originalWALPath, originalWALContent, 0755); err != nil { + t.Fatal(err) + } + + // 1. Sanity check. + readWAL, err := os.ReadFile(originalWALPath) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(readWAL, originalWALContent) { + t.Fatalf("Cannot proceed as the content does not match\nGot: %q\nWant: %q", readWAL, originalWALContent) + } + + // 2. Now cause the truncation of the original file. + // It is absolutely legal to invoke os.Open on a directory. + if err := CopyFile(tmpDir, originalWALPath); err == nil { + t.Fatal("Expected an error") + } + + // 3. 
Check the WAL's content.
+	reReadWAL, err := os.ReadFile(originalWALPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(reReadWAL, originalWALContent) {
+		t.Fatalf("Oops, the WAL's content was changed :(\nGot: %q\nWant: %q", reReadWAL, originalWALContent)
+	}
+}
diff --git a/libs/pubsub/example_test.go b/libs/pubsub/example_test.go
new file mode 100644
index 0000000..7553640
--- /dev/null
+++ b/libs/pubsub/example_test.go
@@ -0,0 +1,32 @@
+package pubsub_test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/strangelove-ventures/cometbft-client/libs/log"
+
+	"github.com/strangelove-ventures/cometbft-client/libs/pubsub"
+	"github.com/strangelove-ventures/cometbft-client/libs/pubsub/query"
+)
+
+func TestExample(t *testing.T) {
+	s := pubsub.NewServer()
+	s.SetLogger(log.TestingLogger())
+	err := s.Start()
+	require.NoError(t, err)
+	t.Cleanup(func() {
+		if err := s.Stop(); err != nil {
+			t.Error(err)
+		}
+	})
+
+	ctx := context.Background()
+	subscription, err := s.Subscribe(ctx, "example-client", query.MustCompile("abci.account.name='John'"))
+	require.NoError(t, err)
+	err = s.PublishWithEvents(ctx, "Tombstone", map[string][]string{"abci.account.name": {"John"}})
+	require.NoError(t, err)
+	assertReceive(t, "Tombstone", subscription.Out())
+}
diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go
new file mode 100644
index 0000000..3d6d0ab
--- /dev/null
+++ b/libs/pubsub/pubsub.go
@@ -0,0 +1,433 @@
+// Package pubsub implements a pub-sub model with a single publisher (Server)
+// and multiple subscribers (clients).
+//
+// You can, however, have multiple publishers by sharing a pointer to a server
+// or by giving the same channel to each publisher and publishing messages
+// from that channel (fan-in).
+//
+// Clients subscribe for messages, which could be of any type, using a query.
+// When some message is published, we match it with all queries. If there is a
+// match, this message will be pushed to all clients subscribed to that query.
+// See the query subpackage for our implementation.
+//
+// Example:
+//
+//	q, err := query.New("account.name='John'")
+//	if err != nil {
+//		return err
+//	}
+//	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+//	defer cancel()
+//	subscription, err := pubsub.Subscribe(ctx, "johns-transactions", q)
+//	if err != nil {
+//		return err
+//	}
+//
+//	for {
+//		select {
+//		case msg := <-subscription.Out():
+//			// handle msg.Data() and msg.Events()
+//		case <-subscription.Canceled():
+//			return subscription.Err()
+//		}
+//	}
package pubsub
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/strangelove-ventures/cometbft-client/libs/service"
+	cmtsync "github.com/strangelove-ventures/cometbft-client/libs/sync"
+)
+
+type operation int
+
+const (
+	sub operation = iota
+	pub
+	unsub
+	shutdown
+)
+
+var (
+	// ErrSubscriptionNotFound is returned when a client tries to unsubscribe
+	// from a subscription that does not exist.
+	ErrSubscriptionNotFound = errors.New("subscription not found")
+
+	// ErrAlreadySubscribed is returned when a client tries to subscribe twice or
+	// more using the same query.
+	ErrAlreadySubscribed = errors.New("already subscribed")
+)
+
+// Query defines an interface for a query to be used for subscribing. A query
+// matches against a map of events. Each key in this map is a composite of the
+// event type and an attribute key (e.g. "{eventType}.{eventAttrKey}") and the
+// values are the event values that are contained under that relationship. 
This +// allows event types to repeat themselves with the same set of keys and +// different values. +type Query interface { + Matches(events map[string][]string) (bool, error) + String() string +} + +type cmd struct { + op operation + + // subscribe, unsubscribe + query Query + subscription *Subscription + clientID string + + // publish + msg interface{} + events map[string][]string +} + +// Server allows clients to subscribe/unsubscribe for messages, publishing +// messages with or without events, and manages internal state. +type Server struct { + service.BaseService + + cmds chan cmd + cmdsCap int + + // check if we have subscription before + // subscribing or unsubscribing + mtx cmtsync.RWMutex + subscriptions map[string]map[string]struct{} // subscriber -> query (string) -> empty struct +} + +// Option sets a parameter for the server. +type Option func(*Server) + +// NewServer returns a new server. See the commentary on the Option functions +// for a detailed description of how to configure buffering. If no options are +// provided, the resulting server's queue is unbuffered. +func NewServer(options ...Option) *Server { + s := &Server{ + subscriptions: make(map[string]map[string]struct{}), + } + s.BaseService = *service.NewBaseService(nil, "PubSub", s) + + for _, option := range options { + option(s) + } + + // if BufferCapacity option was not set, the channel is unbuffered + s.cmds = make(chan cmd, s.cmdsCap) + + return s +} + +// BufferCapacity allows you to specify capacity for the internal server's +// queue. Since the server, given Y subscribers, could only process X messages, +// this option could be used to survive spikes (e.g. high amount of +// transactions during peak hours). +func BufferCapacity(cap int) Option { + return func(s *Server) { + if cap > 0 { + s.cmdsCap = cap + } + } +} + +// BufferCapacity returns capacity of the internal server's queue. +func (s *Server) BufferCapacity() int { + return s.cmdsCap +} + +// Subscribe creates a subscription for the given client. +// +// An error will be returned to the caller if the context is canceled or if +// subscription already exist for pair clientID and query. +// +// outCapacity can be used to set a capacity for Subscription#Out channel (1 by +// default). Panics if outCapacity is less than or equal to zero. If you want +// an unbuffered channel, use SubscribeUnbuffered. +func (s *Server) Subscribe( + ctx context.Context, + clientID string, + query Query, + outCapacity ...int) (*Subscription, error) { + outCap := 1 + if len(outCapacity) > 0 { + if outCapacity[0] <= 0 { + panic("Negative or zero capacity. Use SubscribeUnbuffered if you want an unbuffered channel") + } + outCap = outCapacity[0] + } + + return s.subscribe(ctx, clientID, query, outCap) +} + +// SubscribeUnbuffered does the same as Subscribe, except it returns a +// subscription with unbuffered channel. Use with caution as it can freeze the +// server. 
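+//
+// For example (an illustrative sketch; the query mirrors the example test in
+// this package):
+//
+//	sub, err := s.SubscribeUnbuffered(ctx, "client", query.MustCompile("abci.account.name='John'"))
+//	if err != nil {
+//		return err
+//	}
+//	msg := <-sub.Out() // the server blocks on send until this receive happens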
+func (s *Server) SubscribeUnbuffered(ctx context.Context, clientID string, query Query) (*Subscription, error) { + return s.subscribe(ctx, clientID, query, 0) +} + +func (s *Server) subscribe(ctx context.Context, clientID string, query Query, outCapacity int) (*Subscription, error) { + s.mtx.RLock() + clientSubscriptions, ok := s.subscriptions[clientID] + if ok { + _, ok = clientSubscriptions[query.String()] + } + s.mtx.RUnlock() + if ok { + return nil, ErrAlreadySubscribed + } + + subscription := NewSubscription(outCapacity) + select { + case s.cmds <- cmd{op: sub, clientID: clientID, query: query, subscription: subscription}: + s.mtx.Lock() + if _, ok = s.subscriptions[clientID]; !ok { + s.subscriptions[clientID] = make(map[string]struct{}) + } + s.subscriptions[clientID][query.String()] = struct{}{} + s.mtx.Unlock() + return subscription, nil + case <-ctx.Done(): + return nil, ctx.Err() + case <-s.Quit(): + return nil, errors.New("service is shutting down") + } +} + +// Unsubscribe removes the subscription on the given query. An error will be +// returned to the caller if the context is canceled or if subscription does +// not exist. +func (s *Server) Unsubscribe(ctx context.Context, clientID string, query Query) error { + s.mtx.RLock() + clientSubscriptions, ok := s.subscriptions[clientID] + if ok { + _, ok = clientSubscriptions[query.String()] + } + s.mtx.RUnlock() + if !ok { + return ErrSubscriptionNotFound + } + + select { + case s.cmds <- cmd{op: unsub, clientID: clientID, query: query}: + s.mtx.Lock() + delete(clientSubscriptions, query.String()) + if len(clientSubscriptions) == 0 { + delete(s.subscriptions, clientID) + } + s.mtx.Unlock() + return nil + case <-ctx.Done(): + return ctx.Err() + case <-s.Quit(): + return nil + } +} + +// UnsubscribeAll removes all client subscriptions. An error will be returned +// to the caller if the context is canceled or if subscription does not exist. +func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { + s.mtx.RLock() + _, ok := s.subscriptions[clientID] + s.mtx.RUnlock() + if !ok { + return ErrSubscriptionNotFound + } + + select { + case s.cmds <- cmd{op: unsub, clientID: clientID}: + s.mtx.Lock() + delete(s.subscriptions, clientID) + s.mtx.Unlock() + return nil + case <-ctx.Done(): + return ctx.Err() + case <-s.Quit(): + return nil + } +} + +// NumClients returns the number of clients. +func (s *Server) NumClients() int { + s.mtx.RLock() + defer s.mtx.RUnlock() + return len(s.subscriptions) +} + +// NumClientSubscriptions returns the number of subscriptions the client has. +func (s *Server) NumClientSubscriptions(clientID string) int { + s.mtx.RLock() + defer s.mtx.RUnlock() + return len(s.subscriptions[clientID]) +} + +// Publish publishes the given message. An error will be returned to the caller +// if the context is canceled. +func (s *Server) Publish(ctx context.Context, msg interface{}) error { + return s.PublishWithEvents(ctx, msg, make(map[string][]string)) +} + +// PublishWithEvents publishes the given message with the set of events. The set +// is matched with clients queries. If there is a match, the message is sent to +// the client. +func (s *Server) PublishWithEvents(ctx context.Context, msg interface{}, events map[string][]string) error { + select { + case s.cmds <- cmd{op: pub, msg: msg, events: events}: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-s.Quit(): + return nil + } +} + +// OnStop implements Service.OnStop by shutting down the server. 
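+// The shutdown command makes the internal event loop cancel any remaining
+// subscriptions and exit (see loop below).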
+func (s *Server) OnStop() { + s.cmds <- cmd{op: shutdown} +} + +// NOTE: not goroutine safe +type state struct { + // query string -> client -> subscription + subscriptions map[string]map[string]*Subscription + // query string -> queryPlusRefCount + queries map[string]*queryPlusRefCount +} + +// queryPlusRefCount holds a pointer to a query and reference counter. When +// refCount is zero, query will be removed. +type queryPlusRefCount struct { + q Query + refCount int +} + +// OnStart implements Service.OnStart by starting the server. +func (s *Server) OnStart() error { + go s.loop(state{ + subscriptions: make(map[string]map[string]*Subscription), + queries: make(map[string]*queryPlusRefCount), + }) + return nil +} + +// OnReset implements Service.OnReset +func (s *Server) OnReset() error { + return nil +} + +func (s *Server) loop(state state) { +loop: + for cmd := range s.cmds { + switch cmd.op { + case unsub: + if cmd.query != nil { + state.remove(cmd.clientID, cmd.query.String(), ErrUnsubscribed) + } else { + state.removeClient(cmd.clientID, ErrUnsubscribed) + } + case shutdown: + state.removeAll(nil) + break loop + case sub: + state.add(cmd.clientID, cmd.query, cmd.subscription) + case pub: + if err := state.send(cmd.msg, cmd.events); err != nil { + s.Logger.Error("Error querying for events", "err", err) + } + } + } +} + +func (state *state) add(clientID string, q Query, subscription *Subscription) { + qStr := q.String() + + // initialize subscription for this client per query if needed + if _, ok := state.subscriptions[qStr]; !ok { + state.subscriptions[qStr] = make(map[string]*Subscription) + } + // create subscription + state.subscriptions[qStr][clientID] = subscription + + // initialize query if needed + if _, ok := state.queries[qStr]; !ok { + state.queries[qStr] = &queryPlusRefCount{q: q, refCount: 0} + } + // increment reference counter + state.queries[qStr].refCount++ +} + +func (state *state) remove(clientID string, qStr string, reason error) { + clientSubscriptions, ok := state.subscriptions[qStr] + if !ok { + return + } + + subscription, ok := clientSubscriptions[clientID] + if !ok { + return + } + + subscription.cancel(reason) + + // remove client from query map. + // if query has no other clients subscribed, remove it. 
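+	// (The refCount in state.queries is decremented in step with this map,
+	// so the compiled query itself is dropped only once its last subscriber
+	// is gone.)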
+ delete(state.subscriptions[qStr], clientID) + if len(state.subscriptions[qStr]) == 0 { + delete(state.subscriptions, qStr) + } + + // decrease ref counter in queries + state.queries[qStr].refCount-- + // remove the query if nobody else is using it + if state.queries[qStr].refCount == 0 { + delete(state.queries, qStr) + } +} + +func (state *state) removeClient(clientID string, reason error) { + for qStr, clientSubscriptions := range state.subscriptions { + if _, ok := clientSubscriptions[clientID]; ok { + state.remove(clientID, qStr, reason) + } + } +} + +func (state *state) removeAll(reason error) { + for qStr, clientSubscriptions := range state.subscriptions { + for clientID := range clientSubscriptions { + state.remove(clientID, qStr, reason) + } + } +} + +func (state *state) send(msg interface{}, events map[string][]string) error { + for qStr, clientSubscriptions := range state.subscriptions { + q := state.queries[qStr].q + + match, err := q.Matches(events) + if err != nil { + return fmt.Errorf("failed to match against query %s: %w", q.String(), err) + } + + if match { + for clientID, subscription := range clientSubscriptions { + if cap(subscription.out) == 0 { + // block on unbuffered channel + subscription.out <- NewMessage(msg, events) + } else { + // don't block on buffered channels + select { + case subscription.out <- NewMessage(msg, events): + default: + state.remove(clientID, qStr, ErrOutOfCapacity) + } + } + } + } + } + + return nil +} diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go new file mode 100644 index 0000000..5e915f0 --- /dev/null +++ b/libs/pubsub/pubsub_test.go @@ -0,0 +1,507 @@ +package pubsub_test + +import ( + "context" + "fmt" + "runtime/debug" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/strangelove-ventures/cometbft-client/libs/log" + + "github.com/strangelove-ventures/cometbft-client/libs/pubsub" + "github.com/strangelove-ventures/cometbft-client/libs/pubsub/query" +) + +const ( + clientID = "test-client" +) + +func TestSubscribe(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + err := s.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) + + ctx := context.Background() + subscription, err := s.Subscribe(ctx, clientID, query.All) + require.NoError(t, err) + + assert.Equal(t, 1, s.NumClients()) + assert.Equal(t, 1, s.NumClientSubscriptions(clientID)) + + err = s.Publish(ctx, "Ka-Zar") + require.NoError(t, err) + assertReceive(t, "Ka-Zar", subscription.Out()) + + published := make(chan struct{}) + go func() { + defer close(published) + + err := s.Publish(ctx, "Quicksilver") + assert.NoError(t, err) + + err = s.Publish(ctx, "Asylum") + assert.NoError(t, err) + + err = s.Publish(ctx, "Ivan") + assert.NoError(t, err) + }() + + select { + case <-published: + assertReceive(t, "Quicksilver", subscription.Out()) + assertCancelled(t, subscription, pubsub.ErrOutOfCapacity) + case <-time.After(3 * time.Second): + t.Fatal("Expected Publish(Asylum) not to block") + } +} + +func TestSubscribeWithCapacity(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + err := s.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) + + ctx := context.Background() + assert.Panics(t, func() { + _, err = s.Subscribe(ctx, clientID, query.All, -1) + require.NoError(t, err) + }) + assert.Panics(t, func() { + _, err = 
s.Subscribe(ctx, clientID, query.All, 0) + require.NoError(t, err) + }) + subscription, err := s.Subscribe(ctx, clientID, query.All, 1) + require.NoError(t, err) + err = s.Publish(ctx, "Aggamon") + require.NoError(t, err) + assertReceive(t, "Aggamon", subscription.Out()) +} + +func TestSubscribeUnbuffered(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + err := s.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) + + ctx := context.Background() + subscription, err := s.SubscribeUnbuffered(ctx, clientID, query.All) + require.NoError(t, err) + + published := make(chan struct{}) + go func() { + defer close(published) + + err := s.Publish(ctx, "Ultron") + assert.NoError(t, err) + + err = s.Publish(ctx, "Darkhawk") + assert.NoError(t, err) + }() + + select { + case <-published: + t.Fatal("Expected Publish(Darkhawk) to block") + case <-time.After(3 * time.Second): + assertReceive(t, "Ultron", subscription.Out()) + assertReceive(t, "Darkhawk", subscription.Out()) + } +} + +func TestSlowClientIsRemovedWithErrOutOfCapacity(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + err := s.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) + + ctx := context.Background() + subscription, err := s.Subscribe(ctx, clientID, query.All) + require.NoError(t, err) + err = s.Publish(ctx, "Fat Cobra") + require.NoError(t, err) + err = s.Publish(ctx, "Viper") + require.NoError(t, err) + + assertCancelled(t, subscription, pubsub.ErrOutOfCapacity) +} + +func TestDifferentClients(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + err := s.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) + + ctx := context.Background() + subscription1, err := s.Subscribe(ctx, "client-1", query.MustCompile("tm.events.type='NewBlock'")) + require.NoError(t, err) + err = s.PublishWithEvents(ctx, "Iceman", map[string][]string{"tm.events.type": {"NewBlock"}}) + require.NoError(t, err) + assertReceive(t, "Iceman", subscription1.Out()) + + subscription2, err := s.Subscribe( + ctx, + "client-2", + query.MustCompile("tm.events.type='NewBlock' AND abci.account.name='Igor'"), + ) + require.NoError(t, err) + err = s.PublishWithEvents( + ctx, + "Ultimo", + map[string][]string{"tm.events.type": {"NewBlock"}, "abci.account.name": {"Igor"}}, + ) + require.NoError(t, err) + assertReceive(t, "Ultimo", subscription1.Out()) + assertReceive(t, "Ultimo", subscription2.Out()) + + subscription3, err := s.Subscribe( + ctx, + "client-3", + query.MustCompile("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"), + ) + require.NoError(t, err) + err = s.PublishWithEvents(ctx, "Valeria Richards", map[string][]string{"tm.events.type": {"NewRoundStep"}}) + require.NoError(t, err) + assert.Zero(t, len(subscription3.Out())) +} + +func TestSubscribeDuplicateKeys(t *testing.T) { + ctx := context.Background() + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + require.NoError(t, s.Start()) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) + + testCases := []struct { + query string + expected interface{} + }{ + { + "withdraw.rewards='17'", + "Iceman", + }, + { + "withdraw.rewards='22'", + "Iceman", + }, + { + "withdraw.rewards='1' AND withdraw.rewards='22'", + "Iceman", + }, + { + "withdraw.rewards='100'", + nil, + }, + } + + 
for i, tc := range testCases { + sub, err := s.Subscribe(ctx, fmt.Sprintf("client-%d", i), query.MustCompile(tc.query)) + require.NoError(t, err) + + err = s.PublishWithEvents( + ctx, + "Iceman", + map[string][]string{ + "transfer.sender": {"foo", "bar", "baz"}, + "withdraw.rewards": {"1", "17", "22"}, + }, + ) + require.NoError(t, err) + + if tc.expected != nil { + assertReceive(t, tc.expected, sub.Out()) + } else { + require.Zero(t, len(sub.Out())) + } + } +} + +func TestClientSubscribesTwice(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + err := s.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) + + ctx := context.Background() + q := query.MustCompile("tm.events.type='NewBlock'") + + subscription1, err := s.Subscribe(ctx, clientID, q) + require.NoError(t, err) + err = s.PublishWithEvents(ctx, "Goblin Queen", map[string][]string{"tm.events.type": {"NewBlock"}}) + require.NoError(t, err) + assertReceive(t, "Goblin Queen", subscription1.Out()) + + subscription2, err := s.Subscribe(ctx, clientID, q) + require.Error(t, err) + require.Nil(t, subscription2) + + err = s.PublishWithEvents(ctx, "Spider-Man", map[string][]string{"tm.events.type": {"NewBlock"}}) + require.NoError(t, err) + assertReceive(t, "Spider-Man", subscription1.Out()) +} + +func TestUnsubscribe(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + err := s.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) + + ctx := context.Background() + subscription, err := s.Subscribe(ctx, clientID, query.MustCompile("tm.events.type='NewBlock'")) + require.NoError(t, err) + err = s.Unsubscribe(ctx, clientID, query.MustCompile("tm.events.type='NewBlock'")) + require.NoError(t, err) + + err = s.Publish(ctx, "Nick Fury") + require.NoError(t, err) + assert.Zero(t, len(subscription.Out()), "Should not receive anything after Unsubscribe") + + assertCancelled(t, subscription, pubsub.ErrUnsubscribed) +} + +func TestClientUnsubscribesTwice(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + err := s.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) + + ctx := context.Background() + _, err = s.Subscribe(ctx, clientID, query.MustCompile("tm.events.type='NewBlock'")) + require.NoError(t, err) + err = s.Unsubscribe(ctx, clientID, query.MustCompile("tm.events.type='NewBlock'")) + require.NoError(t, err) + + err = s.Unsubscribe(ctx, clientID, query.MustCompile("tm.events.type='NewBlock'")) + assert.Equal(t, pubsub.ErrSubscriptionNotFound, err) + err = s.UnsubscribeAll(ctx, clientID) + assert.Equal(t, pubsub.ErrSubscriptionNotFound, err) +} + +func TestResubscribe(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + err := s.Start() + require.NoError(t, err) + t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) + + ctx := context.Background() + _, err = s.Subscribe(ctx, clientID, query.All) + require.NoError(t, err) + err = s.Unsubscribe(ctx, clientID, query.All) + require.NoError(t, err) + subscription, err := s.Subscribe(ctx, clientID, query.All) + require.NoError(t, err) + + err = s.Publish(ctx, "Cable") + require.NoError(t, err) + assertReceive(t, "Cable", subscription.Out()) +} + +func TestUnsubscribeAll(t *testing.T) { + s := pubsub.NewServer() + s.SetLogger(log.TestingLogger()) + err := s.Start() + require.NoError(t, err) + 
t.Cleanup(func() { + if err := s.Stop(); err != nil { + t.Error(err) + } + }) + + ctx := context.Background() + subscription1, err := s.Subscribe(ctx, clientID, query.MustCompile("tm.events.type='NewBlock'")) + require.NoError(t, err) + subscription2, err := s.Subscribe(ctx, clientID, query.MustCompile("tm.events.type='NewBlockHeader'")) + require.NoError(t, err) + + err = s.UnsubscribeAll(ctx, clientID) + require.NoError(t, err) + + err = s.Publish(ctx, "Nick Fury") + require.NoError(t, err) + assert.Zero(t, len(subscription1.Out()), "Should not receive anything after UnsubscribeAll") + assert.Zero(t, len(subscription2.Out()), "Should not receive anything after UnsubscribeAll") + + assertCancelled(t, subscription1, pubsub.ErrUnsubscribed) + assertCancelled(t, subscription2, pubsub.ErrUnsubscribed) +} + +func TestBufferCapacity(t *testing.T) { + s := pubsub.NewServer(pubsub.BufferCapacity(2)) + s.SetLogger(log.TestingLogger()) + + assert.Equal(t, 2, s.BufferCapacity()) + + ctx := context.Background() + err := s.Publish(ctx, "Nighthawk") + require.NoError(t, err) + err = s.Publish(ctx, "Sage") + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer cancel() + err = s.Publish(ctx, "Ironclad") + if assert.Error(t, err) { + assert.Equal(t, context.DeadlineExceeded, err) + } +} + +func Benchmark10Clients(b *testing.B) { benchmarkNClients(10, b) } +func Benchmark100Clients(b *testing.B) { benchmarkNClients(100, b) } +func Benchmark1000Clients(b *testing.B) { benchmarkNClients(1000, b) } + +func Benchmark10ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(10, b) } +func Benchmark100ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(100, b) } +func Benchmark1000ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(1000, b) } + +func benchmarkNClients(n int, b *testing.B) { + s := pubsub.NewServer() + err := s.Start() + require.NoError(b, err) + + b.Cleanup(func() { + if err := s.Stop(); err != nil { + b.Error(err) + } + }) + + ctx := context.Background() + for i := 0; i < n; i++ { + subscription, err := s.Subscribe( + ctx, + clientID, + query.MustCompile(fmt.Sprintf("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = %d", i)), + ) + if err != nil { + b.Fatal(err) + } + go func() { + for { + select { + case <-subscription.Out(): + continue + case <-subscription.Canceled(): + return + } + } + }() + } + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err = s.PublishWithEvents( + ctx, + "Gamora", + map[string][]string{"abci.Account.Owner": {"Ivan"}, "abci.Invoices.Number": {string(rune(i))}}, + ) + require.NoError(b, err) + } +} + +func benchmarkNClientsOneQuery(n int, b *testing.B) { + s := pubsub.NewServer() + err := s.Start() + require.NoError(b, err) + b.Cleanup(func() { + if err := s.Stop(); err != nil { + b.Error(err) + } + }) + + ctx := context.Background() + q := query.MustCompile("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = 1") + for i := 0; i < n; i++ { + subscription, err := s.Subscribe(ctx, clientID, q) + if err != nil { + b.Fatal(err) + } + go func() { + for { + select { + case <-subscription.Out(): + continue + case <-subscription.Canceled(): + return + } + } + }() + } + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err = s.PublishWithEvents(ctx, "Gamora", map[string][]string{"abci.Account.Owner": {"Ivan"}, + "abci.Invoices.Number": {"1"}}) + require.NoError(b, err) + } +} + +// HELPERS + +func assertReceive(t *testing.T, expected interface{}, ch <-chan pubsub.Message, 
msgAndArgs ...interface{}) { + select { + case actual := <-ch: + assert.Equal(t, expected, actual.Data(), msgAndArgs...) + case <-time.After(1 * time.Second): + t.Errorf("expected to receive %v from the channel, got nothing after 1s", expected) + debug.PrintStack() + } +} + +func assertCancelled(t *testing.T, subscription *pubsub.Subscription, err error) { + _, ok := <-subscription.Canceled() + assert.False(t, ok) + assert.Equal(t, err, subscription.Err()) +} diff --git a/libs/pubsub/query/.gitignore b/libs/pubsub/query/.gitignore new file mode 100644 index 0000000..8b53014 --- /dev/null +++ b/libs/pubsub/query/.gitignore @@ -0,0 +1,7 @@ +# This is a temporary directory to hold the peg binary, +# to work around https://github.com/pointlander/peg/issues/129. +# Note that once we have a new version of peg fixing #129, +# we may still want to keep this .gitignore to prevent anyone +# from accidentally running "git add ." and including their built +# peg binary in a commit. +.bin/ diff --git a/libs/pubsub/query/bench_test.go b/libs/pubsub/query/bench_test.go new file mode 100644 index 0000000..31737cf --- /dev/null +++ b/libs/pubsub/query/bench_test.go @@ -0,0 +1,46 @@ +package query_test + +import ( + "testing" + + "github.com/strangelove-ventures/cometbft-client/libs/pubsub/query" +) + +const testQuery = `tm.events.type='NewBlock' AND abci.account.name='Igor'` + +var testEvents = map[string][]string{ + "tm.events.index": { + "25", + }, + "tm.events.type": { + "NewBlock", + }, + "abci.account.name": { + "Anya", "Igor", + }, +} + +func BenchmarkParseCustom(b *testing.B) { + for i := 0; i < b.N; i++ { + _, err := query.New(testQuery) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkMatchCustom(b *testing.B) { + q, err := query.New(testQuery) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + ok, err := q.Matches(testEvents) + if err != nil { + b.Fatal(err) + } else if !ok { + b.Error("no match") + } + } +} diff --git a/libs/pubsub/query/query.go b/libs/pubsub/query/query.go new file mode 100644 index 0000000..9491699 --- /dev/null +++ b/libs/pubsub/query/query.go @@ -0,0 +1,357 @@ +// Package query implements the custom query format used to filter event +// subscriptions in CometBFT. +// +// abci.invoice.number=22 AND abci.invoice.owner=Ivan +// +// Query expressions can handle attribute values encoding numbers, strings, +// dates, and timestamps. The complete query grammar is described in the +// query/syntax package. +package query + +import ( + "fmt" + "math/big" + "regexp" + "strings" + "time" + + "github.com/strangelove-ventures/cometbft-client/abci/types" + "github.com/strangelove-ventures/cometbft-client/libs/pubsub/query/syntax" +) + +// All is a query that matches all events. +var All *Query + +// A Query is the compiled form of a query. +type Query struct { + ast syntax.Query + conds []condition +} + +// New parses and compiles the query expression into an executable query. +func New(query string) (*Query, error) { + ast, err := syntax.Parse(query) + if err != nil { + return nil, err + } + return Compile(ast) +} + +// MustCompile compiles the query expression into an executable query. +// In case of error, MustCompile will panic. +// +// This is intended for use in program initialization; use query.New if you +// need to check errors. +func MustCompile(query string) *Query { + q, err := New(query) + if err != nil { + panic(err) + } + return q +} + +// Compile compiles the given query AST so it can be used to match events. 
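+//
+// A small sketch of the two-step form, equivalent to calling New directly
+// (error handling elided):
+//
+//	ast, _ := syntax.Parse("tm.event = 'Tx' AND tx.height > 5")
+//	q, _ := Compile(ast)
+//	match, _ := q.Matches(map[string][]string{
+//		"tm.event":  {"Tx"},
+//		"tx.height": {"10"},
+//	})
+//	// match == true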
+func Compile(ast syntax.Query) (*Query, error) { + conds := make([]condition, len(ast)) + for i, q := range ast { + cond, err := compileCondition(q) + if err != nil { + return nil, fmt.Errorf("compile %s: %w", q, err) + } + conds[i] = cond + } + return &Query{ast: ast, conds: conds}, nil +} + +func ExpandEvents(flattenedEvents map[string][]string) []types.Event { + events := make([]types.Event, 0) + + for composite, values := range flattenedEvents { + tokens := strings.Split(composite, ".") + + attrs := make([]types.EventAttribute, len(values)) + for i, v := range values { + attrs[i] = types.EventAttribute{ + Key: tokens[len(tokens)-1], + Value: v, + } + } + + events = append(events, types.Event{ + Type: strings.Join(tokens[:len(tokens)-1], "."), + Attributes: attrs, + }) + } + + return events +} + +// Matches satisfies part of the pubsub.Query interface. This implementation +// never reports an error. A nil *Query matches all events. +func (q *Query) Matches(events map[string][]string) (bool, error) { + if q == nil { + return true, nil + } + return q.matchesEvents(ExpandEvents(events)), nil +} + +// String matches part of the pubsub.Query interface. +func (q *Query) String() string { + if q == nil { + return "" + } + return q.ast.String() +} + +// Syntax returns the syntax tree representation of q. +func (q *Query) Syntax() syntax.Query { + if q == nil { + return nil + } + return q.ast +} + +// matchesEvents reports whether all the conditions match the given events. +func (q *Query) matchesEvents(events []types.Event) bool { + for _, cond := range q.conds { + if !cond.matchesAny(events) { + return false + } + } + return len(events) != 0 +} + +// A condition is a compiled match condition. A condition matches an event if +// the event has the designated type, contains an attribute with the given +// name, and the match function returns true for the attribute value. +type condition struct { + tag string // e.g., "tx.hash" + match func(s string) bool +} + +// findAttr returns a slice of attribute values from event matching the +// condition tag, and reports whether the event type strictly equals the +// condition tag. +func (c condition) findAttr(event types.Event) ([]string, bool) { + if !strings.HasPrefix(c.tag, event.Type) { + return nil, false // type does not match tag + } else if len(c.tag) == len(event.Type) { + return nil, true // type == tag + } + var vals []string + for _, attr := range event.Attributes { + fullName := event.Type + "." + attr.Key + if fullName == c.tag { + vals = append(vals, attr.Value) + } + } + return vals, false +} + +// matchesAny reports whether c matches at least one of the given events. +func (c condition) matchesAny(events []types.Event) bool { + for _, event := range events { + if c.matchesEvent(event) { + return true + } + } + return false +} + +// matchesEvent reports whether c matches the given event. +func (c condition) matchesEvent(event types.Event) bool { + vs, tagEqualsType := c.findAttr(event) + if len(vs) == 0 { + // As a special case, a condition tag that exactly matches the event type + // is matched against an empty string. This allows existence checks to + // work for type-only queries. + if tagEqualsType { + return c.match("") + } + return false + } + + // At this point, we have candidate values. 
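+	// Because repeated attributes yield multiple candidate values, a
+	// condition is satisfied if any one of them matches; e.g. an event with
+	// two name attributes can satisfy both name = 'Igor' and name = 'Ivan'.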
+ for _, v := range vs { + if c.match(v) { + return true + } + } + return false +} + +func compileCondition(cond syntax.Condition) (condition, error) { + out := condition{tag: cond.Tag} + + // Handle existence checks separately to simplify the logic below for + // comparisons that take arguments. + if cond.Op == syntax.TExists { + out.match = func(string) bool { return true } + return out, nil + } + + // All the other operators require an argument. + if cond.Arg == nil { + return condition{}, fmt.Errorf("missing argument for %v", cond.Op) + } + + // Precompile the argument value matcher. + argType := cond.Arg.Type + var argValue interface{} + + switch argType { + case syntax.TString: + argValue = cond.Arg.Value() + case syntax.TNumber: + argValue = cond.Arg.Number() + case syntax.TTime, syntax.TDate: + argValue = cond.Arg.Time() + default: + return condition{}, fmt.Errorf("unknown argument type %v", argType) + } + + mcons := opTypeMap[cond.Op][argType] + if mcons == nil { + return condition{}, fmt.Errorf("invalid op/arg combination (%v, %v)", cond.Op, argType) + } + out.match = mcons(argValue) + return out, nil +} + +// We use this regex to support queries of the form "8atom", "6.5stake", +// which are actively used in production. +// The regex takes care of removing the non-number suffix. +var extractNum = regexp.MustCompile(`^\d+(\.\d+)?`) + +func parseNumber(s string) (*big.Float, error) { + intVal := new(big.Int) + if _, ok := intVal.SetString(s, 10); !ok { + f, _, err := big.ParseFloat(extractNum.FindString(s), 10, 125, big.ToNearestEven) + if err != nil { + return nil, err + } + return f, err + } + f, _, err := big.ParseFloat(extractNum.FindString(s), 10, uint(intVal.BitLen()), big.ToNearestEven) + return f, err + +} + +// A map of operator ⇒ argtype ⇒ match-constructor. +// An entry does not exist if the combination is not valid. +// +// Disable the dupl lint for this map. The result isn't even correct. 
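+//
+// As an illustration of the dispatch, compileCondition resolves an
+// (operator, argument-type) pair to a matcher constructor; hypothetically:
+//
+//	mcons := opTypeMap[syntax.TEq][syntax.TNumber]
+//	match := mcons(big.NewFloat(10)) // argument value chosen for the example
+//	_ = match("10")                  // true: "10" parses and compares equal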
+// +//nolint:dupl +var opTypeMap = map[syntax.Token]map[syntax.Token]func(interface{}) func(string) bool{ + syntax.TContains: { + syntax.TString: func(v interface{}) func(string) bool { + return func(s string) bool { + return strings.Contains(s, v.(string)) + } + }, + }, + syntax.TEq: { + syntax.TString: func(v interface{}) func(string) bool { + return func(s string) bool { return s == v.(string) } + }, + syntax.TNumber: func(v interface{}) func(string) bool { + return func(s string) bool { + w, err := parseNumber(s) + return err == nil && w.Cmp(v.(*big.Float)) == 0 + } + }, + syntax.TDate: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseDate(s) + return err == nil && ts.Equal(v.(time.Time)) + } + }, + syntax.TTime: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseTime(s) + return err == nil && ts.Equal(v.(time.Time)) + } + }, + }, + syntax.TLt: { + syntax.TNumber: func(v interface{}) func(string) bool { + return func(s string) bool { + w, err := parseNumber(s) + return err == nil && w.Cmp(v.(*big.Float)) < 0 + } + }, + syntax.TDate: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseDate(s) + return err == nil && ts.Before(v.(time.Time)) + } + }, + syntax.TTime: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseTime(s) + return err == nil && ts.Before(v.(time.Time)) + } + }, + }, + syntax.TLeq: { + syntax.TNumber: func(v interface{}) func(string) bool { + return func(s string) bool { + w, err := parseNumber(s) + return err == nil && w.Cmp(v.(*big.Float)) <= 0 + } + }, + syntax.TDate: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseDate(s) + return err == nil && !ts.After(v.(time.Time)) + } + }, + syntax.TTime: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseTime(s) + return err == nil && !ts.After(v.(time.Time)) + } + }, + }, + syntax.TGt: { + syntax.TNumber: func(v interface{}) func(string) bool { + return func(s string) bool { + w, err := parseNumber(s) + return err == nil && w.Cmp(v.(*big.Float)) > 0 + } + }, + syntax.TDate: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseDate(s) + return err == nil && ts.After(v.(time.Time)) + } + }, + syntax.TTime: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseTime(s) + return err == nil && ts.After(v.(time.Time)) + } + }, + }, + syntax.TGeq: { + syntax.TNumber: func(v interface{}) func(string) bool { + return func(s string) bool { + w, err := parseNumber(s) + return err == nil && w.Cmp(v.(*big.Float)) >= 0 + } + }, + syntax.TDate: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseDate(s) + return err == nil && !ts.Before(v.(time.Time)) + } + }, + syntax.TTime: func(v interface{}) func(string) bool { + return func(s string) bool { + ts, err := syntax.ParseTime(s) + return err == nil && !ts.Before(v.(time.Time)) + } + }, + }, +} diff --git a/libs/pubsub/query/query_test.go b/libs/pubsub/query/query_test.go new file mode 100644 index 0000000..09de9ba --- /dev/null +++ b/libs/pubsub/query/query_test.go @@ -0,0 +1,486 @@ +package query_test + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/strangelove-ventures/cometbft-client/abci/types" + 
"github.com/strangelove-ventures/cometbft-client/libs/pubsub" + "github.com/strangelove-ventures/cometbft-client/libs/pubsub/query" + "github.com/strangelove-ventures/cometbft-client/libs/pubsub/query/syntax" +) + +var _ pubsub.Query = (*query.Query)(nil) + +// Example events from the OpenAPI documentation: +// +// https://github.com/cometbft/cometbft/blob/master/rpc/openapi/openapi.yaml +// +// Redactions: +// +// - Add an explicit "tm" event for the built-in attributes. +// - Remove Index fields (not relevant to tests). +// - Add explicit balance values (to use in tests). +var apiEvents = map[string][]string{ + "tm.event": { + "Tx", + }, + "tm.hash": { + "XYZ", + }, + "tm.height": { + "5", + }, + "rewards.withdraw.address": { + "AddrA", + "AddrB", + }, + "rewards.withdraw.source": { + "SrcX", + "SrcY", + }, + "rewards.withdraw.amount": { + "100", + "45", + }, + "rewards.withdraw.balance": { + "1500", + "999", + }, + "transfer.sender": { + "AddrC", + }, + "transfer.recipient": { + "AddrD", + }, + "transfer.amount": { + "160", + }, +} + +var apiTypeEvents = []types.Event{ + { + Type: "tm", + Attributes: []types.EventAttribute{ + { + Key: "event", + Value: "Tx", + }, + }, + }, + { + Type: "tm", + Attributes: []types.EventAttribute{ + { + Key: "hash", + Value: "XYZ", + }, + }, + }, + { + Type: "tm", + Attributes: []types.EventAttribute{ + { + Key: "height", + Value: "5", + }, + }, + }, + { + Type: "rewards.withdraw", + Attributes: []types.EventAttribute{ + { + Key: "address", + Value: "AddrA", + }, + { + Key: "address", + Value: "AddrB", + }, + }, + }, + { + Type: "rewards.withdraw", + Attributes: []types.EventAttribute{ + { + Key: "source", + Value: "SrcX", + }, + { + Key: "source", + Value: "SrcY", + }, + }, + }, + { + Type: "rewards.withdraw", + Attributes: []types.EventAttribute{ + { + Key: "amount", + Value: "100", + }, + { + Key: "amount", + Value: "45", + }, + }, + }, + { + Type: "rewards.withdraw", + Attributes: []types.EventAttribute{ + { + Key: "balance", + Value: "1500", + }, + { + Key: "balance", + Value: "999", + }, + }, + }, + { + Type: "transfer", + Attributes: []types.EventAttribute{ + { + Key: "sender", + Value: "AddrC", + }, + }, + }, + { + Type: "transfer", + Attributes: []types.EventAttribute{ + { + Key: "recipient", + Value: "AddrD", + }, + }, + }, + { + Type: "transfer", + Attributes: []types.EventAttribute{ + { + Key: "amount", + Value: "160", + }, + }, + }, +} + +func TestBigNumbers(t *testing.T) { + + apiBigNumTest := map[string][]string{ + "big.value": { + "99999999999999999999", + }, + "big2.value": { + "18446744073709551615", // max(uint64) == 18446744073709551615 + }, + "big.floatvalue": { + "99999999999999999999.10", + }, + "big2.floatvalue": { + "18446744073709551615.6", // max(uint64) == 18446744073709551615 + }, + } + + testCases := []struct { + s string + events map[string][]string + matches bool + }{ + + // Test cases for values that exceed the capacity if int64/float64. 
+ {`big.value >= 99999999999999999999`, + apiBigNumTest, + true}, + {`big.value > 99999999999999999998`, + apiBigNumTest, + true}, + {`big2.value <= 18446744073709551615`, + apiBigNumTest, true}, + {`big.floatvalue >= 99999999999999999999`, + apiBigNumTest, + true}, + {`big.floatvalue > 99999999999999999998.10`, + apiBigNumTest, + true}, + {`big.floatvalue > 99999999999999999998`, + apiBigNumTest, + true}, + {`big2.floatvalue <= 18446744073709551615.6`, + apiBigNumTest, + true}, + {`big2.floatvalue <= 18446744073709551615.6`, + apiBigNumTest, + true}, + {`big2.floatvalue >= 18446744073709551615`, + apiBigNumTest, + true}, + {`big2.floatvalue >= 12.5`, + apiBigNumTest, + true}, + {`big.value >= 10`, + apiBigNumTest, + true}, + } + + for i, tc := range testCases { + t.Run(fmt.Sprintf("%02d", i+1), func(t *testing.T) { + c, err := query.New(tc.s) + if err != nil { + t.Fatalf("NewCompiled %#q: unexpected error: %v", tc.s, err) + } + + got, err := c.Matches(tc.events) + if err != nil { + t.Errorf("Query: %#q\nInput: %+v\nMatches: got error %v", + tc.s, tc.events, err) + } + if got != tc.matches { + t.Errorf("Query: %#q\nInput: %+v\nMatches: got %v, want %v", + tc.s, tc.events, got, tc.matches) + } + }) + } +} + +func TestCompiledMatches(t *testing.T) { + var ( + txDate = "2017-01-01" + txTime = "2018-05-03T14:45:00Z" + ) + + //nolint:lll + testCases := []struct { + s string + events map[string][]string + matches bool + }{ + {`tm.events.type='NewBlock'`, + newTestEvents(`tm|events.type=NewBlock`), + true}, + {`tx.gas > 7`, + newTestEvents(`tx|gas=8`), + true}, + {`transfer.amount > 7`, + newTestEvents(`transfer|amount=8stake`), + true}, + {`transfer.amount > 7`, + newTestEvents(`transfer|amount=8.045`), + true}, + {`transfer.amount > 7.043`, + newTestEvents(`transfer|amount=8.045stake`), + true}, + {`transfer.amount > 8.045`, + newTestEvents(`transfer|amount=8.045stake`), + false}, + {`tx.gas > 7 AND tx.gas < 9`, + newTestEvents(`tx|gas=8`), + true}, + {`body.weight >= 3.5`, + newTestEvents(`body|weight=3.5`), + true}, + {`account.balance < 1000.0`, + newTestEvents(`account|balance=900`), + true}, + {`apples.kg <= 4`, + newTestEvents(`apples|kg=4.0`), + true}, + {`body.weight >= 4.5`, + newTestEvents(`body|weight=4.5`), + true}, + {`oranges.kg < 4 AND watermellons.kg > 10`, + newTestEvents(`oranges|kg=3`, `watermellons|kg=12`), + true}, + {`peaches.kg < 4`, + newTestEvents(`peaches|kg=5`), + false}, + {`tx.date > DATE 2017-01-01`, + newTestEvents(`tx|date=` + time.Now().Format(syntax.DateFormat)), + true}, + {`tx.date = DATE 2017-01-01`, + newTestEvents(`tx|date=` + txDate), + true}, + {`tx.date = DATE 2018-01-01`, + newTestEvents(`tx|date=` + txDate), + false}, + {`tx.time >= TIME 2013-05-03T14:45:00Z`, + newTestEvents(`tx|time=` + time.Now().Format(syntax.TimeFormat)), + true}, + {`tx.time = TIME 2013-05-03T14:45:00Z`, + newTestEvents(`tx|time=` + txTime), + false}, + {`abci.owner.name CONTAINS 'Igor'`, + newTestEvents(`abci|owner.name=Igor|owner.name=Ivan`), + true}, + {`abci.owner.name CONTAINS 'Igor'`, + newTestEvents(`abci|owner.name=Pavel|owner.name=Ivan`), + false}, + {`abci.owner.name = 'Igor'`, + newTestEvents(`abci|owner.name=Igor|owner.name=Ivan`), + true}, + {`abci.owner.name = 'Ivan'`, + newTestEvents(`abci|owner.name=Igor|owner.name=Ivan`), + true}, + {`abci.owner.name = 'Ivan' AND abci.owner.name = 'Igor'`, + newTestEvents(`abci|owner.name=Igor|owner.name=Ivan`), + true}, + {`abci.owner.name = 'Ivan' AND abci.owner.name = 'John'`, + 
newTestEvents(`abci|owner.name=Igor|owner.name=Ivan`), + false}, + {`tm.events.type='NewBlock'`, + newTestEvents(`tm|events.type=NewBlock`, `app|name=fuzzed`), + true}, + {`app.name = 'fuzzed'`, + newTestEvents(`tm|events.type=NewBlock`, `app|name=fuzzed`), + true}, + {`tm.events.type='NewBlock' AND app.name = 'fuzzed'`, + newTestEvents(`tm|events.type=NewBlock`, `app|name=fuzzed`), + true}, + {`tm.events.type='NewHeader' AND app.name = 'fuzzed'`, + newTestEvents(`tm|events.type=NewBlock`, `app|name=fuzzed`), + false}, + {`slash EXISTS`, + newTestEvents(`slash|reason=missing_signature|power=6000`), + true}, + {`slash EXISTS`, + newTestEvents(`transfer|recipient=cosmos1gu6y2a0ffteesyeyeesk23082c6998xyzmt9mz|sender=cosmos1crje20aj4gxdtyct7z3knxqry2jqt2fuaey6u5`), + false}, + {`slash.reason EXISTS AND slash.power > 1000`, + newTestEvents(`slash|reason=missing_signature|power=6000`), + true}, + {`slash.reason EXISTS AND slash.power > 1000`, + newTestEvents(`slash|reason=missing_signature|power=500`), + false}, + {`slash.reason EXISTS`, + newTestEvents(`transfer|recipient=cosmos1gu6y2a0ffteesyeyeesk23082c6998xyzmt9mz|sender=cosmos1crje20aj4gxdtyct7z3knxqry2jqt2fuaey6u5`), + false}, + + // Test cases based on the OpenAPI examples. + {`tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA'`, + apiEvents, true}, + {`tm.event = 'Tx' AND rewards.withdraw.address = 'AddrA' AND rewards.withdraw.source = 'SrcY'`, + apiEvents, true}, + {`tm.event = 'Tx' AND transfer.sender = 'AddrA'`, + apiEvents, false}, + {`tm.event = 'Tx' AND transfer.sender = 'AddrC'`, + apiEvents, true}, + {`tm.event = 'Tx' AND transfer.sender = 'AddrZ'`, + apiEvents, false}, + {`tm.event = 'Tx' AND rewards.withdraw.address = 'AddrZ'`, + apiEvents, false}, + {`tm.event = 'Tx' AND rewards.withdraw.source = 'W'`, + apiEvents, false}, + } + + // NOTE: The original implementation allowed arbitrary prefix matches on + // attribute tags, e.g., "sl" would match "slash". + // + // That is weird and probably wrong: "foo.ba" should not match "foo.bar", + // or there is no way to distinguish the case where there were two values + // for "foo.bar" or one value each for "foo.ba" and "foo.bar". + // + // Apart from a single test case, I could not find any attested usage of + // this implementation detail. It isn't documented in the OpenAPI docs and + // is not shown in any of the example inputs. + // + // On that basis, I removed that test case. This implementation still does + // correctly handle variable type/attribute splits ("x", "y.z" / "x.y", "z") + // since that was required by the original "flattened" event representation. 
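+//
+// Concretely: the flattened key "x.y.z" expands to event type "x.y" with
+// attribute key "z", and a condition tag "x.y.z" still finds it, because
+// findAttr compares the tag against type + "." + attribute key.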
+
+	for i, tc := range testCases {
+		t.Run(fmt.Sprintf("%02d", i+1), func(t *testing.T) {
+			c, err := query.New(tc.s)
+			if err != nil {
+				t.Fatalf("NewCompiled %#q: unexpected error: %v", tc.s, err)
+			}
+
+			got, err := c.Matches(tc.events)
+			if err != nil {
+				t.Errorf("Query: %#q\nInput: %+v\nMatches: got error %v",
+					tc.s, tc.events, err)
+			}
+			if got != tc.matches {
+				t.Errorf("Query: %#q\nInput: %+v\nMatches: got %v, want %v",
+					tc.s, tc.events, got, tc.matches)
+			}
+		})
+	}
+}
+
+func sortEvents(events []types.Event) []types.Event {
+	sort.Slice(events, func(i, j int) bool {
+		if events[i].Type == events[j].Type {
+			return events[i].Attributes[0].Key < events[j].Attributes[0].Key
+		}
+		return events[i].Type < events[j].Type
+	})
+	return events
+}
+
+func TestExpandEvents(t *testing.T) {
+	expanded := query.ExpandEvents(apiEvents)
+	bz, err := json.Marshal(sortEvents(expanded))
+	require.NoError(t, err)
+	bz2, err := json.Marshal(sortEvents(apiTypeEvents))
+	require.NoError(t, err)
+	if string(bz) != string(bz2) {
+		t.Errorf("got %s, want %v", string(bz), string(bz2))
+	}
+}
+
+func TestAllMatchesAll(t *testing.T) {
+	events := newTestEvents(
+		``,
+		`Asher|Roth=`,
+		`Route|66=`,
+		`Rilly|Blue=`,
+	)
+	keys := make([]string, 0)
+	for k := range events {
+		keys = append(keys, k)
+	}
+	for _, key := range keys {
+		delete(events, key)
+		match, err := query.All.Matches(events)
+		if err != nil {
+			t.Errorf("Matches failed: %v", err)
+		} else if !match {
+			t.Errorf("Did not match on %+v", events)
+		}
+	}
+}
+
+// addNewTestEvent parses a template string of the form
+// "type|attr1=val1|attr2=val2|..." and appends the resulting values to the
+// flattened events map, keyed by "type.attr".
+func addNewTestEvent(events map[string][]string, s string) {
+	parts := strings.Split(s, "|")
+	key := parts[0]
+	for _, kv := range parts[1:] {
+		k, v := splitKV(kv)
+		k = key + "." + k
+		events[k] = append(events[k], v)
+	}
+}
+
+// newTestEvents constructs a flattened events map by applying addNewTestEvent
+// to each element of ss.
+func newTestEvents(ss ...string) map[string][]string {
+	events := make(map[string][]string)
+	for _, s := range ss {
+		addNewTestEvent(events, s)
+	}
+	return events
+}
+
+func splitKV(s string) (key, value string) {
+	kv := strings.SplitN(s, "=", 2)
+	return kv[0], kv[1]
+}
diff --git a/libs/pubsub/query/syntax/doc.go b/libs/pubsub/query/syntax/doc.go
new file mode 100644
index 0000000..b9fb1af
--- /dev/null
+++ b/libs/pubsub/query/syntax/doc.go
@@ -0,0 +1,33 @@
+// Package syntax defines a scanner and parser for the CometBFT event filter
+// query language. A query selects events by their types and attribute values.
+//
+// # Grammar
+//
+// The grammar of the query language is defined by the following EBNF:
+//
+// query = conditions EOF
+// conditions = condition {"AND" condition}
+// condition = tag comparison
+// comparison = equal / order / contains / "EXISTS"
+// equal = "=" (date / number / time / value)
+// order = cmp (date / number / time)
+// contains = "CONTAINS" value
+// cmp = "<" / "<=" / ">" / ">="
+//
+// The lexical terms are defined here using RE2 regular expression notation:
+//
+// // The name of an event attribute (type.value)
+// tag = #'\w+(\.\w+)*'
+//
+// // A datestamp (YYYY-MM-DD)
+// date = #'DATE \d{4}-\d{2}-\d{2}'
+//
+// // A number with optional fractional parts (0, 10, 3.25)
+// number = #'\d+(\.\d+)?'
+// +// // An RFC3339 timestamp (2021-11-23T22:04:19-09:00) +// time = #'TIME \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}([-+]\d{2}:\d{2}|Z)' +// +// // A quoted literal string value ('a b c') +// value = #'\'[^\']*\'' +package syntax diff --git a/libs/pubsub/query/syntax/parser.go b/libs/pubsub/query/syntax/parser.go new file mode 100644 index 0000000..26c8554 --- /dev/null +++ b/libs/pubsub/query/syntax/parser.go @@ -0,0 +1,230 @@ +package syntax + +import ( + "fmt" + "io" + "math/big" + "strings" + "time" +) + +// Parse parses the specified query string. It is shorthand for constructing a +// parser for s and calling its Parse method. +func Parse(s string) (Query, error) { + return NewParser(strings.NewReader(s)).Parse() +} + +// Query is the root of the parse tree for a query. A query is the conjunction +// of one or more conditions. +type Query []Condition + +func (q Query) String() string { + ss := make([]string, len(q)) + for i, cond := range q { + ss[i] = cond.String() + } + return strings.Join(ss, " AND ") +} + +// A Condition is a single conditional expression, consisting of a tag, a +// comparison operator, and an optional argument. The type of the argument +// depends on the operator. +type Condition struct { + Tag string + Op Token + Arg *Arg + + opText string +} + +func (c Condition) String() string { + s := c.Tag + " " + c.opText + if c.Arg != nil { + return s + " " + c.Arg.String() + } + return s +} + +// An Arg is the argument of a comparison operator. +type Arg struct { + Type Token + text string +} + +func (a *Arg) String() string { + if a == nil { + return "" + } + switch a.Type { + case TString: + return "'" + a.text + "'" + case TTime: + return "TIME " + a.text + case TDate: + return "DATE " + a.text + default: + return a.text + } +} + +// Number returns the value of the argument text as a number, or nil if the +// text does not encode a valid number value. +func (a *Arg) Number() *big.Float { + if a == nil { + return nil + } + intVal := new(big.Int) + if _, ok := intVal.SetString(a.text, 10); !ok { + f, _, err := big.ParseFloat(a.text, 10, 125, big.ToNearestEven) + if err != nil { + return nil + } + return f + } + // If it is indeed a big integer, we make sure to convert it to a float with enough precision + // to represent all the bits + bitLen := uint(intVal.BitLen()) + var f *big.Float + var err error + if bitLen <= 64 { + f, _, err = big.ParseFloat(a.text, 10, 0, big.ToNearestEven) + } else { + f, _, err = big.ParseFloat(a.text, 10, bitLen, big.ToNearestEven) + } + if err != nil { + return nil + } + return f + +} + +// Time returns the value of the argument text as a time, or the zero value if +// the text does not encode a timestamp or datestamp. +func (a *Arg) Time() time.Time { + var ts time.Time + if a == nil { + return ts + } + var err error + switch a.Type { + case TDate: + ts, err = ParseDate(a.text) + case TTime: + ts, err = ParseTime(a.text) + } + if err == nil { + return ts + } + return time.Time{} +} + +// Value returns the value of the argument text as a string, or "". +func (a *Arg) Value() string { + if a == nil { + return "" + } + return a.text +} + +// Parser is a query expression parser. The grammar for query expressions is +// defined in the syntax package documentation. +type Parser struct { + scanner *Scanner +} + +// NewParser constructs a new parser that reads the input from r. +func NewParser(r io.Reader) *Parser { + return &Parser{scanner: NewScanner(r)} +} + +// Parse parses the complete input and returns the resulting query. 
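+//
+// A minimal sketch, equivalent to the package-level Parse helper (error
+// handling elided):
+//
+//	p := NewParser(strings.NewReader("tx.height >= 5 AND tm.event = 'Tx'"))
+//	q, _ := p.Parse()
+//	_ = q.String() // round-trips to the normalized query text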
+func (p *Parser) Parse() (Query, error) { + cond, err := p.parseCond() + if err != nil { + return nil, err + } + conds := []Condition{cond} + for p.scanner.Next() != io.EOF { + if tok := p.scanner.Token(); tok != TAnd { + return nil, fmt.Errorf("offset %d: got %v, want %v", p.scanner.Pos(), tok, TAnd) + } + cond, err := p.parseCond() + if err != nil { + return nil, err + } + conds = append(conds, cond) + } + return conds, nil +} + +// parseCond parses a conditional expression: tag OP value. +func (p *Parser) parseCond() (Condition, error) { + var cond Condition + if err := p.require(TTag); err != nil { + return cond, err + } + cond.Tag = p.scanner.Text() + if err := p.require(TLeq, TGeq, TLt, TGt, TEq, TContains, TExists); err != nil { + return cond, err + } + cond.Op = p.scanner.Token() + cond.opText = p.scanner.Text() + + var err error + switch cond.Op { + case TLeq, TGeq, TLt, TGt: + err = p.require(TNumber, TTime, TDate) + case TEq: + err = p.require(TNumber, TTime, TDate, TString) + case TContains: + err = p.require(TString) + case TExists: + // no argument + return cond, nil + default: + return cond, fmt.Errorf("offset %d: unexpected operator %v", p.scanner.Pos(), cond.Op) + } + if err != nil { + return cond, err + } + cond.Arg = &Arg{Type: p.scanner.Token(), text: p.scanner.Text()} + return cond, nil +} + +// require advances the scanner and requires that the resulting token is one of +// the specified token types. +func (p *Parser) require(tokens ...Token) error { + if err := p.scanner.Next(); err != nil { + return fmt.Errorf("offset %d: %w", p.scanner.Pos(), err) + } + got := p.scanner.Token() + for _, tok := range tokens { + if tok == got { + return nil + } + } + return fmt.Errorf("offset %d: got %v, wanted %s", p.scanner.Pos(), got, tokLabel(tokens)) +} + +// tokLabel makes a human-readable summary string for the given token types. +func tokLabel(tokens []Token) string { + if len(tokens) == 1 { + return tokens[0].String() + } + last := len(tokens) - 1 + ss := make([]string, len(tokens)-1) + for i, tok := range tokens[:last] { + ss[i] = tok.String() + } + return strings.Join(ss, ", ") + " or " + tokens[last].String() +} + +// ParseDate parses s as a date string in the format used by DATE values. +func ParseDate(s string) (time.Time, error) { + return time.Parse("2006-01-02", s) +} + +// ParseTime parses s as a timestamp in the format used by TIME values. +func ParseTime(s string) (time.Time, error) { + return time.Parse(time.RFC3339, s) +} diff --git a/libs/pubsub/query/syntax/scanner.go b/libs/pubsub/query/syntax/scanner.go new file mode 100644 index 0000000..332e3f7 --- /dev/null +++ b/libs/pubsub/query/syntax/scanner.go @@ -0,0 +1,312 @@ +package syntax + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + "time" + "unicode" +) + +// Token is the type of a lexical token in the query grammar. +type Token byte + +const ( + TInvalid = iota // invalid or unknown token + TTag // field tag: x.y + TString // string value: 'foo bar' + TNumber // number: 0, 15.5, 100 + TTime // timestamp: TIME yyyy-mm-ddThh:mm:ss([-+]hh:mm|Z) + TDate // datestamp: DATE yyyy-mm-dd + TAnd // operator: AND + TContains // operator: CONTAINS + TExists // operator: EXISTS + TEq // operator: = + TLt // operator: < + TLeq // operator: <= + TGt // operator: > + TGeq // operator: >= + + // Do not reorder these values without updating the scanner code. 
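+	// In particular, scanCompare upgrades "<" to "<=" and ">" to ">=" by
+	// incrementing the token value (s.tok++), which requires TLeq to follow
+	// TLt directly and TGeq to follow TGt directly.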
+) + +var tString = [...]string{ + TInvalid: "invalid token", + TTag: "tag", + TString: "string", + TNumber: "number", + TTime: "timestamp", + TDate: "datestamp", + TAnd: "AND operator", + TContains: "CONTAINS operator", + TExists: "EXISTS operator", + TEq: "= operator", + TLt: "< operator", + TLeq: "<= operator", + TGt: "> operator", + TGeq: ">= operator", +} + +func (t Token) String() string { + v := int(t) + if v > len(tString) { + return "unknown token type" + } + return tString[v] +} + +const ( + // TimeFormat is the format string used for timestamp values. + TimeFormat = time.RFC3339 + + // DateFormat is the format string used for datestamp values. + DateFormat = "2006-01-02" +) + +// Scanner reads lexical tokens of the query language from an input stream. +// Each call to Next advances the scanner to the next token, or reports an +// error. +type Scanner struct { + r *bufio.Reader + buf bytes.Buffer + tok Token + err error + + pos, last, end int +} + +// NewScanner constructs a new scanner that reads from r. +func NewScanner(r io.Reader) *Scanner { return &Scanner{r: bufio.NewReader(r)} } + +// Next advances s to the next token in the input, or reports an error. At the +// end of input, Next returns io.EOF. +func (s *Scanner) Next() error { + s.buf.Reset() + s.pos = s.end + s.tok = TInvalid + s.err = nil + + for { + ch, err := s.rune() + if err != nil { + return s.fail(err) + } + if unicode.IsSpace(ch) { + s.pos = s.end + continue // skip whitespace + } + if '0' <= ch && ch <= '9' { + return s.scanNumber(ch) + } else if isTagRune(ch) { + return s.scanTagLike(ch) + } + switch ch { + case '\'': + return s.scanString(ch) + case '<', '>', '=': + return s.scanCompare(ch) + default: + return s.invalid(ch) + } + } +} + +// Token returns the type of the current input token. +func (s *Scanner) Token() Token { return s.tok } + +// Text returns the text of the current input token. +func (s *Scanner) Text() string { return s.buf.String() } + +// Pos returns the start offset of the current token in the input. +func (s *Scanner) Pos() int { return s.pos } + +// Err returns the last error reported by Next, if any. +func (s *Scanner) Err() error { return s.err } + +// scanNumber scans for numbers with optional fractional parts. +// Examples: 0, 1, 3.14 +func (s *Scanner) scanNumber(first rune) error { + s.buf.WriteRune(first) + if err := s.scanWhile(isDigit); err != nil { + return err + } + + ch, err := s.rune() + if err != nil && err != io.EOF { + return err + } + if ch == '.' 
{ + s.buf.WriteRune(ch) + if err := s.scanWhile(isDigit); err != nil { + return err + } + } else { + s.unrune() + } + s.tok = TNumber + return nil +} + +func (s *Scanner) scanString(first rune) error { + // discard opening quote + for { + ch, err := s.rune() + if err != nil { + return s.fail(err) + } else if ch == first { + // discard closing quote + s.tok = TString + return nil + } + s.buf.WriteRune(ch) + } +} + +func (s *Scanner) scanCompare(first rune) error { + s.buf.WriteRune(first) + switch first { + case '=': + s.tok = TEq + return nil + case '<': + s.tok = TLt + case '>': + s.tok = TGt + default: + return s.invalid(first) + } + + ch, err := s.rune() + if err == io.EOF { + return nil // the assigned token is correct + } else if err != nil { + return s.fail(err) + } + if ch == '=' { + s.buf.WriteRune(ch) + s.tok++ // depends on token order + return nil + } + s.unrune() + return nil +} + +func (s *Scanner) scanTagLike(first rune) error { + s.buf.WriteRune(first) + var hasSpace bool + for { + ch, err := s.rune() + if err == io.EOF { + break + } else if err != nil { + return s.fail(err) + } + if !isTagRune(ch) { + hasSpace = ch == ' ' // to check for TIME, DATE + break + } + s.buf.WriteRune(ch) + } + + text := s.buf.String() + switch text { + case "TIME": + if hasSpace { + return s.scanTimestamp() + } + s.tok = TTag + case "DATE": + if hasSpace { + return s.scanDatestamp() + } + s.tok = TTag + case "AND": + s.tok = TAnd + case "EXISTS": + s.tok = TExists + case "CONTAINS": + s.tok = TContains + default: + s.tok = TTag + } + s.unrune() + return nil +} + +func (s *Scanner) scanTimestamp() error { + s.buf.Reset() // discard "TIME" label + if err := s.scanWhile(isTimeRune); err != nil { + return err + } + if ts, err := time.Parse(TimeFormat, s.buf.String()); err != nil { + return s.fail(fmt.Errorf("invalid TIME value: %w", err)) + } else if y := ts.Year(); y < 1900 || y > 2999 { + return s.fail(fmt.Errorf("timestamp year %d out of range", ts.Year())) + } + s.tok = TTime + return nil +} + +func (s *Scanner) scanDatestamp() error { + s.buf.Reset() // discard "DATE" label + if err := s.scanWhile(isDateRune); err != nil { + return err + } + if ts, err := time.Parse(DateFormat, s.buf.String()); err != nil { + return s.fail(fmt.Errorf("invalid DATE value: %w", err)) + } else if y := ts.Year(); y < 1900 || y > 2999 { + return s.fail(fmt.Errorf("datestamp year %d out of range", ts.Year())) + } + s.tok = TDate + return nil +} + +func (s *Scanner) scanWhile(ok func(rune) bool) error { + for { + ch, err := s.rune() + if err == io.EOF { + return nil + } else if err != nil { + return s.fail(err) + } else if !ok(ch) { + s.unrune() + return nil + } + s.buf.WriteRune(ch) + } +} + +func (s *Scanner) rune() (rune, error) { + ch, nb, err := s.r.ReadRune() + s.last = nb + s.end += nb + return ch, err +} + +func (s *Scanner) unrune() { + _ = s.r.UnreadRune() + s.end -= s.last +} + +func (s *Scanner) fail(err error) error { + s.err = err + return err +} + +func (s *Scanner) invalid(ch rune) error { + return s.fail(fmt.Errorf("invalid input %c at offset %d", ch, s.end)) +} + +func isDigit(r rune) bool { return '0' <= r && r <= '9' } + +func isTagRune(r rune) bool { + return r == '.' 
|| r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) +} + +func isTimeRune(r rune) bool { + return strings.ContainsRune("-T:+Z", r) || isDigit(r) +} + +func isDateRune(r rune) bool { return isDigit(r) || r == '-' } diff --git a/libs/pubsub/query/syntax/syntax_test.go b/libs/pubsub/query/syntax/syntax_test.go new file mode 100644 index 0000000..a815a94 --- /dev/null +++ b/libs/pubsub/query/syntax/syntax_test.go @@ -0,0 +1,190 @@ +package syntax_test + +import ( + "io" + "reflect" + "strings" + "testing" + + "github.com/strangelove-ventures/cometbft-client/libs/pubsub/query/syntax" +) + +func TestScanner(t *testing.T) { + tests := []struct { + input string + want []syntax.Token + }{ + // Empty inputs + {"", nil}, + {" ", nil}, + {"\t\n ", nil}, + + // Numbers + {`0 123`, []syntax.Token{syntax.TNumber, syntax.TNumber}}, + {`0.32 3.14`, []syntax.Token{syntax.TNumber, syntax.TNumber}}, + + // Tags + {`foo foo.bar`, []syntax.Token{syntax.TTag, syntax.TTag}}, + + // Strings (values) + {` '' x 'x' 'x y'`, []syntax.Token{syntax.TString, syntax.TTag, syntax.TString, syntax.TString}}, + {` 'you are not your job' `, []syntax.Token{syntax.TString}}, + + // Comparison operators + {`< <= = > >=`, []syntax.Token{ + syntax.TLt, syntax.TLeq, syntax.TEq, syntax.TGt, syntax.TGeq, + }}, + + // Mixed values of various kinds. + {`x AND y`, []syntax.Token{syntax.TTag, syntax.TAnd, syntax.TTag}}, + {`x.y CONTAINS 'z'`, []syntax.Token{syntax.TTag, syntax.TContains, syntax.TString}}, + {`foo EXISTS`, []syntax.Token{syntax.TTag, syntax.TExists}}, + {`and AND`, []syntax.Token{syntax.TTag, syntax.TAnd}}, + + // Timestamp + {`TIME 2021-11-23T15:16:17Z`, []syntax.Token{syntax.TTime}}, + + // Datestamp + {`DATE 2021-11-23`, []syntax.Token{syntax.TDate}}, + } + + for _, test := range tests { + s := syntax.NewScanner(strings.NewReader(test.input)) + var got []syntax.Token + for s.Next() == nil { + got = append(got, s.Token()) + } + if err := s.Err(); err != io.EOF { + t.Errorf("Next: unexpected error: %v", err) + } + + if !reflect.DeepEqual(got, test.want) { + t.Logf("Scanner input: %q", test.input) + t.Errorf("Wrong tokens:\ngot: %+v\nwant: %+v", got, test.want) + } + } +} + +func TestScannerErrors(t *testing.T) { + tests := []struct { + input string + }{ + {`'incomplete string`}, + {`-23`}, + {`&`}, + {`DATE xyz-pdq`}, + {`DATE xyzp-dq-zv`}, + {`DATE 0000-00-00`}, + {`DATE 0000-00-000`}, + {`DATE 2021-01-99`}, + {`TIME 2021-01-01T34:56:78Z`}, + {`TIME 2021-01-99T14:56:08Z`}, + {`TIME 2021-01-99T34:56:08`}, + {`TIME 2021-01-99T34:56:11+3`}, + } + for _, test := range tests { + s := syntax.NewScanner(strings.NewReader(test.input)) + if err := s.Next(); err == nil { + t.Errorf("Next: got %v (%#q), want error", s.Token(), s.Text()) + } + } +} + +// These parser tests were copied from the original implementation of the query +// parser, and are preserved here as a compatibility check. 
+func TestParseValid(t *testing.T) { + tests := []struct { + input string + valid bool + }{ + {"tm.events.type='NewBlock'", true}, + {"tm.events.type = 'NewBlock'", true}, + {"tm.events.name = ''", true}, + {"tm.events.type='TIME'", true}, + {"tm.events.type='DATE'", true}, + {"tm.events.type='='", true}, + {"tm.events.type='TIME", false}, + {"tm.events.type=TIME'", false}, + {"tm.events.type==", false}, + {"tm.events.type=NewBlock", false}, + {">==", false}, + {"tm.events.type 'NewBlock' =", false}, + {"tm.events.type>'NewBlock'", false}, + {"", false}, + {"=", false}, + {"='NewBlock'", false}, + {"tm.events.type=", false}, + + {"tm.events.typeNewBlock", false}, + {"tm.events.type'NewBlock'", false}, + {"'NewBlock'", false}, + {"NewBlock", false}, + {"", false}, + + {"tm.events.type='NewBlock' AND abci.account.name='Igor'", true}, + {"tm.events.type='NewBlock' AND", false}, + {"tm.events.type='NewBlock' AN", false}, + {"tm.events.type='NewBlock' AN tm.events.type='NewBlockHeader'", false}, + {"AND tm.events.type='NewBlock' ", false}, + + {"abci.account.name CONTAINS 'Igor'", true}, + + {"tx.date > DATE 2013-05-03", true}, + {"tx.date < DATE 2013-05-03", true}, + {"tx.date <= DATE 2013-05-03", true}, + {"tx.date >= DATE 2013-05-03", true}, + {"tx.date >= DAT 2013-05-03", false}, + {"tx.date <= DATE2013-05-03", false}, + {"tx.date <= DATE -05-03", false}, + {"tx.date >= DATE 20130503", false}, + {"tx.date >= DATE 2013+01-03", false}, + // incorrect year, month, day + {"tx.date >= DATE 0013-01-03", false}, + {"tx.date >= DATE 2013-31-03", false}, + {"tx.date >= DATE 2013-01-83", false}, + + {"tx.date > TIME 2013-05-03T14:45:00+07:00", true}, + {"tx.date < TIME 2013-05-03T14:45:00-02:00", true}, + {"tx.date <= TIME 2013-05-03T14:45:00Z", true}, + {"tx.date >= TIME 2013-05-03T14:45:00Z", true}, + {"tx.date >= TIME2013-05-03T14:45:00Z", false}, + {"tx.date = IME 2013-05-03T14:45:00Z", false}, + {"tx.date = TIME 2013-05-:45:00Z", false}, + {"tx.date >= TIME 2013-05-03T14:45:00", false}, + {"tx.date >= TIME 0013-00-00T14:45:00Z", false}, + {"tx.date >= TIME 2013+05=03T14:45:00Z", false}, + + {"account.balance=100", true}, + {"account.balance >= 200", true}, + {"account.balance >= -300", false}, + {"account.balance >>= 400", false}, + {"account.balance=33.22.1", false}, + + {"slashing.amount EXISTS", true}, + {"slashing.amount EXISTS AND account.balance=100", true}, + {"account.balance=100 AND slashing.amount EXISTS", true}, + {"slashing EXISTS", true}, + + {"hash='136E18F7E4C348B780CF873A0BF43922E5BAFA63'", true}, + {"hash=136E18F7E4C348B780CF873A0BF43922E5BAFA63", false}, + } + + for _, test := range tests { + q, err := syntax.Parse(test.input) + if test.valid != (err == nil) { + t.Errorf("Parse %#q: valid %v got err=%v", test.input, test.valid, err) + } + + // For valid queries, check that the query round-trips. + if test.valid { + qstr := q.String() + r, err := syntax.Parse(qstr) + if err != nil { + t.Errorf("Reparse %#q failed: %v", qstr, err) + } + if rstr := r.String(); rstr != qstr { + t.Errorf("Reparse diff\nold: %#q\nnew: %#q", qstr, rstr) + } + } + } +} diff --git a/libs/pubsub/subscription.go b/libs/pubsub/subscription.go new file mode 100644 index 0000000..a30fbaf --- /dev/null +++ b/libs/pubsub/subscription.go @@ -0,0 +1,91 @@ +package pubsub + +import ( + "errors" + + cmtsync "github.com/strangelove-ventures/cometbft-client/libs/sync" +) + +var ( + // ErrUnsubscribed is returned by Err when a client unsubscribes. 
+	ErrUnsubscribed = errors.New("client unsubscribed")
+
+	// ErrOutOfCapacity is returned by Err when a client is not pulling messages
+	// fast enough. Note the client's subscription will be terminated.
+	ErrOutOfCapacity = errors.New("internal subscription event buffer is out of capacity")
+)
+
+// A Subscription represents a client subscription for a particular query and
+// consists of three things:
+// 1) channel onto which messages and events are published
+// 2) channel which is closed if a client is too slow or chooses to unsubscribe
+// 3) err indicating the reason for (2)
+type Subscription struct {
+	out chan Message
+
+	canceled chan struct{}
+	mtx      cmtsync.RWMutex
+	err      error
+}
+
+// NewSubscription returns a new subscription with the given outCapacity.
+func NewSubscription(outCapacity int) *Subscription {
+	return &Subscription{
+		out:      make(chan Message, outCapacity),
+		canceled: make(chan struct{}),
+	}
+}
+
+// Out returns a channel onto which messages and events are published.
+// Unsubscribe/UnsubscribeAll does not close the channel to prevent clients
+// from receiving a nil message.
+func (s *Subscription) Out() <-chan Message {
+	return s.out
+}
+
+// Canceled returns a channel that's closed when the subscription is
+// terminated; it is meant to be used in a select statement.
+func (s *Subscription) Canceled() <-chan struct{} {
+	return s.canceled
+}
+
+// Err returns nil if the channel returned by Canceled is not yet closed.
+// If the channel is closed, Err returns a non-nil error explaining why:
+//   - ErrUnsubscribed if the subscriber chose to unsubscribe,
+//   - ErrOutOfCapacity if the subscriber is not pulling messages fast enough
+//     and the channel returned by Out became full.
+//
+// After Err returns a non-nil error, successive calls to Err return the same
+// error.
+func (s *Subscription) Err() error {
+	s.mtx.RLock()
+	defer s.mtx.RUnlock()
+	return s.err
+}
+
+func (s *Subscription) cancel(err error) {
+	s.mtx.Lock()
+	s.err = err
+	s.mtx.Unlock()
+	close(s.canceled)
+}
+
+// Message glues data and events together.
+type Message struct {
+	data   interface{}
+	events map[string][]string
+}
+
+func NewMessage(data interface{}, events map[string][]string) Message {
+	return Message{data, events}
+}
+
+// Data returns the originally published data.
+func (msg Message) Data() interface{} {
+	return msg.data
+}
+
+// Events returns the events that matched the client's query.
+func (msg Message) Events() map[string][]string {
+	return msg.events
+}
diff --git a/libs/rand/random.go b/libs/rand/random.go
new file mode 100644
index 0000000..732d2c7
--- /dev/null
+++ b/libs/rand/random.go
@@ -0,0 +1,312 @@
+package rand
+
+import (
+	crand "crypto/rand"
+	mrand "math/rand"
+	"time"
+
+	cmtsync "github.com/strangelove-ventures/cometbft-client/libs/sync"
+)
+
+const (
+	strChars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" // 62 characters
+)
+
+// Rand is a PRNG that is seeded with OS randomness.
+// The OS randomness is obtained from crypto/rand; however, none of the provided
+// methods are suitable for cryptographic usage.
+// They all utilize math/rand's PRNG internally.
+//
+// All of the methods here are suitable for concurrent use.
+// This is achieved by using a mutex lock on all of the provided methods.
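+//
+// Illustrative usage (added sketch; concrete values are random):
+//
+//	r := NewRand()
+//	s := r.Str(12)   // 12-character alphanumeric string
+//	n := r.Intn(100) // uniform int in [0, 100)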
+type Rand struct { + cmtsync.Mutex + rand *mrand.Rand +} + +var grand *Rand + +func init() { + grand = NewRand() + grand.init() +} + +func NewRand() *Rand { + rand := &Rand{} + rand.init() + return rand +} + +func (r *Rand) init() { + bz := cRandBytes(8) + var seed uint64 + for i := 0; i < 8; i++ { + seed |= uint64(bz[i]) + seed <<= 8 + } + r.reset(int64(seed)) +} + +func (r *Rand) reset(seed int64) { + // G404: Use of weak random number generator (math/rand instead of crypto/rand) + //nolint:gosec + r.rand = mrand.New(mrand.NewSource(seed)) +} + +//---------------------------------------- +// Global functions + +func Seed(seed int64) { + grand.Seed(seed) +} + +func Str(length int) string { + return grand.Str(length) +} + +func Uint16() uint16 { + return grand.Uint16() +} + +func Uint32() uint32 { + return grand.Uint32() +} + +func Uint64() uint64 { + return grand.Uint64() +} + +func Uint() uint { + return grand.Uint() +} + +func Int16() int16 { + return grand.Int16() +} + +func Int32() int32 { + return grand.Int32() +} + +func Int64() int64 { + return grand.Int64() +} + +func Int() int { + return grand.Int() +} + +func Int31() int32 { + return grand.Int31() +} + +func Int31n(n int32) int32 { + return grand.Int31n(n) +} + +func Int63() int64 { + return grand.Int63() +} + +func Int63n(n int64) int64 { + return grand.Int63n(n) +} + +func Bool() bool { + return grand.Bool() +} + +func Float32() float32 { + return grand.Float32() +} + +func Float64() float64 { + return grand.Float64() +} + +func Time() time.Time { + return grand.Time() +} + +func Bytes(n int) []byte { + return grand.Bytes(n) +} + +func Intn(n int) int { + return grand.Intn(n) +} + +func Perm(n int) []int { + return grand.Perm(n) +} + +//---------------------------------------- +// Rand methods + +func (r *Rand) Seed(seed int64) { + r.Lock() + r.reset(seed) + r.Unlock() +} + +// Str constructs a random alphanumeric string of given length. 
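+// For example, Str(8) yields something like "h9G2kQ1z" (illustrative output).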
+func (r *Rand) Str(length int) string { + if length <= 0 { + return "" + } + + chars := []byte{} +MAIN_LOOP: + for { + val := r.Int63() + for i := 0; i < 10; i++ { + v := int(val & 0x3f) // rightmost 6 bits + if v >= 62 { // only 62 characters in strChars + val >>= 6 + continue + } + chars = append(chars, strChars[v]) + if len(chars) == length { + break MAIN_LOOP + } + val >>= 6 + } + } + + return string(chars) +} + +func (r *Rand) Uint16() uint16 { + return uint16(r.Uint32() & (1<<16 - 1)) +} + +func (r *Rand) Uint32() uint32 { + r.Lock() + u32 := r.rand.Uint32() + r.Unlock() + return u32 +} + +func (r *Rand) Uint64() uint64 { + return uint64(r.Uint32())<<32 + uint64(r.Uint32()) +} + +func (r *Rand) Uint() uint { + r.Lock() + i := r.rand.Int() + r.Unlock() + return uint(i) +} + +func (r *Rand) Int16() int16 { + return int16(r.Uint32() & (1<<16 - 1)) +} + +func (r *Rand) Int32() int32 { + return int32(r.Uint32()) +} + +func (r *Rand) Int64() int64 { + return int64(r.Uint64()) +} + +func (r *Rand) Int() int { + r.Lock() + i := r.rand.Int() + r.Unlock() + return i +} + +func (r *Rand) Int31() int32 { + r.Lock() + i31 := r.rand.Int31() + r.Unlock() + return i31 +} + +func (r *Rand) Int31n(n int32) int32 { + r.Lock() + i31n := r.rand.Int31n(n) + r.Unlock() + return i31n +} + +func (r *Rand) Int63() int64 { + r.Lock() + i63 := r.rand.Int63() + r.Unlock() + return i63 +} + +func (r *Rand) Int63n(n int64) int64 { + r.Lock() + i63n := r.rand.Int63n(n) + r.Unlock() + return i63n +} + +func (r *Rand) Float32() float32 { + r.Lock() + f32 := r.rand.Float32() + r.Unlock() + return f32 +} + +func (r *Rand) Float64() float64 { + r.Lock() + f64 := r.rand.Float64() + r.Unlock() + return f64 +} + +func (r *Rand) Time() time.Time { + return time.Unix(int64(r.Uint64()), 0) +} + +// Bytes returns n random bytes generated from the internal +// prng. +func (r *Rand) Bytes(n int) []byte { + // cRandBytes isn't guaranteed to be fast so instead + // use random bytes generated from the internal PRNG + bs := make([]byte, n) + for i := 0; i < len(bs); i++ { + bs[i] = byte(r.Int() & 0xFF) + } + return bs +} + +// Intn returns, as an int, a uniform pseudo-random number in the range [0, n). +// It panics if n <= 0. +func (r *Rand) Intn(n int) int { + r.Lock() + i := r.rand.Intn(n) + r.Unlock() + return i +} + +// Bool returns a uniformly random boolean +func (r *Rand) Bool() bool { + // See https://github.com/golang/go/issues/23804#issuecomment-365370418 + // for reasoning behind computing like this + return r.Int63()%2 == 0 +} + +// Perm returns a pseudo-random permutation of n integers in [0, n). +func (r *Rand) Perm(n int) []int { + r.Lock() + perm := r.rand.Perm(n) + r.Unlock() + return perm +} + +// NOTE: This relies on the os's random number generator. +// For real security, we should salt that with some seed. +// See github.com/strangelove-ventures/cometbft-client/crypto for a more secure reader. 
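+// (Added note: these bytes are only used to seed the math/rand PRNG in
+// Rand.init; a failure of crypto/rand.Read panics below.)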
+func cRandBytes(numBytes int) []byte { + b := make([]byte, numBytes) + _, err := crand.Read(b) + if err != nil { + panic(err) + } + return b +} diff --git a/libs/rand/random_test.go b/libs/rand/random_test.go new file mode 100644 index 0000000..ec4aa32 --- /dev/null +++ b/libs/rand/random_test.go @@ -0,0 +1,115 @@ +package rand + +import ( + "bytes" + "encoding/json" + "fmt" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestRandStr(t *testing.T) { + l := 243 + s := Str(l) + assert.Equal(t, l, len(s)) +} + +func TestRandBytes(t *testing.T) { + l := 243 + b := Bytes(l) + assert.Equal(t, l, len(b)) +} + +func TestRandIntn(t *testing.T) { + n := 243 + for i := 0; i < 100; i++ { + x := Intn(n) + assert.True(t, x < n) + } +} + +// Test to make sure that we never call math.rand(). +// We do this by ensuring that outputs are deterministic. +func TestDeterminism(t *testing.T) { + var firstOutput string + + for i := 0; i < 100; i++ { + output := testThemAll() + if i == 0 { + firstOutput = output + } else if firstOutput != output { + t.Errorf("run #%d's output was different from first run.\nfirst: %v\nlast: %v", + i, firstOutput, output) + } + } +} + +func testThemAll() string { + // Such determinism. + grand.reset(1) + + // Use it. + out := new(bytes.Buffer) + perm := Perm(10) + blob, _ := json.Marshal(perm) + fmt.Fprintf(out, "perm: %s\n", blob) + fmt.Fprintf(out, "randInt: %d\n", Int()) + fmt.Fprintf(out, "randUint: %d\n", Uint()) + fmt.Fprintf(out, "randIntn: %d\n", Intn(97)) + fmt.Fprintf(out, "randInt31: %d\n", Int31()) + fmt.Fprintf(out, "randInt32: %d\n", Int32()) + fmt.Fprintf(out, "randInt63: %d\n", Int63()) + fmt.Fprintf(out, "randInt64: %d\n", Int64()) + fmt.Fprintf(out, "randUint32: %d\n", Uint32()) + fmt.Fprintf(out, "randUint64: %d\n", Uint64()) + return out.String() +} + +func TestRngConcurrencySafety(_ *testing.T) { + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + _ = Uint64() + <-time.After(time.Millisecond * time.Duration(Intn(100))) + _ = Perm(3) + }() + } + wg.Wait() +} + +func BenchmarkRandBytes10B(b *testing.B) { + benchmarkRandBytes(b, 10) +} + +func BenchmarkRandBytes100B(b *testing.B) { + benchmarkRandBytes(b, 100) +} + +func BenchmarkRandBytes1KiB(b *testing.B) { + benchmarkRandBytes(b, 1024) +} + +func BenchmarkRandBytes10KiB(b *testing.B) { + benchmarkRandBytes(b, 10*1024) +} + +func BenchmarkRandBytes100KiB(b *testing.B) { + benchmarkRandBytes(b, 100*1024) +} + +func BenchmarkRandBytes1MiB(b *testing.B) { + benchmarkRandBytes(b, 1024*1024) +} + +func benchmarkRandBytes(b *testing.B, n int) { + for i := 0; i < b.N; i++ { + _ = Bytes(n) + } + b.ReportAllocs() +} diff --git a/libs/service/service.go b/libs/service/service.go new file mode 100644 index 0000000..4cb2426 --- /dev/null +++ b/libs/service/service.go @@ -0,0 +1,241 @@ +package service + +import ( + "errors" + "fmt" + "sync/atomic" + + "github.com/strangelove-ventures/cometbft-client/libs/log" +) + +var ( + // ErrAlreadyStarted is returned when somebody tries to start an already + // running service. + ErrAlreadyStarted = errors.New("already started") + // ErrAlreadyStopped is returned when somebody tries to stop an already + // stopped service (without resetting it). + ErrAlreadyStopped = errors.New("already stopped") + // ErrNotStarted is returned when somebody tries to stop a not running + // service. + ErrNotStarted = errors.New("not started") +) + +// Service defines a service that can be started, stopped, and reset. 
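+//
+// A minimal lifecycle sketch (added illustration; svc is any concrete Service):
+//
+//	if err := svc.Start(); err != nil {
+//		// handle ErrAlreadyStarted / ErrAlreadyStopped
+//	}
+//	// ... use the service ...
+//	if err := svc.Stop(); err != nil {
+//		// handle ErrNotStarted / ErrAlreadyStopped
+//	}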
+type Service interface {
+	// Start the service.
+	// If it's already started or stopped, an error is returned.
+	// If OnStart() returns an error, it's returned by Start().
+	Start() error
+	OnStart() error
+
+	// Stop the service.
+	// If it's already stopped, an error is returned.
+	// OnStop must never error.
+	Stop() error
+	OnStop()
+
+	// Reset the service.
+	// Panics by default - OnReset must be overridden to enable reset.
+	Reset() error
+	OnReset() error
+
+	// Return true if the service is running.
+	IsRunning() bool
+
+	// Quit returns a channel, which is closed once the service is stopped.
+	Quit() <-chan struct{}
+
+	// String representation of the service.
+	String() string
+
+	// SetLogger sets a logger.
+	SetLogger(log.Logger)
+}
+
+/*
+Classical-inheritance-style service declarations. Services can be started, then
+stopped, then optionally restarted.
+
+Users can override the OnStart/OnStop methods. In the absence of errors, these
+methods are guaranteed to be called at most once. If OnStart returns an error,
+the service won't be marked as started, so the user can call Start again.
+
+A call to Reset will panic unless OnReset is overridden; a successful Reset
+allows OnStart/OnStop to be called again.
+
+The caller must ensure that Start and Stop are not called concurrently.
+
+It is ok to call Stop without calling Start first.
+
+Typical usage:
+
+	type FooService struct {
+		BaseService
+		// private fields
+	}
+
+	func NewFooService() *FooService {
+		fs := &FooService{
+			// init
+		}
+		fs.BaseService = *NewBaseService(log, "FooService", fs)
+		return fs
+	}
+
+	func (fs *FooService) OnStart() error {
+		fs.BaseService.OnStart() // Always call the embedded implementation.
+		// initialize private fields
+		// start subroutines, etc.
+	}
+
+	func (fs *FooService) OnStop() {
+		fs.BaseService.OnStop() // Always call the embedded implementation.
+		// close/destroy private fields
+		// stop subroutines, etc.
+	}
*/
+type BaseService struct {
+	Logger  log.Logger
+	name    string
+	started uint32 // atomic
+	stopped uint32 // atomic
+	quit    chan struct{}
+
+	// The "subclass" of BaseService
+	impl Service
+}
+
+// NewBaseService creates a new BaseService.
+func NewBaseService(logger log.Logger, name string, impl Service) *BaseService {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+
+	return &BaseService{
+		Logger: logger,
+		name:   name,
+		quit:   make(chan struct{}),
+		impl:   impl,
+	}
+}
+
+// SetLogger implements Service by setting a logger.
+func (bs *BaseService) SetLogger(l log.Logger) {
+	bs.Logger = l
+}
+
+// Start implements Service by calling OnStart (if defined). An error will be
+// returned if the service is already running or stopped. To start a stopped
+// service again, you need to call Reset first.
+func (bs *BaseService) Start() error {
+	if atomic.CompareAndSwapUint32(&bs.started, 0, 1) {
+		if atomic.LoadUint32(&bs.stopped) == 1 {
+			bs.Logger.Error(fmt.Sprintf("Not starting %v service -- already stopped", bs.name),
+				"impl", bs.impl)
+			// revert flag
+			atomic.StoreUint32(&bs.started, 0)
+			return ErrAlreadyStopped
+		}
+		bs.Logger.Info("service start",
+			"msg",
+			log.NewLazySprintf("Starting %v service", bs.name),
+			"impl",
+			bs.impl.String())
+		err := bs.impl.OnStart()
+		if err != nil {
+			// revert flag
+			atomic.StoreUint32(&bs.started, 0)
+			return err
+		}
+		return nil
+	}
+	bs.Logger.Debug("service start",
+		"msg",
+		log.NewLazySprintf("Not starting %v service -- already started", bs.name),
+		"impl",
+		bs.impl)
+	return ErrAlreadyStarted
+}
+
+// OnStart implements Service by doing nothing.
+// NOTE: Do not put anything in here, +// that way users don't need to call BaseService.OnStart() +func (bs *BaseService) OnStart() error { return nil } + +// Stop implements Service by calling OnStop (if defined) and closing quit +// channel. An error will be returned if the service is already stopped. +func (bs *BaseService) Stop() error { + if atomic.CompareAndSwapUint32(&bs.stopped, 0, 1) { + if atomic.LoadUint32(&bs.started) == 0 { + bs.Logger.Error(fmt.Sprintf("Not stopping %v service -- has not been started yet", bs.name), + "impl", bs.impl) + // revert flag + atomic.StoreUint32(&bs.stopped, 0) + return ErrNotStarted + } + bs.Logger.Info("service stop", + "msg", + log.NewLazySprintf("Stopping %v service", bs.name), + "impl", + bs.impl) + bs.impl.OnStop() + close(bs.quit) + return nil + } + bs.Logger.Debug("service stop", + "msg", + log.NewLazySprintf("Stopping %v service (already stopped)", bs.name), + "impl", + bs.impl) + return ErrAlreadyStopped +} + +// OnStop implements Service by doing nothing. +// NOTE: Do not put anything in here, +// that way users don't need to call BaseService.OnStop() +func (bs *BaseService) OnStop() {} + +// Reset implements Service by calling OnReset callback (if defined). An error +// will be returned if the service is running. +func (bs *BaseService) Reset() error { + if !atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) { + bs.Logger.Debug("service reset", + "msg", + log.NewLazySprintf("Can't reset %v service. Not stopped", bs.name), + "impl", + bs.impl) + return fmt.Errorf("can't reset running %s", bs.name) + } + + // whether or not we've started, we can reset + atomic.CompareAndSwapUint32(&bs.started, 1, 0) + + bs.quit = make(chan struct{}) + return bs.impl.OnReset() +} + +// OnReset implements Service by panicking. +func (bs *BaseService) OnReset() error { + panic("The service cannot be reset") +} + +// IsRunning implements Service by returning true or false depending on the +// service's state. +func (bs *BaseService) IsRunning() bool { + return atomic.LoadUint32(&bs.started) == 1 && atomic.LoadUint32(&bs.stopped) == 0 +} + +// Wait blocks until the service is stopped. +func (bs *BaseService) Wait() { + <-bs.quit +} + +// String implements Service by returning a string representation of the service. +func (bs *BaseService) String() string { + return bs.name +} + +// Quit Implements Service by returning a quit channel. 
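+// Illustrative use in a select loop (added sketch; msgCh and handle are
+// hypothetical):
+//
+//	select {
+//	case <-svc.Quit():
+//		return // service stopped
+//	case m := <-msgCh:
+//		handle(m)
+//	}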
+func (bs *BaseService) Quit() <-chan struct{} {
+	return bs.quit
+}
diff --git a/libs/service/service_test.go b/libs/service/service_test.go
new file mode 100644
index 0000000..7abc6f4
--- /dev/null
+++ b/libs/service/service_test.go
@@ -0,0 +1,57 @@
+package service
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+type testService struct {
+	BaseService
+}
+
+func (testService) OnReset() error {
+	return nil
+}
+
+func TestBaseServiceWait(t *testing.T) {
+	ts := &testService{}
+	ts.BaseService = *NewBaseService(nil, "TestService", ts)
+	err := ts.Start()
+	require.NoError(t, err)
+
+	waitFinished := make(chan struct{})
+	go func() {
+		ts.Wait()
+		waitFinished <- struct{}{}
+	}()
+
+	go ts.Stop() //nolint:errcheck // ignore for tests
+
+	select {
+	case <-waitFinished:
+		// all good
+	case <-time.After(100 * time.Millisecond):
+		t.Fatal("expected Wait() to finish within 100 ms.")
+	}
+}
+
+func TestBaseServiceReset(t *testing.T) {
+	ts := &testService{}
+	ts.BaseService = *NewBaseService(nil, "TestService", ts)
+	err := ts.Start()
+	require.NoError(t, err)
+
+	err = ts.Reset()
+	require.Error(t, err, "expected cant reset service error")
+
+	err = ts.Stop()
+	require.NoError(t, err)
+
+	err = ts.Reset()
+	require.NoError(t, err)
+
+	err = ts.Start()
+	require.NoError(t, err)
+}
diff --git a/libs/strings/string.go b/libs/strings/string.go
new file mode 100644
index 0000000..f012d76
--- /dev/null
+++ b/libs/strings/string.go
@@ -0,0 +1,96 @@
+package strings
+
+import (
+	"fmt"
+	"strings"
+)
+
+// StringInSlice returns true if a is found in the list.
+func StringInSlice(a string, list []string) bool {
+	for _, b := range list {
+		if b == a {
+			return true
+		}
+	}
+	return false
+}
+
+// SplitAndTrim slices s into all subslices separated by sep and returns a
+// slice of the string s with all leading and trailing Unicode code points
+// contained in cutset removed. If sep is empty, SplitAndTrim splits after each
+// UTF-8 sequence. The first part is equivalent to strings.SplitN with a count
+// of -1.
+func SplitAndTrim(s, sep, cutset string) []string {
+	if s == "" {
+		return []string{}
+	}
+
+	spl := strings.Split(s, sep)
+	for i := 0; i < len(spl); i++ {
+		spl[i] = strings.Trim(spl[i], cutset)
+	}
+	return spl
+}
+
+// SplitAndTrimEmpty slices s into all subslices separated by sep and returns a
+// slice of the string s with all leading and trailing Unicode code points
+// contained in cutset removed. If sep is empty, SplitAndTrimEmpty splits after
+// each UTF-8 sequence. The first part is equivalent to strings.SplitN with a
+// count of -1. It also filters out empty strings, returning only non-empty
+// strings.
+func SplitAndTrimEmpty(s, sep, cutset string) []string {
+	if s == "" {
+		return []string{}
+	}
+
+	spl := strings.Split(s, sep)
+	nonEmptyStrings := make([]string, 0, len(spl))
+	for i := 0; i < len(spl); i++ {
+		element := strings.Trim(spl[i], cutset)
+		if element != "" {
+			nonEmptyStrings = append(nonEmptyStrings, element)
+		}
+	}
+	return nonEmptyStrings
+}
+
+// IsASCIIText returns true if s is a non-empty string of printable, non-tab
+// ASCII characters.
+func IsASCIIText(s string) bool {
+	if len(s) == 0 {
+		return false
+	}
+	for _, b := range []byte(s) {
+		if b < 32 || b > 126 {
+			return false
+		}
+	}
+	return true
+}
+
+// NOTE: Assumes that s is ASCII as per IsASCIIText(), otherwise panics.
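+// Added note: every space is removed, not only leading and trailing ones;
+// e.g. ASCIITrim(" a b ") == "ab".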
+func ASCIITrim(s string) string { + r := make([]byte, 0, len(s)) + for _, b := range []byte(s) { + switch { + case b == 32: + continue // skip space + case 32 < b && b <= 126: + r = append(r, b) + default: + panic(fmt.Sprintf("non-ASCII (non-tab) char 0x%X", b)) + } + } + return string(r) +} + +// StringSliceEqual checks if string slices a and b are equal +func StringSliceEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true +} diff --git a/libs/strings/string_test.go b/libs/strings/string_test.go new file mode 100644 index 0000000..56c4d78 --- /dev/null +++ b/libs/strings/string_test.go @@ -0,0 +1,57 @@ +package strings + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestStringInSlice(t *testing.T) { + assert.True(t, StringInSlice("a", []string{"a", "b", "c"})) + assert.False(t, StringInSlice("d", []string{"a", "b", "c"})) + assert.True(t, StringInSlice("", []string{""})) + assert.False(t, StringInSlice("", []string{})) +} + +func TestIsASCIIText(t *testing.T) { + notASCIIText := []string{ + "", "\xC2", "\xC2\xA2", "\xFF", "\x80", "\xF0", "\n", "\t", + } + for _, v := range notASCIIText { + assert.False(t, IsASCIIText(v), "%q is not ascii-text", v) + } + asciiText := []string{ + " ", ".", "x", "$", "_", "abcdefg;", "-", "0x00", "0", "123", + } + for _, v := range asciiText { + assert.True(t, IsASCIIText(v), "%q is ascii-text", v) + } +} + +func TestASCIITrim(t *testing.T) { + assert.Equal(t, ASCIITrim(" "), "") + assert.Equal(t, ASCIITrim(" a"), "a") + assert.Equal(t, ASCIITrim("a "), "a") + assert.Equal(t, ASCIITrim(" a "), "a") + assert.Panics(t, func() { ASCIITrim("\xC2\xA2") }) +} + +func TestStringSliceEqual(t *testing.T) { + tests := []struct { + a []string + b []string + want bool + }{ + {[]string{"hello", "world"}, []string{"hello", "world"}, true}, + {[]string{"test"}, []string{"test"}, true}, + {[]string{"test1"}, []string{"test2"}, false}, + {[]string{"hello", "world."}, []string{"hello", "world!"}, false}, + {[]string{"only 1 word"}, []string{"two", "words!"}, false}, + {[]string{"two", "words!"}, []string{"only 1 word"}, false}, + } + for i, tt := range tests { + require.Equal(t, tt.want, StringSliceEqual(tt.a, tt.b), + "StringSliceEqual failed on test %d", i) + } +} diff --git a/libs/sync/deadlock.go b/libs/sync/deadlock.go new file mode 100644 index 0000000..21b5130 --- /dev/null +++ b/libs/sync/deadlock.go @@ -0,0 +1,18 @@ +//go:build deadlock +// +build deadlock + +package sync + +import ( + deadlock "github.com/sasha-s/go-deadlock" +) + +// A Mutex is a mutual exclusion lock. +type Mutex struct { + deadlock.Mutex +} + +// An RWMutex is a reader/writer mutual exclusion lock. +type RWMutex struct { + deadlock.RWMutex +} diff --git a/libs/sync/sync.go b/libs/sync/sync.go new file mode 100644 index 0000000..c6e7101 --- /dev/null +++ b/libs/sync/sync.go @@ -0,0 +1,16 @@ +//go:build !deadlock +// +build !deadlock + +package sync + +import "sync" + +// A Mutex is a mutual exclusion lock. +type Mutex struct { + sync.Mutex +} + +// An RWMutex is a reader/writer mutual exclusion lock. 
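+// (Added note: building with the deadlock tag, e.g. `go build -tags deadlock`,
+// swaps these wrappers for github.com/sasha-s/go-deadlock equivalents; see
+// deadlock.go.)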
+type RWMutex struct { + sync.RWMutex +} diff --git a/libs/test/mutate.go b/libs/test/mutate.go new file mode 100644 index 0000000..2be154a --- /dev/null +++ b/libs/test/mutate.go @@ -0,0 +1,28 @@ +package test + +import ( + cmtrand "github.com/strangelove-ventures/cometbft-client/libs/rand" +) + +// Contract: !bytes.Equal(input, output) && len(input) >= len(output) +func MutateByteSlice(bytez []byte) []byte { + // If bytez is empty, panic + if len(bytez) == 0 { + panic("Cannot mutate an empty bytez") + } + + // Copy bytez + mBytez := make([]byte, len(bytez)) + copy(mBytez, bytez) + bytez = mBytez + + // Try a random mutation + switch cmtrand.Int() % 2 { + case 0: // Mutate a single byte + bytez[cmtrand.Int()%len(bytez)] += byte(cmtrand.Int()%255 + 1) + case 1: // Remove an arbitrary byte + pos := cmtrand.Int() % len(bytez) + bytez = append(bytez[:pos], bytez[pos+1:]...) + } + return bytez +} diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go new file mode 100644 index 0000000..fd5c1f7 --- /dev/null +++ b/p2p/conn/connection.go @@ -0,0 +1,22 @@ +package conn + +import ( + "time" + + flow "github.com/strangelove-ventures/cometbft-client/libs/flowrate" +) + +type ConnectionStatus struct { + Duration time.Duration + SendMonitor flow.Status + RecvMonitor flow.Status + Channels []ChannelStatus +} + +type ChannelStatus struct { + ID byte + SendQueueCapacity int + SendQueueSize int + Priority int + RecentlySent int64 +} diff --git a/p2p/conn_set.go b/p2p/conn_set.go new file mode 100644 index 0000000..3da259f --- /dev/null +++ b/p2p/conn_set.go @@ -0,0 +1,82 @@ +package p2p + +import ( + "net" + + cmtsync "github.com/strangelove-ventures/cometbft-client/libs/sync" +) + +// ConnSet is a lookup table for connections and all their ips. +type ConnSet interface { + Has(net.Conn) bool + HasIP(net.IP) bool + Set(net.Conn, []net.IP) + Remove(net.Conn) + RemoveAddr(net.Addr) +} + +type connSetItem struct { + conn net.Conn + ips []net.IP +} + +type connSet struct { + cmtsync.RWMutex + + conns map[string]connSetItem +} + +// NewConnSet returns a ConnSet implementation. +func NewConnSet() ConnSet { + return &connSet{ + conns: map[string]connSetItem{}, + } +} + +func (cs *connSet) Has(c net.Conn) bool { + cs.RLock() + defer cs.RUnlock() + + _, ok := cs.conns[c.RemoteAddr().String()] + + return ok +} + +func (cs *connSet) HasIP(ip net.IP) bool { + cs.RLock() + defer cs.RUnlock() + + for _, c := range cs.conns { + for _, known := range c.ips { + if known.Equal(ip) { + return true + } + } + } + + return false +} + +func (cs *connSet) Remove(c net.Conn) { + cs.Lock() + defer cs.Unlock() + + delete(cs.conns, c.RemoteAddr().String()) +} + +func (cs *connSet) RemoveAddr(addr net.Addr) { + cs.Lock() + defer cs.Unlock() + + delete(cs.conns, addr.String()) +} + +func (cs *connSet) Set(c net.Conn, ips []net.IP) { + cs.Lock() + defer cs.Unlock() + + cs.conns[c.RemoteAddr().String()] = connSetItem{ + conn: c, + ips: ips, + } +} diff --git a/p2p/errors.go b/p2p/errors.go new file mode 100644 index 0000000..4fc9152 --- /dev/null +++ b/p2p/errors.go @@ -0,0 +1,191 @@ +package p2p + +import ( + "fmt" + "net" +) + +// ErrFilterTimeout indicates that a filter operation timed out. +type ErrFilterTimeout struct{} + +func (e ErrFilterTimeout) Error() string { + return "filter timed out" +} + +// ErrRejected indicates that a Peer was rejected carrying additional +// information as to the reason. 
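+//
+// Callers typically branch on the Is* accessors (added sketch):
+//
+//	if r, ok := err.(ErrRejected); ok && r.IsSelf() {
+//		// do not redial our own address
+//	}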
+type ErrRejected struct {
+	addr              NetAddress
+	conn              net.Conn
+	err               error
+	id                ID
+	isAuthFailure     bool
+	isDuplicate       bool
+	isFiltered        bool
+	isIncompatible    bool
+	isNodeInfoInvalid bool
+	isSelf            bool
+}
+
+// Addr returns the NetAddress for the rejected Peer.
+func (e ErrRejected) Addr() NetAddress {
+	return e.addr
+}
+
+func (e ErrRejected) Error() string {
+	if e.isAuthFailure {
+		return fmt.Sprintf("auth failure: %s", e.err)
+	}
+
+	if e.isDuplicate {
+		if e.conn != nil {
+			return fmt.Sprintf(
+				"duplicate CONN<%s>",
+				e.conn.RemoteAddr().String(),
+			)
+		}
+		if e.id != "" {
+			return fmt.Sprintf("duplicate ID<%v>", e.id)
+		}
+	}
+
+	if e.isFiltered {
+		if e.conn != nil {
+			return fmt.Sprintf(
+				"filtered CONN<%s>: %s",
+				e.conn.RemoteAddr().String(),
+				e.err,
+			)
+		}
+
+		if e.id != "" {
+			return fmt.Sprintf("filtered ID<%v>: %s", e.id, e.err)
+		}
+	}
+
+	if e.isIncompatible {
+		return fmt.Sprintf("incompatible: %s", e.err)
+	}
+
+	if e.isNodeInfoInvalid {
+		return fmt.Sprintf("invalid NodeInfo: %s", e.err)
+	}
+
+	if e.isSelf {
+		return fmt.Sprintf("self ID<%v>", e.id)
+	}
+
+	return fmt.Sprintf("%s", e.err)
+}
+
+// IsAuthFailure when Peer authentication was unsuccessful.
+func (e ErrRejected) IsAuthFailure() bool { return e.isAuthFailure }
+
+// IsDuplicate when Peer ID or IP are present already.
+func (e ErrRejected) IsDuplicate() bool { return e.isDuplicate }
+
+// IsFiltered when Peer ID or IP was filtered.
+func (e ErrRejected) IsFiltered() bool { return e.isFiltered }
+
+// IsIncompatible when Peer NodeInfo is not compatible with our own.
+func (e ErrRejected) IsIncompatible() bool { return e.isIncompatible }
+
+// IsNodeInfoInvalid when the sent NodeInfo is not valid.
+func (e ErrRejected) IsNodeInfoInvalid() bool { return e.isNodeInfoInvalid }
+
+// IsSelf when Peer is our own node.
+func (e ErrRejected) IsSelf() bool { return e.isSelf }
+
+// ErrSwitchDuplicatePeerID to be raised when a peer is connecting with a known
+// ID.
+type ErrSwitchDuplicatePeerID struct {
+	ID ID
+}
+
+func (e ErrSwitchDuplicatePeerID) Error() string {
+	return fmt.Sprintf("duplicate peer ID %v", e.ID)
+}
+
+// ErrSwitchDuplicatePeerIP to be raised when a peer is connecting with a known
+// IP.
+type ErrSwitchDuplicatePeerIP struct {
+	IP net.IP
+}
+
+func (e ErrSwitchDuplicatePeerIP) Error() string {
+	return fmt.Sprintf("duplicate peer IP %v", e.IP.String())
+}
+
+// ErrSwitchConnectToSelf to be raised when trying to connect to itself.
+type ErrSwitchConnectToSelf struct {
+	Addr *NetAddress
+}
+
+func (e ErrSwitchConnectToSelf) Error() string {
+	return fmt.Sprintf("connect to self: %v", e.Addr)
+}
+
+type ErrSwitchAuthenticationFailure struct {
+	Dialed *NetAddress
+	Got    ID
+}
+
+func (e ErrSwitchAuthenticationFailure) Error() string {
+	return fmt.Sprintf(
+		"failed to authenticate peer. Dialed %v, but got peer with ID %s",
+		e.Dialed,
+		e.Got,
+	)
+}
+
+// ErrTransportClosed is raised when the Transport has been closed.
+type ErrTransportClosed struct{}
+
+func (e ErrTransportClosed) Error() string {
+	return "transport has been closed"
+}
+
+// ErrPeerRemoval is raised when an attempt to remove a peer results in an error.
+type ErrPeerRemoval struct{}
+
+func (e ErrPeerRemoval) Error() string {
+	return "peer removal failed"
+}
+
+//-------------------------------------------------------------------
+
+type ErrNetAddressNoID struct {
+	Addr string
+}
+
+func (e ErrNetAddressNoID) Error() string {
+	return fmt.Sprintf("address (%s) does not contain ID", e.Addr)
+}
+
+type ErrNetAddressInvalid struct {
+	Addr string
+	Err  error
+}
+
+func (e ErrNetAddressInvalid) Error() string {
+	return fmt.Sprintf("invalid address (%s): %v", e.Addr, e.Err)
+}
+
+type ErrNetAddressLookup struct {
+	Addr string
+	Err  error
+}
+
+func (e ErrNetAddressLookup) Error() string {
+	return fmt.Sprintf("error looking up host (%s): %v", e.Addr, e.Err)
+}
+
+// ErrCurrentlyDialingOrExistingAddress indicates that we're currently
+// dialing this address or it belongs to an existing peer.
+type ErrCurrentlyDialingOrExistingAddress struct {
+	Addr string
+}
+
+func (e ErrCurrentlyDialingOrExistingAddress) Error() string {
+	return fmt.Sprintf("connection with %s has been established or dialed", e.Addr)
+}
diff --git a/p2p/key.go b/p2p/key.go
new file mode 100644
index 0000000..d7dd1f4
--- /dev/null
+++ b/p2p/key.go
@@ -0,0 +1,120 @@
+package p2p
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"os"
+
+	"github.com/strangelove-ventures/cometbft-client/crypto"
+	"github.com/strangelove-ventures/cometbft-client/crypto/ed25519"
+	cmtjson "github.com/strangelove-ventures/cometbft-client/libs/json"
+	cmtos "github.com/strangelove-ventures/cometbft-client/libs/os"
+)
+
+// ID is a hex-encoded crypto.Address
+type ID string
+
+// IDByteLength is the length of a crypto.Address. Currently only 20.
+// TODO: support other length addresses ?
+const IDByteLength = crypto.AddressSize
+
+//------------------------------------------------------------------------------
+// Persistent peer ID
+// TODO: encrypt on disk
+
+// NodeKey is the persistent peer key.
+// It contains the node's private key for authentication.
+type NodeKey struct {
+	PrivKey crypto.PrivKey `json:"priv_key"` // our priv key
+}
+
+// ID returns the peer's canonical ID - the hash of its public key.
+func (nodeKey *NodeKey) ID() ID {
+	return PubKeyToID(nodeKey.PubKey())
+}
+
+// PubKey returns the peer's PubKey
+func (nodeKey *NodeKey) PubKey() crypto.PubKey {
+	return nodeKey.PrivKey.PubKey()
+}
+
+// PubKeyToID returns the ID corresponding to the given PubKey.
+// It's the hex-encoding of the pubKey.Address().
+func PubKeyToID(pubKey crypto.PubKey) ID {
+	return ID(hex.EncodeToString(pubKey.Address()))
+}
+
+// LoadOrGenNodeKey attempts to load the NodeKey from the given filePath. If
+// the file does not exist, it generates and saves a new NodeKey.
+func LoadOrGenNodeKey(filePath string) (*NodeKey, error) {
+	if cmtos.FileExists(filePath) {
+		nodeKey, err := LoadNodeKey(filePath)
+		if err != nil {
+			return nil, err
+		}
+		return nodeKey, nil
+	}
+
+	privKey := ed25519.GenPrivKey()
+	nodeKey := &NodeKey{
+		PrivKey: privKey,
+	}
+
+	if err := nodeKey.SaveAs(filePath); err != nil {
+		return nil, err
+	}
+
+	return nodeKey, nil
+}
+
+// LoadNodeKey loads NodeKey located in filePath.
+func LoadNodeKey(filePath string) (*NodeKey, error) {
+	jsonBytes, err := os.ReadFile(filePath)
+	if err != nil {
+		return nil, err
+	}
+	nodeKey := new(NodeKey)
+	err = cmtjson.Unmarshal(jsonBytes, nodeKey)
+	if err != nil {
+		return nil, err
+	}
+	return nodeKey, nil
+}
+
+// SaveAs persists the NodeKey to filePath.
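+// Illustrative round-trip (added sketch; the path is hypothetical):
+//
+//	key, err := LoadOrGenNodeKey("config/node_key.json")
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = key.ID() // hex-encoded address, usable in ID@host:port strings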
+func (nodeKey *NodeKey) SaveAs(filePath string) error {
+	jsonBytes, err := cmtjson.Marshal(nodeKey)
+	if err != nil {
+		return err
+	}
+	err = os.WriteFile(filePath, jsonBytes, 0600)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+// MakePoWTarget returns the big-endian encoding of 2^(targetBits - difficulty) - 1.
+// It can be used as a Proof of Work target.
+// NOTE: targetBits must be a multiple of 8 and difficulty must be less than targetBits.
+func MakePoWTarget(difficulty, targetBits uint) []byte {
+	if targetBits%8 != 0 {
+		panic(fmt.Sprintf("targetBits (%d) not a multiple of 8", targetBits))
+	}
+	if difficulty >= targetBits {
+		panic(fmt.Sprintf("difficulty (%d) >= targetBits (%d)", difficulty, targetBits))
+	}
+	targetBytes := targetBits / 8
+	zeroPrefixLen := (int(difficulty) / 8)
+	prefix := bytes.Repeat([]byte{0}, zeroPrefixLen)
+	mod := (difficulty % 8)
+	if mod > 0 {
+		nonZeroPrefix := byte(1<<(8-mod) - 1)
+		prefix = append(prefix, nonZeroPrefix)
+	}
+	tailLen := int(targetBytes) - len(prefix)
+	return append(prefix, bytes.Repeat([]byte{0xFF}, tailLen)...)
+}
diff --git a/p2p/netaddress.go b/p2p/netaddress.go
new file mode 100644
index 0000000..1e1872b
--- /dev/null
+++ b/p2p/netaddress.go
@@ -0,0 +1,369 @@
+// Modified for CometBFT
+// Originally Copyright (c) 2013-2014 Conformal Systems LLC.
+// https://github.com/conformal/btcd/blob/master/LICENSE
+
+package p2p
+
+import (
+	"encoding/hex"
+	"errors"
+	"flag"
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// EmptyNetAddress defines the string representation of an empty NetAddress
+const EmptyNetAddress = ""
+
+// NetAddress defines information about a peer on the network
+// including its ID, IP address, and port.
+type NetAddress struct {
+	ID   ID     `json:"id"`
+	IP   net.IP `json:"ip"`
+	Port uint16 `json:"port"`
+}
+
+// IDAddressString returns id@hostPort. It strips the leading
+// protocol from protocolHostPort if it exists.
+func IDAddressString(id ID, protocolHostPort string) string {
+	hostPort := removeProtocolIfDefined(protocolHostPort)
+	return fmt.Sprintf("%s@%s", id, hostPort)
+}
+
+// NewNetAddress returns a new NetAddress using the provided TCP
+// address. In a normal run, any other net.Addr (except TCP) causes a
+// panic; under `go test`, it falls back to 127.0.0.1:0 instead.
+// Panics if ID is invalid.
+// TODO: socks proxies?
+func NewNetAddress(id ID, addr net.Addr) *NetAddress {
+	tcpAddr, ok := addr.(*net.TCPAddr)
+	if !ok {
+		if flag.Lookup("test.v") == nil { // normal run
+			panic(fmt.Sprintf("Only TCPAddrs are supported. Got: %v", addr))
+		} else { // in testing
+			netAddr := NewNetAddressIPPort(net.ParseIP("127.0.0.1"), 0)
+			netAddr.ID = id
+			return netAddr
+		}
+	}
+
+	if err := validateID(id); err != nil {
+		panic(fmt.Sprintf("Invalid ID %v: %v (addr: %v)", id, err, addr))
+	}
+
+	ip := tcpAddr.IP
+	port := uint16(tcpAddr.Port)
+	na := NewNetAddressIPPort(ip, port)
+	na.ID = id
+	return na
+}
+
+// NewNetAddressString returns a new NetAddress using the provided address in
+// the form of "ID@IP:Port".
+// Also resolves the host if host is not an IP.
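+// For example (added illustration):
+//
+//	na, err := NewNetAddressString("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:26656")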
+// Errors are of type ErrNetAddressXxx where Xxx is in (NoID, Invalid, Lookup)
+func NewNetAddressString(addr string) (*NetAddress, error) {
+	addrWithoutProtocol := removeProtocolIfDefined(addr)
+	spl := strings.Split(addrWithoutProtocol, "@")
+	if len(spl) != 2 {
+		return nil, ErrNetAddressNoID{addr}
+	}
+
+	// get ID
+	if err := validateID(ID(spl[0])); err != nil {
+		return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
+	}
+	var id ID
+	id, addrWithoutProtocol = ID(spl[0]), spl[1]
+
+	// get host and port
+	host, portStr, err := net.SplitHostPort(addrWithoutProtocol)
+	if err != nil {
+		return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
+	}
+	if len(host) == 0 {
+		return nil, ErrNetAddressInvalid{
+			addrWithoutProtocol,
+			errors.New("host is empty")}
+	}
+
+	ip := net.ParseIP(host)
+	if ip == nil {
+		ips, err := net.LookupIP(host)
+		if err != nil {
+			return nil, ErrNetAddressLookup{host, err}
+		}
+		ip = ips[0]
+	}
+
+	port, err := strconv.ParseUint(portStr, 10, 16)
+	if err != nil {
+		return nil, ErrNetAddressInvalid{portStr, err}
+	}
+
+	na := NewNetAddressIPPort(ip, uint16(port))
+	na.ID = id
+	return na, nil
+}
+
+// NewNetAddressStrings returns an array of NetAddresses built using
+// the provided strings.
+func NewNetAddressStrings(addrs []string) ([]*NetAddress, []error) {
+	netAddrs := make([]*NetAddress, 0)
+	errs := make([]error, 0)
+	for _, addr := range addrs {
+		netAddr, err := NewNetAddressString(addr)
+		if err != nil {
+			errs = append(errs, err)
+		} else {
+			netAddrs = append(netAddrs, netAddr)
+		}
+	}
+	return netAddrs, errs
+}
+
+// NewNetAddressIPPort returns a new NetAddress using the provided IP
+// and port number.
+func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress {
+	return &NetAddress{
+		IP:   ip,
+		Port: port,
+	}
+}
+
+// Equals reports whether na and other are the same addresses,
+// including their ID, IP, and Port.
+func (na *NetAddress) Equals(other interface{}) bool {
+	if o, ok := other.(*NetAddress); ok {
+		return na.String() == o.String()
+	}
+	return false
+}
+
+// Same returns true if na has the same non-empty ID or DialString as other.
+func (na *NetAddress) Same(other interface{}) bool {
+	if o, ok := other.(*NetAddress); ok {
+		if na.DialString() == o.DialString() {
+			return true
+		}
+		if na.ID != "" && na.ID == o.ID {
+			return true
+		}
+	}
+	return false
+}
+
+// String representation: <ID>@<IP>:<PORT>
+func (na *NetAddress) String() string {
+	if na == nil {
+		return EmptyNetAddress
+	}
+
+	addrStr := na.DialString()
+	if na.ID != "" {
+		addrStr = IDAddressString(na.ID, addrStr)
+	}
+
+	return addrStr
+}
+
+func (na *NetAddress) DialString() string {
+	if na == nil {
+		return ""
+	}
+	return net.JoinHostPort(
+		na.IP.String(),
+		strconv.FormatUint(uint64(na.Port), 10),
+	)
+}
+
+// Dial calls net.Dial on the address.
+func (na *NetAddress) Dial() (net.Conn, error) {
+	conn, err := net.Dial("tcp", na.DialString())
+	if err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+// DialTimeout calls net.DialTimeout on the address.
+func (na *NetAddress) DialTimeout(timeout time.Duration) (net.Conn, error) {
+	conn, err := net.DialTimeout("tcp", na.DialString(), timeout)
+	if err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+// Routable returns true if the address is routable.
+func (na *NetAddress) Routable() bool {
+	if err := na.Valid(); err != nil {
+		return false
+	}
+	// TODO(oga) bitcoind doesn't include RFC3849 here, but should we?
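+	// Added illustration: e.g. 10.0.0.5 (RFC1918) and 169.254.1.1 (RFC3927) are
+	// not routable, while a public address such as 8.8.8.8 is (given the
+	// address also carries a valid ID, as required by Valid).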
+	return !(na.RFC1918() || na.RFC3927() || na.RFC4862() ||
+		na.RFC4193() || na.RFC4843() || na.Local())
+}
+
+// Valid returns an error if the ID or IP is invalid. Invalid IPs are, for
+// IPv4, either a 0 or all-bits-set address; for IPv6, a zero address or one
+// that matches the RFC3849 documentation address format.
+func (na *NetAddress) Valid() error {
+	if err := validateID(na.ID); err != nil {
+		return fmt.Errorf("invalid ID: %w", err)
+	}
+
+	if na.IP == nil {
+		return errors.New("no IP")
+	}
+	if na.IP.IsUnspecified() || na.RFC3849() || na.IP.Equal(net.IPv4bcast) {
+		return errors.New("invalid IP")
+	}
+	return nil
+}
+
+// HasID returns true if the address has an ID.
+// NOTE: It does not check whether the ID is valid or not.
+func (na *NetAddress) HasID() bool {
+	return string(na.ID) != ""
+}
+
+// Local returns true if it is a local address.
+func (na *NetAddress) Local() bool {
+	return na.IP.IsLoopback() || zero4.Contains(na.IP)
+}
+
+// ReachabilityTo checks whether o can be reached from na.
+func (na *NetAddress) ReachabilityTo(o *NetAddress) int {
+	const (
+		Unreachable = 0
+		Default     = iota
+		Teredo
+		Ipv6Weak
+		Ipv4
+		Ipv6Strong
+	)
+	switch {
+	case !na.Routable():
+		return Unreachable
+	case na.RFC4380():
+		switch {
+		case !o.Routable():
+			return Default
+		case o.RFC4380():
+			return Teredo
+		case o.IP.To4() != nil:
+			return Ipv4
+		default: // ipv6
+			return Ipv6Weak
+		}
+	case na.IP.To4() != nil:
+		if o.Routable() && o.IP.To4() != nil {
+			return Ipv4
+		}
+		return Default
+	default: /* ipv6 */
+		var tunneled bool
+		// Is our v6 tunneled?
+		if o.RFC3964() || o.RFC6052() || o.RFC6145() {
+			tunneled = true
+		}
+		switch {
+		case !o.Routable():
+			return Default
+		case o.RFC4380():
+			return Teredo
+		case o.IP.To4() != nil:
+			return Ipv4
+		case tunneled:
+			// only prioritize ipv6 if we aren't tunneling it.
+			return Ipv6Weak
+		}
+		return Ipv6Strong
+	}
+}
+
+// RFC1918: IPv4 Private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12)
+// RFC3849: IPv6 Documentation address (2001:0DB8::/32)
+// RFC3927: IPv4 Autoconfig (169.254.0.0/16)
+// RFC3964: IPv6 6to4 (2002::/16)
+// RFC4193: IPv6 unique local (FC00::/7)
+// RFC4380: IPv6 Teredo tunneling (2001::/32)
+// RFC4843: IPv6 ORCHID: (2001:10::/28)
+// RFC4862: IPv6 Autoconfig (FE80::/64)
+// RFC6052: IPv6 well known prefix (64:FF9B::/96)
+// RFC6145: IPv6 IPv4 translated address ::FFFF:0:0:0/96
+var rfc1918_10 = net.IPNet{IP: net.ParseIP("10.0.0.0"), Mask: net.CIDRMask(8, 32)}
+var rfc1918_192 = net.IPNet{IP: net.ParseIP("192.168.0.0"), Mask: net.CIDRMask(16, 32)}
+var rfc1918_172 = net.IPNet{IP: net.ParseIP("172.16.0.0"), Mask: net.CIDRMask(12, 32)}
+var rfc3849 = net.IPNet{IP: net.ParseIP("2001:0DB8::"), Mask: net.CIDRMask(32, 128)}
+var rfc3927 = net.IPNet{IP: net.ParseIP("169.254.0.0"), Mask: net.CIDRMask(16, 32)}
+var rfc3964 = net.IPNet{IP: net.ParseIP("2002::"), Mask: net.CIDRMask(16, 128)}
+var rfc4193 = net.IPNet{IP: net.ParseIP("FC00::"), Mask: net.CIDRMask(7, 128)}
+var rfc4380 = net.IPNet{IP: net.ParseIP("2001::"), Mask: net.CIDRMask(32, 128)}
+var rfc4843 = net.IPNet{IP: net.ParseIP("2001:10::"), Mask: net.CIDRMask(28, 128)}
+var rfc4862 = net.IPNet{IP: net.ParseIP("FE80::"), Mask: net.CIDRMask(64, 128)}
+var rfc6052 = net.IPNet{IP: net.ParseIP("64:FF9B::"), Mask: net.CIDRMask(96, 128)}
+var rfc6145 = net.IPNet{IP: net.ParseIP("::FFFF:0:0:0"), Mask: net.CIDRMask(96, 128)}
+var zero4 = net.IPNet{IP: net.ParseIP("0.0.0.0"), Mask: net.CIDRMask(8, 32)}
+var (
+	// onionCatNet defines the IPv6 address block used to support Tor.
+ // bitcoind encodes a .onion address as a 16 byte number by decoding the + // address prior to the .onion (i.e. the key hash) base32 into a ten + // byte number. It then stores the first 6 bytes of the address as + // 0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43. + // + // This is the same range used by OnionCat, which is part part of the + // RFC4193 unique local IPv6 range. + // + // In summary the format is: + // { magic 6 bytes, 10 bytes base32 decode of key hash } + onionCatNet = ipNet("fd87:d87e:eb43::", 48, 128) +) + +// ipNet returns a net.IPNet struct given the passed IP address string, number +// of one bits to include at the start of the mask, and the total number of bits +// for the mask. +func ipNet(ip string, ones, bits int) net.IPNet { + return net.IPNet{IP: net.ParseIP(ip), Mask: net.CIDRMask(ones, bits)} +} + +func (na *NetAddress) RFC1918() bool { + return rfc1918_10.Contains(na.IP) || + rfc1918_192.Contains(na.IP) || + rfc1918_172.Contains(na.IP) +} +func (na *NetAddress) RFC3849() bool { return rfc3849.Contains(na.IP) } +func (na *NetAddress) RFC3927() bool { return rfc3927.Contains(na.IP) } +func (na *NetAddress) RFC3964() bool { return rfc3964.Contains(na.IP) } +func (na *NetAddress) RFC4193() bool { return rfc4193.Contains(na.IP) } +func (na *NetAddress) RFC4380() bool { return rfc4380.Contains(na.IP) } +func (na *NetAddress) RFC4843() bool { return rfc4843.Contains(na.IP) } +func (na *NetAddress) RFC4862() bool { return rfc4862.Contains(na.IP) } +func (na *NetAddress) RFC6052() bool { return rfc6052.Contains(na.IP) } +func (na *NetAddress) RFC6145() bool { return rfc6145.Contains(na.IP) } +func (na *NetAddress) OnionCatTor() bool { return onionCatNet.Contains(na.IP) } + +func removeProtocolIfDefined(addr string) string { + if strings.Contains(addr, "://") { + return strings.Split(addr, "://")[1] + } + return addr + +} + +func validateID(id ID) error { + if len(id) == 0 { + return errors.New("no ID") + } + idBytes, err := hex.DecodeString(string(id)) + if err != nil { + return err + } + if len(idBytes) != IDByteLength { + return fmt.Errorf("invalid hex length - got %d, expected %d", len(idBytes), IDByteLength) + } + return nil +} diff --git a/p2p/netaddress_test.go b/p2p/netaddress_test.go new file mode 100644 index 0000000..65f9fb8 --- /dev/null +++ b/p2p/netaddress_test.go @@ -0,0 +1,192 @@ +package p2p + +import ( + "net" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNetAddress_String(t *testing.T) { + tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080") + require.Nil(t, err) + + netAddr := NewNetAddress("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef", tcpAddr) + + var wg sync.WaitGroup + + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + _ = netAddr.String() + }() + } + + wg.Wait() + + s := netAddr.String() + require.Equal(t, "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", s) +} + +func TestNewNetAddress(t *testing.T) { + tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080") + require.Nil(t, err) + + assert.Panics(t, func() { + NewNetAddress("", tcpAddr) + }) + + addr := NewNetAddress("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef", tcpAddr) + assert.Equal(t, "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", addr.String()) + + assert.NotPanics(t, func() { + NewNetAddress("", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8000}) + }, "Calling NewNetAddress with UDPAddr should not panic in testing") +} + +func TestNewNetAddressString(t *testing.T) { + 
testCases := []struct { + name string + addr string + expected string + correct bool + }{ + {"no node id and no protocol", "127.0.0.1:8080", "", false}, + {"no node id w/ tcp input", "tcp://127.0.0.1:8080", "", false}, + {"no node id w/ udp input", "udp://127.0.0.1:8080", "", false}, + + { + "no protocol", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + true, + }, + { + "tcp input", + "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + true, + }, + { + "udp input", + "udp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + true, + }, + {"malformed tcp input", "tcp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + {"malformed udp input", "udp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + + // {"127.0.0:8080", false}, + {"invalid host", "notahost", "", false}, + {"invalid port", "127.0.0.1:notapath", "", false}, + {"invalid host w/ port", "notahost:8080", "", false}, + {"just a port", "8082", "", false}, + {"non-existent port", "127.0.0:8080000", "", false}, + + {"too short nodeId", "deadbeef@127.0.0.1:8080", "", false}, + {"too short, not hex nodeId", "this-isnot-hex@127.0.0.1:8080", "", false}, + {"not hex nodeId", "xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + + {"too short nodeId w/tcp", "tcp://deadbeef@127.0.0.1:8080", "", false}, + {"too short notHex nodeId w/tcp", "tcp://this-isnot-hex@127.0.0.1:8080", "", false}, + {"notHex nodeId w/tcp", "tcp://xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + { + "correct nodeId w/tcp", + "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + true, + }, + + {"no node id", "tcp://@127.0.0.1:8080", "", false}, + {"no node id or IP", "tcp://@", "", false}, + {"tcp no host, w/ port", "tcp://:26656", "", false}, + {"empty", "", "", false}, + {"node id delimiter 1", "@", "", false}, + {"node id delimiter 2", " @", "", false}, + {"node id delimiter 3", " @ ", "", false}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + addr, err := NewNetAddressString(tc.addr) + if tc.correct { + if assert.Nil(t, err, tc.addr) { + assert.Equal(t, tc.expected, addr.String()) + } + } else { + assert.NotNil(t, err, tc.addr) + } + }) + } +} + +func TestNewNetAddressStrings(t *testing.T) { + addrs, errs := NewNetAddressStrings([]string{ + "127.0.0.1:8080", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeed@127.0.0.2:8080"}) + assert.Len(t, errs, 1) + assert.Equal(t, 2, len(addrs)) +} + +func TestNewNetAddressIPPort(t *testing.T) { + addr := NewNetAddressIPPort(net.ParseIP("127.0.0.1"), 8080) + assert.Equal(t, "127.0.0.1:8080", addr.String()) +} + +func TestNetAddressProperties(t *testing.T) { + // TODO add more test cases + testCases := []struct { + addr string + valid bool + local bool + routable bool + }{ + {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true, true, false}, + {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@ya.ru:80", true, false, true}, + } + + for _, tc := range testCases { + addr, err := NewNetAddressString(tc.addr) + require.Nil(t, err) + + err = addr.Valid() + if tc.valid { + assert.NoError(t, err) + } else { + assert.Error(t, err) + } + assert.Equal(t, tc.local, 
addr.Local())
+		assert.Equal(t, tc.routable, addr.Routable())
+	}
+}
+
+func TestNetAddressReachabilityTo(t *testing.T) {
+	// TODO add more test cases
+	testCases := []struct {
+		addr         string
+		other        string
+		reachability int
+	}{
+		{
+			"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
+			"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8081",
+			0,
+		},
+		{"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@ya.ru:80", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", 1},
+	}
+
+	for _, tc := range testCases {
+		addr, err := NewNetAddressString(tc.addr)
+		require.Nil(t, err)
+
+		other, err := NewNetAddressString(tc.other)
+		require.Nil(t, err)
+
+		assert.Equal(t, tc.reachability, addr.ReachabilityTo(other))
+	}
+}
diff --git a/p2p/node_info.go b/p2p/node_info.go
new file mode 100644
index 0000000..81fe572
--- /dev/null
+++ b/p2p/node_info.go
@@ -0,0 +1,211 @@
+package p2p
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+
+	cmtbytes "github.com/strangelove-ventures/cometbft-client/libs/bytes"
+	cmtstrings "github.com/strangelove-ventures/cometbft-client/libs/strings"
+)
+
+const (
+	maxNumChannels = 16 // plenty of room for upgrades, for now
+)
+
+//-------------------------------------------------------------
+
+// NodeInfo exposes basic info of a node
+// and determines if we're compatible.
+type NodeInfo interface {
+	ID() ID
+	nodeInfoAddress
+	nodeInfoTransport
+}
+
+type nodeInfoAddress interface {
+	NetAddress() (*NetAddress, error)
+}
+
+// nodeInfoTransport validates a nodeInfo and checks
+// our compatibility with it. It's for use in the handshake.
+type nodeInfoTransport interface {
+	Validate() error
+	CompatibleWith(other NodeInfo) error
+}
+
+//-------------------------------------------------------------
+
+// ProtocolVersion contains the protocol versions for the software.
+type ProtocolVersion struct {
+	P2P   uint64 `json:"p2p"`
+	Block uint64 `json:"block"`
+	App   uint64 `json:"app"`
+}
+
+// NewProtocolVersion returns a fully populated ProtocolVersion.
+func NewProtocolVersion(p2p, block, app uint64) ProtocolVersion {
+	return ProtocolVersion{
+		P2P:   p2p,
+		Block: block,
+		App:   app,
+	}
+}
+
+//-------------------------------------------------------------
+
+// Assert DefaultNodeInfo satisfies NodeInfo
+var _ NodeInfo = DefaultNodeInfo{}
+
+// DefaultNodeInfo is the basic node information exchanged
+// between two peers during the CometBFT P2P handshake.
+type DefaultNodeInfo struct {
+	ProtocolVersion ProtocolVersion `json:"protocol_version"`
+
+	// Authenticate
+	// TODO: replace with NetAddress
+	DefaultNodeID ID     `json:"id"`          // authenticated identifier
+	ListenAddr    string `json:"listen_addr"` // accepting incoming
+
+	// Check compatibility.
+	// Channels are HexBytes so easier to read as JSON
+	Network  string            `json:"network"`  // network/chain ID
+	Version  string            `json:"version"`  // major.minor.revision
+	Channels cmtbytes.HexBytes `json:"channels"` // channels this node knows about
+
+	// ASCIIText fields
+	Moniker string               `json:"moniker"` // arbitrary moniker
+	Other   DefaultNodeInfoOther `json:"other"`   // other application specific data
+}
+
+// DefaultNodeInfoOther is the misc. application-specific data
+type DefaultNodeInfoOther struct {
+	TxIndex    string `json:"tx_index"`
+	RPCAddress string `json:"rpc_address"`
+}
+
+// ID returns the node's peer ID.
+func (info DefaultNodeInfo) ID() ID {
+	return info.DefaultNodeID
+}
+
+// Validate checks the self-reported DefaultNodeInfo is safe.
+// It returns an error if there
+// are too many Channels, if there are any duplicate Channels,
+// if the ListenAddr is malformed, or if the ListenAddr is a host name
+// that cannot be resolved to some IP.
+// TODO: constraints for Moniker/Other? Or is that for the UI ?
+// JAE: It needs to be done on the client, but to prevent ambiguous
+// unicode characters, maybe it's worth sanitizing it here.
+// In the future we might want to validate these, once we have a
+// name-resolution system up.
+// International clients could then use punycode (or we could use
+// url-encoding), and we just need to be careful with how we handle that in our
+// clients. (e.g. off by default).
+func (info DefaultNodeInfo) Validate() error {
+
+	// ID is already validated.
+
+	// Validate ListenAddr.
+	_, err := NewNetAddressString(IDAddressString(info.ID(), info.ListenAddr))
+	if err != nil {
+		return err
+	}
+
+	// Network is validated in CompatibleWith.
+
+	// Validate Version
+	if len(info.Version) > 0 &&
+		(!cmtstrings.IsASCIIText(info.Version) || cmtstrings.ASCIITrim(info.Version) == "") {
+
+		return fmt.Errorf("info.Version must be valid ASCII text without tabs, but got %v", info.Version)
+	}
+
+	// Validate Channels - ensure max and check for duplicates.
+	if len(info.Channels) > maxNumChannels {
+		return fmt.Errorf("info.Channels is too long (%v). Max is %v", len(info.Channels), maxNumChannels)
+	}
+	channels := make(map[byte]struct{})
+	for _, ch := range info.Channels {
+		_, ok := channels[ch]
+		if ok {
+			return fmt.Errorf("info.Channels contains duplicate channel id %v", ch)
+		}
+		channels[ch] = struct{}{}
+	}
+
+	// Validate Moniker.
+	if !cmtstrings.IsASCIIText(info.Moniker) || cmtstrings.ASCIITrim(info.Moniker) == "" {
+		return fmt.Errorf("info.Moniker must be valid non-empty ASCII text without tabs, but got %v", info.Moniker)
+	}
+
+	// Validate Other.
+	other := info.Other
+	txIndex := other.TxIndex
+	switch txIndex {
+	case "", "on", "off":
+	default:
+		return fmt.Errorf("info.Other.TxIndex should be either 'on', 'off', or empty string, got '%v'", txIndex)
+	}
+	// XXX: Should we be more strict about address formats?
+	rpcAddr := other.RPCAddress
+	if len(rpcAddr) > 0 && (!cmtstrings.IsASCIIText(rpcAddr) || cmtstrings.ASCIITrim(rpcAddr) == "") {
+		return fmt.Errorf("info.Other.RPCAddress=%v must be valid ASCII text without tabs", rpcAddr)
+	}
+
+	return nil
+}
+
+// CompatibleWith checks if two DefaultNodeInfo are compatible with each other.
+// CONTRACT: two nodes are compatible if the Block version and network match
+// and they have at least one channel in common.
+func (info DefaultNodeInfo) CompatibleWith(otherInfo NodeInfo) error {
+	other, ok := otherInfo.(DefaultNodeInfo)
+	if !ok {
+		return fmt.Errorf("wrong NodeInfo type. Expected DefaultNodeInfo, got %v", reflect.TypeOf(otherInfo))
+	}
+
+	if info.ProtocolVersion.Block != other.ProtocolVersion.Block {
+		return fmt.Errorf("peer is on a different Block version. Got %v, expected %v",
+			other.ProtocolVersion.Block, info.ProtocolVersion.Block)
+	}
+
+	// nodes must be on the same network
+	if info.Network != other.Network {
+		return fmt.Errorf("peer is on a different network. 
Got %v, expected %v", other.Network, info.Network) + } + + // if we have no channels, we're just testing + if len(info.Channels) == 0 { + return nil + } + + // for each of our channels, check if they have it + found := false +OUTER_LOOP: + for _, ch1 := range info.Channels { + for _, ch2 := range other.Channels { + if ch1 == ch2 { + found = true + break OUTER_LOOP // only need one + } + } + } + if !found { + return fmt.Errorf("peer has no common channels. Our channels: %v ; Peer channels: %v", info.Channels, other.Channels) + } + return nil +} + +// NetAddress returns a NetAddress derived from the DefaultNodeInfo - +// it includes the authenticated peer ID and the self-reported +// ListenAddr. Note that the ListenAddr is not authenticated and +// may not match that address actually dialed if its an outbound peer. +func (info DefaultNodeInfo) NetAddress() (*NetAddress, error) { + idAddr := IDAddressString(info.ID(), info.ListenAddr) + return NewNetAddressString(idAddr) +} + +func (info DefaultNodeInfo) HasChannel(chID byte) bool { + return bytes.Contains(info.Channels, []byte{chID}) +} diff --git a/p2p/types.go b/p2p/types.go new file mode 100644 index 0000000..510de5e --- /dev/null +++ b/p2p/types.go @@ -0,0 +1,5 @@ +package p2p + +import "github.com/strangelove-ventures/cometbft-client/p2p/conn" + +type ConnectionStatus = conn.ConnectionStatus diff --git a/proto/tendermint/crypto/keys.go b/proto/tendermint/crypto/keys.go new file mode 100644 index 0000000..a7c197c --- /dev/null +++ b/proto/tendermint/crypto/keys.go @@ -0,0 +1,216 @@ +package crypto + +import ( + "bytes" + math_bits "math/bits" +) + +var _ isPublicKey_Sum = &PublicKey_Ed25519{} +var _ isPublicKey_Sum = &PublicKey_Secp256K1{} + +// PublicKey defines the keys available for use with Validators +type PublicKey struct { + // Types that are valid to be assigned to Sum: + // + // *PublicKey_Ed25519 + // *PublicKey_Secp256K1 + Sum isPublicKey_Sum `protobuf_oneof:"sum"` +} + +type isPublicKey_Sum interface { + isPublicKey_Sum() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int + Compare(interface{}) int +} + +type PublicKey_Ed25519 struct { + Ed25519 []byte `protobuf:"bytes,1,opt,name=ed25519,proto3,oneof" json:"ed25519,omitempty"` +} +type PublicKey_Secp256K1 struct { + Secp256K1 []byte `protobuf:"bytes,2,opt,name=secp256k1,proto3,oneof" json:"secp256k1,omitempty"` +} + +func (*PublicKey_Ed25519) isPublicKey_Sum() {} +func (*PublicKey_Secp256K1) isPublicKey_Sum() {} +func (this *PublicKey_Ed25519) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*PublicKey_Ed25519) + if !ok { + that2, ok := that.(PublicKey_Ed25519) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := bytes.Compare(this.Ed25519, that1.Ed25519); c != 0 { + return c + } + return 0 +} +func (this *PublicKey_Secp256K1) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*PublicKey_Secp256K1) + if !ok { + that2, ok := that.(PublicKey_Secp256K1) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := bytes.Compare(this.Secp256K1, that1.Secp256K1); c != 0 { + return c + } + return 0 +} +func (this *PublicKey_Ed25519) Equal(that interface{}) bool { + if that == nil { + 
return this == nil + } + + that1, ok := that.(*PublicKey_Ed25519) + if !ok { + that2, ok := that.(PublicKey_Ed25519) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Ed25519, that1.Ed25519) { + return false + } + return true +} +func (this *PublicKey_Secp256K1) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PublicKey_Secp256K1) + if !ok { + that2, ok := that.(PublicKey_Secp256K1) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Secp256K1, that1.Secp256K1) { + return false + } + return true +} + +func (m *PublicKey_Ed25519) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PublicKey_Ed25519) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Ed25519 != nil { + i -= len(m.Ed25519) + copy(dAtA[i:], m.Ed25519) + i = encodeVarintKeys(dAtA, i, uint64(len(m.Ed25519))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PublicKey_Secp256K1) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PublicKey_Secp256K1) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Secp256K1 != nil { + i -= len(m.Secp256K1) + copy(dAtA[i:], m.Secp256K1) + i = encodeVarintKeys(dAtA, i, uint64(len(m.Secp256K1))) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func encodeVarintKeys(dAtA []byte, offset int, v uint64) int { + offset -= sovKeys(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PublicKey_Ed25519) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Ed25519 != nil { + l = len(m.Ed25519) + n += 1 + l + sovKeys(uint64(l)) + } + return n +} +func (m *PublicKey_Secp256K1) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Secp256K1 != nil { + l = len(m.Secp256K1) + n += 1 + l + sovKeys(uint64(l)) + } + return n +} +func sovKeys(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go new file mode 100644 index 0000000..dc64ae2 --- /dev/null +++ b/rpc/client/http/http.go @@ -0,0 +1,790 @@ +package http + +import ( + "context" + "errors" + "net/http" + "strings" + "time" + + "github.com/strangelove-ventures/cometbft-client/libs/bytes" + cmtjson "github.com/strangelove-ventures/cometbft-client/libs/json" + "github.com/strangelove-ventures/cometbft-client/libs/log" + cmtpubsub "github.com/strangelove-ventures/cometbft-client/libs/pubsub" + "github.com/strangelove-ventures/cometbft-client/libs/service" + cmtsync "github.com/strangelove-ventures/cometbft-client/libs/sync" + rpcclient "github.com/strangelove-ventures/cometbft-client/rpc/client" + ctypes "github.com/strangelove-ventures/cometbft-client/rpc/core/types" + jsonrpcclient "github.com/strangelove-ventures/cometbft-client/rpc/jsonrpc/client" + "github.com/strangelove-ventures/cometbft-client/types" +) + +/* +HTTP is a Client implementation that communicates with a CometBFT node over +JSON RPC and WebSockets. + +This is the main implementation you probably want to use in production code. 
+There are other implementations when calling the CometBFT node in-process
+(Local), or when you want to mock out the server for test code (mock).
+
+You can subscribe to any event published by CometBFT using the Subscribe method.
+Note delivery is best-effort. If you don't read events fast enough or the
+network is slow, CometBFT might cancel the subscription. The client will
+attempt to resubscribe (you don't need to do anything). It will keep trying
+every second indefinitely until successful.
+
+Request batching is available for JSON RPC requests over HTTP, and conforms to
+the JSON RPC specification (https://www.jsonrpc.org/specification#batch). See
+the example for more details.
+
+Example:
+
+	c, err := New("http://192.168.1.10:26657", "/websocket")
+	if err != nil {
+		// handle error
+	}
+
+	// call Start/Stop if you're subscribing to events
+	err = c.Start()
+	if err != nil {
+		// handle error
+	}
+	defer c.Stop()
+
+	res, err := c.Status()
+	if err != nil {
+		// handle error
+	}
+
+	// handle result
+*/
+type HTTP struct {
+	remote string
+	rpc    *jsonrpcclient.Client
+
+	*baseRPCClient
+	*WSEvents
+}
+
+// BatchHTTP provides the same interface as `HTTP`, but allows for batching of
+// requests (as per https://www.jsonrpc.org/specification#batch). Do not
+// instantiate directly - rather use the HTTP.NewBatch() method to create an
+// instance of this struct.
+//
+// Batching of HTTP requests is thread-safe in the sense that multiple
+// goroutines can each create their own batches and send them using the same
+// HTTP client. Multiple goroutines could also enqueue transactions in a single
+// batch, but ordering of transactions in the batch cannot be guaranteed in such
+// an example.
+type BatchHTTP struct {
+	rpcBatch *jsonrpcclient.RequestBatch
+	*baseRPCClient
+}
+
+// rpcClient is an internal interface to which our RPC clients (batch and
+// non-batch) must conform. Acts as an additional code-level sanity check to
+// make sure the implementations stay coherent.
+type rpcClient interface {
+	rpcclient.ABCIClient
+	rpcclient.HistoryClient
+	rpcclient.NetworkClient
+	rpcclient.SignClient
+	rpcclient.StatusClient
+}
+
+// baseRPCClient implements the basic RPC method logic without the actual
+// underlying RPC call functionality, which is provided by `caller`.
+type baseRPCClient struct {
+	caller jsonrpcclient.Caller
+}
+
+var (
+	_ rpcClient = (*HTTP)(nil)
+	_ rpcClient = (*BatchHTTP)(nil)
+	_ rpcClient = (*baseRPCClient)(nil)
+)
+
+//-----------------------------------------------------------------------------
+// HTTP
+
+// New takes a remote endpoint in the form <protocol>://<host>:<port> and
+// the websocket path (which always seems to be "/websocket").
+// An error is returned on invalid remote. The function panics when remote is nil.
+func New(remote, wsEndpoint string) (*HTTP, error) {
+	httpClient, err := jsonrpcclient.DefaultHTTPClient(remote)
+	if err != nil {
+		return nil, err
+	}
+	return NewWithClient(remote, wsEndpoint, httpClient)
+}
+
+// NewWithTimeout creates a timeout-enabled HTTP client. The timeout is given
+// in seconds.
+func NewWithTimeout(remote, wsEndpoint string, timeout uint) (*HTTP, error) {
+	httpClient, err := jsonrpcclient.DefaultHTTPClient(remote)
+	if err != nil {
+		return nil, err
+	}
+	httpClient.Timeout = time.Duration(timeout) * time.Second
+	return NewWithClient(remote, wsEndpoint, httpClient)
+}
+
+// NewWithClient allows for setting a custom http client (see New).
+// An error is returned on invalid remote. The function panics when remote is nil.
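+//
+// A minimal usage sketch (the endpoint and timeout values below are
+// illustrative, not recommended defaults):
+//
+//	httpClient, err := jsonrpcclient.DefaultHTTPClient("http://localhost:26657")
+//	if err != nil {
+//		// handle error
+//	}
+//	httpClient.Timeout = 10 * time.Second
+//	c, err := NewWithClient("http://localhost:26657", "/websocket", httpClient)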
+func NewWithClient(remote, wsEndpoint string, client *http.Client) (*HTTP, error) { + if client == nil { + panic("nil http.Client provided") + } + + rc, err := jsonrpcclient.NewWithHTTPClient(remote, client) + if err != nil { + return nil, err + } + + wsEvents, err := newWSEvents(remote, wsEndpoint) + if err != nil { + return nil, err + } + + httpClient := &HTTP{ + rpc: rc, + remote: remote, + baseRPCClient: &baseRPCClient{caller: rc}, + WSEvents: wsEvents, + } + + return httpClient, nil +} + +var _ rpcclient.Client = (*HTTP)(nil) + +// SetLogger sets a logger. +func (c *HTTP) SetLogger(l log.Logger) { + c.WSEvents.SetLogger(l) +} + +// Remote returns the remote network address in a string form. +func (c *HTTP) Remote() string { + return c.remote +} + +// NewBatch creates a new batch client for this HTTP client. +func (c *HTTP) NewBatch() *BatchHTTP { + rpcBatch := c.rpc.NewRequestBatch() + return &BatchHTTP{ + rpcBatch: rpcBatch, + baseRPCClient: &baseRPCClient{ + caller: rpcBatch, + }, + } +} + +//----------------------------------------------------------------------------- +// BatchHTTP + +// Send is a convenience function for an HTTP batch that will trigger the +// compilation of the batched requests and send them off using the client as a +// single request. On success, this returns a list of the deserialized results +// from each request in the sent batch. +func (b *BatchHTTP) Send(ctx context.Context) ([]interface{}, error) { + return b.rpcBatch.Send(ctx) +} + +// Clear will empty out this batch of requests and return the number of requests +// that were cleared out. +func (b *BatchHTTP) Clear() int { + return b.rpcBatch.Clear() +} + +// Count returns the number of enqueued requests waiting to be sent. +func (b *BatchHTTP) Count() int { + return b.rpcBatch.Count() +} + +//----------------------------------------------------------------------------- +// baseRPCClient + +func (c *baseRPCClient) Status(ctx context.Context) (*ctypes.ResultStatus, error) { + result := new(ctypes.ResultStatus) + _, err := c.caller.Call(ctx, "status", map[string]interface{}{}, result) + if err != nil { + return nil, err + } + + return result, nil +} + +func (c *baseRPCClient) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { + result := new(ctypes.ResultABCIInfo) + _, err := c.caller.Call(ctx, "abci_info", map[string]interface{}{}, result) + if err != nil { + return nil, err + } + + return result, nil +} + +func (c *baseRPCClient) ABCIQuery( + ctx context.Context, + path string, + data bytes.HexBytes, +) (*ctypes.ResultABCIQuery, error) { + return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) +} + +func (c *baseRPCClient) ABCIQueryWithOptions( + ctx context.Context, + path string, + data bytes.HexBytes, + opts rpcclient.ABCIQueryOptions, +) (*ctypes.ResultABCIQuery, error) { + result := new(ctypes.ResultABCIQuery) + _, err := c.caller.Call(ctx, "abci_query", + map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, + result) + if err != nil { + return nil, err + } + + return result, nil +} + +func (c *baseRPCClient) BroadcastTxCommit( + ctx context.Context, + tx types.Tx, +) (*ctypes.ResultBroadcastTxCommit, error) { + result := new(ctypes.ResultBroadcastTxCommit) + _, err := c.caller.Call(ctx, "broadcast_tx_commit", map[string]interface{}{"tx": tx}, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) BroadcastTxAsync( + ctx context.Context, + tx types.Tx, +) 
(*ctypes.ResultBroadcastTx, error) { + return c.broadcastTX(ctx, "broadcast_tx_async", tx) +} + +func (c *baseRPCClient) BroadcastTxSync( + ctx context.Context, + tx types.Tx, +) (*ctypes.ResultBroadcastTx, error) { + return c.broadcastTX(ctx, "broadcast_tx_sync", tx) +} + +func (c *baseRPCClient) broadcastTX( + ctx context.Context, + route string, + tx types.Tx, +) (*ctypes.ResultBroadcastTx, error) { + result := new(ctypes.ResultBroadcastTx) + _, err := c.caller.Call(ctx, route, map[string]interface{}{"tx": tx}, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) UnconfirmedTxs( + ctx context.Context, + limit *int, +) (*ctypes.ResultUnconfirmedTxs, error) { + result := new(ctypes.ResultUnconfirmedTxs) + params := make(map[string]interface{}) + if limit != nil { + params["limit"] = limit + } + _, err := c.caller.Call(ctx, "unconfirmed_txs", params, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) { + result := new(ctypes.ResultUnconfirmedTxs) + _, err := c.caller.Call(ctx, "num_unconfirmed_txs", map[string]interface{}{}, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { + result := new(ctypes.ResultCheckTx) + _, err := c.caller.Call(ctx, "check_tx", map[string]interface{}{"tx": tx}, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { + result := new(ctypes.ResultNetInfo) + _, err := c.caller.Call(ctx, "net_info", map[string]interface{}{}, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { + result := new(ctypes.ResultDumpConsensusState) + _, err := c.caller.Call(ctx, "dump_consensus_state", map[string]interface{}{}, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { + result := new(ctypes.ResultConsensusState) + _, err := c.caller.Call(ctx, "consensus_state", map[string]interface{}{}, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) ConsensusParams( + ctx context.Context, + height *int64, +) (*ctypes.ResultConsensusParams, error) { + result := new(ctypes.ResultConsensusParams) + params := make(map[string]interface{}) + if height != nil { + params["height"] = height + } + _, err := c.caller.Call(ctx, "consensus_params", params, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) Health(ctx context.Context) (*ctypes.ResultHealth, error) { + result := new(ctypes.ResultHealth) + _, err := c.caller.Call(ctx, "health", map[string]interface{}{}, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) BlockchainInfo( + ctx context.Context, + minHeight, + maxHeight int64, +) (*ctypes.ResultBlockchainInfo, error) { + result := new(ctypes.ResultBlockchainInfo) + _, err := c.caller.Call(ctx, "blockchain", + map[string]interface{}{"minHeight": minHeight, "maxHeight": maxHeight}, + result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) Genesis(ctx context.Context) 
(*ctypes.ResultGenesis, error) { + result := new(ctypes.ResultGenesis) + _, err := c.caller.Call(ctx, "genesis", map[string]interface{}{}, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) { + result := new(ctypes.ResultGenesisChunk) + _, err := c.caller.Call(ctx, "genesis_chunked", map[string]interface{}{"chunk": id}, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { + result := new(ctypes.ResultBlock) + params := make(map[string]interface{}) + if height != nil { + params["height"] = height + } + _, err := c.caller.Call(ctx, "block", params, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { + result := new(ctypes.ResultBlock) + params := map[string]interface{}{ + "hash": hash, + } + _, err := c.caller.Call(ctx, "block_by_hash", params, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) BlockResults( + ctx context.Context, + height *int64, +) (*ctypes.ResultBlockResults, error) { + result := new(ctypes.ResultBlockResults) + params := make(map[string]interface{}) + if height != nil { + params["height"] = height + } + _, err := c.caller.Call(ctx, "block_results", params, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error) { + result := new(ctypes.ResultHeader) + params := make(map[string]interface{}) + if height != nil { + params["height"] = height + } + _, err := c.caller.Call(ctx, "header", params, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) { + result := new(ctypes.ResultHeader) + params := map[string]interface{}{ + "hash": hash, + } + _, err := c.caller.Call(ctx, "header_by_hash", params, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { + result := new(ctypes.ResultCommit) + params := make(map[string]interface{}) + if height != nil { + params["height"] = height + } + _, err := c.caller.Call(ctx, "commit", params, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { + result := new(ctypes.ResultTx) + params := map[string]interface{}{ + "hash": hash, + "prove": prove, + } + _, err := c.caller.Call(ctx, "tx", params, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) TxSearch( + ctx context.Context, + query string, + prove bool, + page, + perPage *int, + orderBy string, +) (*ctypes.ResultTxSearch, error) { + result := new(ctypes.ResultTxSearch) + params := map[string]interface{}{ + "query": query, + "prove": prove, + "order_by": orderBy, + } + + if page != nil { + params["page"] = page + } + if perPage != nil { + params["per_page"] = perPage + } + + _, err := c.caller.Call(ctx, "tx_search", params, result) + if err != nil { + return nil, err + } + + return result, nil +} + +func (c *baseRPCClient) BlockSearch( + ctx 
context.Context, + query string, + page, perPage *int, + orderBy string, +) (*ctypes.ResultBlockSearch, error) { + result := new(ctypes.ResultBlockSearch) + params := map[string]interface{}{ + "query": query, + "order_by": orderBy, + } + + if page != nil { + params["page"] = page + } + if perPage != nil { + params["per_page"] = perPage + } + + _, err := c.caller.Call(ctx, "block_search", params, result) + if err != nil { + return nil, err + } + + return result, nil +} + +func (c *baseRPCClient) Validators( + ctx context.Context, + height *int64, + page, + perPage *int, +) (*ctypes.ResultValidators, error) { + result := new(ctypes.ResultValidators) + params := make(map[string]interface{}) + if page != nil { + params["page"] = page + } + if perPage != nil { + params["per_page"] = perPage + } + if height != nil { + params["height"] = height + } + _, err := c.caller.Call(ctx, "validators", params, result) + if err != nil { + return nil, err + } + return result, nil +} + +func (c *baseRPCClient) BroadcastEvidence( + ctx context.Context, + ev types.Evidence, +) (*ctypes.ResultBroadcastEvidence, error) { + result := new(ctypes.ResultBroadcastEvidence) + _, err := c.caller.Call(ctx, "broadcast_evidence", map[string]interface{}{"evidence": ev}, result) + if err != nil { + return nil, err + } + return result, nil +} + +//----------------------------------------------------------------------------- +// WSEvents + +var errNotRunning = errors.New("client is not running. Use .Start() method to start") + +// WSEvents is a wrapper around WSClient, which implements EventsClient. +type WSEvents struct { + service.BaseService + remote string + endpoint string + ws *jsonrpcclient.WSClient + + mtx cmtsync.RWMutex + subscriptions map[string]chan ctypes.ResultEvent // query -> chan +} + +func newWSEvents(remote, endpoint string) (*WSEvents, error) { + w := &WSEvents{ + endpoint: endpoint, + remote: remote, + subscriptions: make(map[string]chan ctypes.ResultEvent), + } + w.BaseService = *service.NewBaseService(nil, "WSEvents", w) + + var err error + w.ws, err = jsonrpcclient.NewWS(w.remote, w.endpoint, jsonrpcclient.OnReconnect(func() { + // resubscribe immediately + w.redoSubscriptionsAfter(0 * time.Second) + })) + if err != nil { + return nil, err + } + w.ws.SetLogger(w.Logger) + + return w, nil +} + +// OnStart implements service.Service by starting WSClient and event loop. +func (w *WSEvents) OnStart() error { + if err := w.ws.Start(); err != nil { + return err + } + + go w.eventListener() + + return nil +} + +// OnStop implements service.Service by stopping WSClient. +func (w *WSEvents) OnStop() { + if err := w.ws.Stop(); err != nil { + w.Logger.Error("Can't stop ws client", "err", err) + } +} + +// Subscribe implements EventsClient by using WSClient to subscribe given +// subscriber to query. By default, returns a channel with cap=1. Error is +// returned if it fails to subscribe. +// +// Channel is never closed to prevent clients from seeing an erroneous event. +// +// It returns an error if WSEvents is not running. 
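+//
+// A usage sketch (the query string is illustrative; see CometBFT's event
+// query syntax for valid queries):
+//
+//	ch, err := w.Subscribe(ctx, "", "tm.event = 'NewBlock'")
+//	if err != nil {
+//		// handle error
+//	}
+//	for ev := range ch {
+//		// process ev.Data; the channel is never closed, so break on your
+//		// own condition
+//	}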
+func (w *WSEvents) Subscribe(ctx context.Context, _, query string, + outCapacity ...int, +) (out <-chan ctypes.ResultEvent, err error) { + if !w.IsRunning() { + return nil, errNotRunning + } + + if err := w.ws.Subscribe(ctx, query); err != nil { + return nil, err + } + + outCap := 1 + if len(outCapacity) > 0 { + outCap = outCapacity[0] + } + + outc := make(chan ctypes.ResultEvent, outCap) + w.mtx.Lock() + // subscriber param is ignored because CometBFT will override it with + // remote IP anyway. + w.subscriptions[query] = outc + w.mtx.Unlock() + + return outc, nil +} + +// Unsubscribe implements EventsClient by using WSClient to unsubscribe given +// subscriber from query. +// +// It returns an error if WSEvents is not running. +func (w *WSEvents) Unsubscribe(ctx context.Context, _, query string) error { + if !w.IsRunning() { + return errNotRunning + } + + if err := w.ws.Unsubscribe(ctx, query); err != nil { + return err + } + + w.mtx.Lock() + _, ok := w.subscriptions[query] + if ok { + delete(w.subscriptions, query) + } + w.mtx.Unlock() + + return nil +} + +// UnsubscribeAll implements EventsClient by using WSClient to unsubscribe +// given subscriber from all the queries. +// +// It returns an error if WSEvents is not running. +func (w *WSEvents) UnsubscribeAll(ctx context.Context, _ string) error { + if !w.IsRunning() { + return errNotRunning + } + + if err := w.ws.UnsubscribeAll(ctx); err != nil { + return err + } + + w.mtx.Lock() + w.subscriptions = make(map[string]chan ctypes.ResultEvent) + w.mtx.Unlock() + + return nil +} + +// After being reconnected, it is necessary to redo subscription to server +// otherwise no data will be automatically received. +func (w *WSEvents) redoSubscriptionsAfter(d time.Duration) { + time.Sleep(d) + + w.mtx.RLock() + defer w.mtx.RUnlock() + for q := range w.subscriptions { + err := w.ws.Subscribe(context.Background(), q) + if err != nil { + w.Logger.Error("Failed to resubscribe", "err", err) + } + } +} + +func isErrAlreadySubscribed(err error) bool { + return strings.Contains(err.Error(), cmtpubsub.ErrAlreadySubscribed.Error()) +} + +func (w *WSEvents) eventListener() { + for { + select { + case resp, ok := <-w.ws.ResponsesCh: + if !ok { + return + } + + if resp.Error != nil { + w.Logger.Error("WS error", "err", resp.Error.Error()) + // Error can be ErrAlreadySubscribed or max client (subscriptions per + // client) reached or CometBFT exited. + // We can ignore ErrAlreadySubscribed, but need to retry in other + // cases. + if !isErrAlreadySubscribed(resp.Error) { + // Resubscribe after 1 second to give CometBFT time to restart (if + // crashed). 
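+					// Note that redoSubscriptionsAfter sleeps synchronously, so the
+					// event loop processes no further responses until the
+					// resubscribe attempt has fired.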
+ w.redoSubscriptionsAfter(1 * time.Second) + } + continue + } + + result := new(ctypes.ResultEvent) + err := cmtjson.Unmarshal(resp.Result, result) + if err != nil { + w.Logger.Error("failed to unmarshal response", "err", err) + continue + } + + w.mtx.RLock() + if out, ok := w.subscriptions[result.Query]; ok { + if cap(out) == 0 { + out <- *result + } else { + select { + case out <- *result: + default: + w.Logger.Error("wanted to publish ResultEvent, but out channel is full", "result", result, "query", result.Query) + } + } + } + w.mtx.RUnlock() + case <-w.Quit(): + return + } + } +} diff --git a/rpc/client/interface.go b/rpc/client/interface.go new file mode 100644 index 0000000..51b3105 --- /dev/null +++ b/rpc/client/interface.go @@ -0,0 +1,154 @@ +package client + +/* +The client package provides a general purpose interface (Client) for connecting +to a CometBFT node, as well as higher-level functionality. + +The main implementation for production code is client.HTTP, which +connects via http to the jsonrpc interface of the CometBFT node. + +For connecting to a node running in the same process (eg. when +compiling the abci app in the same process), you can use the client.Local +implementation. + +For mocking out server responses during testing to see behavior for +arbitrary return values, use the mock package. + +In addition to the Client interface, which should be used externally +for maximum flexibility and testability, and two implementations, +this package also provides helper functions that work on any Client +implementation. +*/ + +import ( + "context" + + "github.com/strangelove-ventures/cometbft-client/libs/bytes" + "github.com/strangelove-ventures/cometbft-client/libs/service" + ctypes "github.com/strangelove-ventures/cometbft-client/rpc/core/types" + "github.com/strangelove-ventures/cometbft-client/types" +) + +// Client wraps most important rpc calls a client would make if you want to +// listen for events, test if it also implements events.EventSwitch. +type Client interface { + service.Service + ABCIClient + EventsClient + HistoryClient + NetworkClient + SignClient + StatusClient + //EvidenceClient + MempoolClient +} + +// ABCIClient groups together the functionality that principally affects the +// ABCI app. +// +// In many cases this will be all we want, so we can accept an interface which +// is easier to mock. +type ABCIClient interface { + // Reading from abci app + ABCIInfo(context.Context) (*ctypes.ResultABCIInfo, error) + ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) + ABCIQueryWithOptions(ctx context.Context, path string, data bytes.HexBytes, + opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) + + // Writing to abci app + BroadcastTxCommit(context.Context, types.Tx) (*ctypes.ResultBroadcastTxCommit, error) + BroadcastTxAsync(context.Context, types.Tx) (*ctypes.ResultBroadcastTx, error) + BroadcastTxSync(context.Context, types.Tx) (*ctypes.ResultBroadcastTx, error) +} + +// SignClient groups together the functionality needed to get valid signatures +// and prove anything about the chain. 
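+//
+// For example, a sketch of fetching a block and its commit at one height
+// (c is any SignClient implementation; the height value is illustrative):
+//
+//	height := int64(100)
+//	block, err := c.Block(ctx, &height)
+//	// handle err ...
+//	commit, err := c.Commit(ctx, &height)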
+type SignClient interface {
+	Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error)
+	BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error)
+	BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error)
+	Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error)
+	HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error)
+	Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error)
+	Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error)
+	Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error)
+
+	// TxSearch defines a method to search for a paginated set of transactions by
+	// transaction event search criteria.
+	TxSearch(
+		ctx context.Context,
+		query string,
+		prove bool,
+		page, perPage *int,
+		orderBy string,
+	) (*ctypes.ResultTxSearch, error)
+
+	// BlockSearch defines a method to search for a paginated set of blocks based
+	// on FinalizeBlock event search criteria.
+	BlockSearch(
+		ctx context.Context,
+		query string,
+		page, perPage *int,
+		orderBy string,
+	) (*ctypes.ResultBlockSearch, error)
+}
+
+// HistoryClient provides access to data from genesis to now in large chunks.
+type HistoryClient interface {
+	Genesis(context.Context) (*ctypes.ResultGenesis, error)
+	GenesisChunked(context.Context, uint) (*ctypes.ResultGenesisChunk, error)
+	BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error)
+}
+
+// StatusClient provides access to general chain info.
+type StatusClient interface {
+	Status(context.Context) (*ctypes.ResultStatus, error)
+}
+
+// NetworkClient provides general info about the network state. It is usually
+// not needed.
+type NetworkClient interface {
+	NetInfo(context.Context) (*ctypes.ResultNetInfo, error)
+	DumpConsensusState(context.Context) (*ctypes.ResultDumpConsensusState, error)
+	ConsensusState(context.Context) (*ctypes.ResultConsensusState, error)
+	ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error)
+	Health(context.Context) (*ctypes.ResultHealth, error)
+}
+
+// EventsClient is reactive; you can subscribe to any message, given the proper
+// string. See cometbft/types/events.go.
+type EventsClient interface {
+	// Subscribe subscribes given subscriber to query. Returns a channel with
+	// cap=1 onto which events are published. An error is returned if it fails to
+	// subscribe. outCapacity can be used optionally to set capacity for the
+	// channel. Channel is never closed to prevent accidental reads.
+	//
+	// ctx cannot be used to unsubscribe. To unsubscribe, use either Unsubscribe
+	// or UnsubscribeAll.
+	Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan ctypes.ResultEvent, err error)
+	// Unsubscribe unsubscribes given subscriber from query.
+	Unsubscribe(ctx context.Context, subscriber, query string) error
+	// UnsubscribeAll unsubscribes given subscriber from all the queries.
+	UnsubscribeAll(ctx context.Context, subscriber string) error
+}
+
+// MempoolClient shows us data about current mempool state.
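+//
+// A sketch of querying the mempool (c is any MempoolClient implementation;
+// the limit is illustrative, and nil uses the server default):
+//
+//	limit := 30
+//	res, err := c.UnconfirmedTxs(ctx, &limit)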
+type MempoolClient interface {
+	UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error)
+	NumUnconfirmedTxs(context.Context) (*ctypes.ResultUnconfirmedTxs, error)
+	CheckTx(context.Context, types.Tx) (*ctypes.ResultCheckTx, error)
+}
+
+// EvidenceClient is used for submitting evidence of malicious
+// behavior.
+//type EvidenceClient interface {
+//	BroadcastEvidence(context.Context, types.Evidence) (*ctypes.ResultBroadcastEvidence, error)
+//}
+
+// RemoteClient is a Client, which can also return the remote network address.
+type RemoteClient interface {
+	Client
+
+	// Remote returns the remote network address in a string form.
+	Remote() string
+}
diff --git a/rpc/client/types.go b/rpc/client/types.go
new file mode 100644
index 0000000..6a23fa4
--- /dev/null
+++ b/rpc/client/types.go
@@ -0,0 +1,11 @@
+package client
+
+// ABCIQueryOptions can be used to provide options for the ABCIQuery call other
+// than DefaultABCIQueryOptions.
+type ABCIQueryOptions struct {
+	Height int64
+	Prove  bool
+}
+
+// DefaultABCIQueryOptions are latest height (0) and prove false.
+var DefaultABCIQueryOptions = ABCIQueryOptions{Height: 0, Prove: false}
diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go
new file mode 100644
index 0000000..6ff1835
--- /dev/null
+++ b/rpc/core/types/responses.go
@@ -0,0 +1,243 @@
+package coretypes
+
+import (
+	"encoding/json"
+	"time"
+
+	abci "github.com/strangelove-ventures/cometbft-client/abci/types"
+	"github.com/strangelove-ventures/cometbft-client/crypto"
+	"github.com/strangelove-ventures/cometbft-client/libs/bytes"
+	"github.com/strangelove-ventures/cometbft-client/p2p"
+	"github.com/strangelove-ventures/cometbft-client/types"
+)
+
+// List of blocks
+type ResultBlockchainInfo struct {
+	LastHeight int64              `json:"last_height"`
+	BlockMetas []*types.BlockMeta `json:"block_metas"`
+}
+
+// Genesis file
+type ResultGenesis struct {
+	Genesis *types.GenesisDoc `json:"genesis"`
+}
+
+// ResultGenesisChunk is the output format for the chunked/paginated
+// interface. These chunks are produced by converting the genesis
+// document to JSON, splitting the resulting payload into
+// 16-megabyte blocks, and then base64-encoding each block.
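+//
+// A reassembly sketch (c is a hypothetical HistoryClient; chunks are fetched
+// in order, error handling omitted, and "encoding/base64" is assumed imported):
+//
+//	first, _ := c.GenesisChunked(ctx, 0)
+//	var doc []byte
+//	for i := 0; i < first.TotalChunks; i++ {
+//		chunk, _ := c.GenesisChunked(ctx, uint(i))
+//		raw, _ := base64.StdEncoding.DecodeString(chunk.Data)
+//		doc = append(doc, raw...)
+//	}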
+type ResultGenesisChunk struct { + ChunkNumber int `json:"chunk"` + TotalChunks int `json:"total"` + Data string `json:"data"` +} + +// Single block (with meta) +type ResultBlock struct { + BlockID types.BlockID `json:"block_id"` + Block *types.Block `json:"block"` +} + +// ResultHeader represents the response for a Header RPC Client query +type ResultHeader struct { + Header *types.Header `json:"header"` +} + +// Commit and Header +type ResultCommit struct { + types.SignedHeader `json:"signed_header"` + CanonicalCommit bool `json:"canonical"` +} + +// ABCI results from a block +type ResultBlockResults struct { + Height int64 `json:"height"` + TxsResults []*abci.ExecTxResult `json:"txs_results"` + BeginBlockEvents []abci.Event `json:"begin_block_events"` + EndBlockEvents []abci.Event `json:"end_block_events"` + FinalizeBlockEvents []abci.Event `json:"finalize_block_events"` + ValidatorUpdates []abci.ValidatorUpdate `json:"validator_updates"` + //ConsensusParamUpdates *cmtproto.ConsensusParams `json:"consensus_param_updates"` + AppHash []byte `json:"app_hash"` +} + +// Info about the node's syncing state +type SyncInfo struct { + LatestBlockHash bytes.HexBytes `json:"latest_block_hash"` + LatestAppHash bytes.HexBytes `json:"latest_app_hash"` + LatestBlockHeight int64 `json:"latest_block_height"` + LatestBlockTime time.Time `json:"latest_block_time"` + + EarliestBlockHash bytes.HexBytes `json:"earliest_block_hash"` + EarliestAppHash bytes.HexBytes `json:"earliest_app_hash"` + EarliestBlockHeight int64 `json:"earliest_block_height"` + EarliestBlockTime time.Time `json:"earliest_block_time"` + + CatchingUp bool `json:"catching_up"` +} + +// Info about the node's validator +type ValidatorInfo struct { + Address bytes.HexBytes `json:"address"` + PubKey crypto.PubKey `json:"pub_key"` + VotingPower int64 `json:"voting_power"` +} + +// Node Status +type ResultStatus struct { + NodeInfo p2p.DefaultNodeInfo `json:"node_info"` + SyncInfo SyncInfo `json:"sync_info"` + ValidatorInfo ValidatorInfo `json:"validator_info"` +} + +// Is TxIndexing enabled +func (s *ResultStatus) TxIndexEnabled() bool { + if s == nil { + return false + } + return s.NodeInfo.Other.TxIndex == "on" +} + +// Info about peer connections +type ResultNetInfo struct { + Listening bool `json:"listening"` + Listeners []string `json:"listeners"` + NPeers int `json:"n_peers"` + Peers []Peer `json:"peers"` +} + +// Log from dialing seeds +type ResultDialSeeds struct { + Log string `json:"log"` +} + +// Log from dialing peers +type ResultDialPeers struct { + Log string `json:"log"` +} + +// A peer +type Peer struct { + NodeInfo p2p.DefaultNodeInfo `json:"node_info"` + IsOutbound bool `json:"is_outbound"` + ConnectionStatus p2p.ConnectionStatus `json:"connection_status"` + RemoteIP string `json:"remote_ip"` +} + +// Validators for a height. +type ResultValidators struct { + BlockHeight int64 `json:"block_height"` + Validators []*types.Validator `json:"validators"` + // Count of actual validators in this result + Count int `json:"count"` + // Total number of validators + Total int `json:"total"` +} + +// ConsensusParams for given height +type ResultConsensusParams struct { + BlockHeight int64 `json:"block_height"` + ConsensusParams types.ConsensusParams `json:"consensus_params"` +} + +// Info about the consensus state. 
+// UNSTABLE +type ResultDumpConsensusState struct { + RoundState json.RawMessage `json:"round_state"` + Peers []PeerStateInfo `json:"peers"` +} + +// UNSTABLE +type PeerStateInfo struct { + NodeAddress string `json:"node_address"` + PeerState json.RawMessage `json:"peer_state"` +} + +// UNSTABLE +type ResultConsensusState struct { + RoundState json.RawMessage `json:"round_state"` +} + +// CheckTx result +type ResultBroadcastTx struct { + Code uint32 `json:"code"` + Data bytes.HexBytes `json:"data"` + Log string `json:"log"` + Codespace string `json:"codespace"` + + Hash bytes.HexBytes `json:"hash"` +} + +// CheckTx and ExecTx results +type ResultBroadcastTxCommit struct { + CheckTx abci.ResponseCheckTx `json:"check_tx"` + TxResult abci.ExecTxResult `json:"tx_result"` + Hash bytes.HexBytes `json:"hash"` + Height int64 `json:"height"` +} + +// ResultCheckTx wraps abci.ResponseCheckTx. +type ResultCheckTx struct { + abci.ResponseCheckTx +} + +// Result of querying for a tx +type ResultTx struct { + Hash bytes.HexBytes `json:"hash"` + Height int64 `json:"height"` + Index uint32 `json:"index"` + TxResult abci.ExecTxResult `json:"tx_result"` + Tx types.Tx `json:"tx"` + Proof types.TxProof `json:"proof,omitempty"` +} + +// Result of searching for txs +type ResultTxSearch struct { + Txs []*ResultTx `json:"txs"` + TotalCount int `json:"total_count"` +} + +// ResultBlockSearch defines the RPC response type for a block search by events. +type ResultBlockSearch struct { + Blocks []*ResultBlock `json:"blocks"` + TotalCount int `json:"total_count"` +} + +// List of mempool txs +type ResultUnconfirmedTxs struct { + Count int `json:"n_txs"` + Total int `json:"total"` + TotalBytes int64 `json:"total_bytes"` + Txs []types.Tx `json:"txs"` +} + +// Info abci msg +type ResultABCIInfo struct { + Response abci.ResponseInfo `json:"response"` +} + +// Query abci msg +type ResultABCIQuery struct { + Response abci.ResponseQuery `json:"response"` +} + +// Result of broadcasting evidence +type ResultBroadcastEvidence struct { + Hash []byte `json:"hash"` +} + +// empty results +type ( + ResultUnsafeFlushMempool struct{} + ResultUnsafeProfile struct{} + ResultSubscribe struct{} + ResultUnsubscribe struct{} + ResultHealth struct{} +) + +// Event data from a subscription +type ResultEvent struct { + Query string `json:"query"` + Data types.TMEventData `json:"data"` + Events map[string][]string `json:"events"` +} diff --git a/rpc/jsonrpc/client/args_test.go b/rpc/jsonrpc/client/args_test.go new file mode 100644 index 0000000..2506f30 --- /dev/null +++ b/rpc/jsonrpc/client/args_test.go @@ -0,0 +1,39 @@ +package client + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type Tx []byte + +type Foo struct { + Bar int + Baz string +} + +func TestArgToJSON(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + cases := []struct { + input interface{} + expected string + }{ + {[]byte("1234"), "0x31323334"}, + {Tx("654"), "0x363534"}, + {Foo{7, "hello"}, `{"Bar":"7","Baz":"hello"}`}, + } + + for i, tc := range cases { + args := map[string]interface{}{"data": tc.input} + err := argsToJSON(args) + require.Nil(err, "%d: %+v", i, err) + require.Equal(1, len(args), "%d", i) + data, ok := args["data"].(string) + require.True(ok, "%d: %#v", i, args["data"]) + assert.Equal(tc.expected, data, "%d", i) + } +} diff --git a/rpc/jsonrpc/client/decode.go b/rpc/jsonrpc/client/decode.go new file mode 100644 index 0000000..c3c5c1f --- /dev/null +++ 
b/rpc/jsonrpc/client/decode.go @@ -0,0 +1,126 @@ +package client + +import ( + "encoding/json" + "errors" + "fmt" + + cmtjson "github.com/strangelove-ventures/cometbft-client/libs/json" + "github.com/strangelove-ventures/cometbft-client/rpc/jsonrpc/types" +) + +func unmarshalResponseBytes( + responseBytes []byte, + expectedID types.JSONRPCIntID, + result interface{}, +) (interface{}, error) { + + // Read response. If rpc/core/types is imported, the result will unmarshal + // into the correct type. + response := &types.RPCResponse{} + if err := json.Unmarshal(responseBytes, response); err != nil { + return nil, fmt.Errorf("error unmarshalling: %w", err) + } + + if response.Error != nil { + return nil, response.Error + } + + if err := validateAndVerifyID(response, expectedID); err != nil { + return nil, fmt.Errorf("wrong ID: %w", err) + } + + // Unmarshal the RawMessage into the result. + if err := cmtjson.Unmarshal(response.Result, result); err != nil { + return nil, fmt.Errorf("error unmarshalling result: %w", err) + } + + return result, nil +} + +func unmarshalResponseBytesArray( + responseBytes []byte, + expectedIDs []types.JSONRPCIntID, + results []interface{}, +) ([]interface{}, error) { + + var ( + responses []types.RPCResponse + ) + + if err := json.Unmarshal(responseBytes, &responses); err != nil { + return nil, fmt.Errorf("error unmarshalling: %w", err) + } + + // No response error checking here as there may be a mixture of successful + // and unsuccessful responses. + + if len(results) != len(responses) { + return nil, fmt.Errorf( + "expected %d result objects into which to inject responses, but got %d", + len(responses), + len(results), + ) + } + + // Intersect IDs from responses with expectedIDs. + ids := make([]types.JSONRPCIntID, len(responses)) + var ok bool + for i, resp := range responses { + ids[i], ok = resp.ID.(types.JSONRPCIntID) + if !ok { + return nil, fmt.Errorf("expected JSONRPCIntID, got %T", resp.ID) + } + } + if err := validateResponseIDs(ids, expectedIDs); err != nil { + return nil, fmt.Errorf("wrong IDs: %w", err) + } + + for i := 0; i < len(responses); i++ { + if err := cmtjson.Unmarshal(responses[i].Result, results[i]); err != nil { + return nil, fmt.Errorf("error unmarshalling #%d result: %w", i, err) + } + } + + return results, nil +} + +func validateResponseIDs(ids, expectedIDs []types.JSONRPCIntID) error { + m := make(map[types.JSONRPCIntID]bool, len(expectedIDs)) + for _, expectedID := range expectedIDs { + m[expectedID] = true + } + + for i, id := range ids { + if m[id] { + delete(m, id) + } else { + return fmt.Errorf("unsolicited ID #%d: %v", i, id) + } + } + + return nil +} + +// From the JSON-RPC 2.0 spec: +// id: It MUST be the same as the value of the id member in the Request Object. 
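+//
+// validateAndVerifyID enforces that rule for the integer IDs this client
+// issues: a response carrying a non-integer ID, or an ID that differs from
+// the request's, is rejected.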
+func validateAndVerifyID(res *types.RPCResponse, expectedID types.JSONRPCIntID) error { + if err := validateResponseID(res.ID); err != nil { + return err + } + if expectedID != res.ID.(types.JSONRPCIntID) { // validateResponseID ensured res.ID has the right type + return fmt.Errorf("response ID (%d) does not match request ID (%d)", res.ID, expectedID) + } + return nil +} + +func validateResponseID(id interface{}) error { + if id == nil { + return errors.New("no ID") + } + _, ok := id.(types.JSONRPCIntID) + if !ok { + return fmt.Errorf("expected JSONRPCIntID, but got: %T", id) + } + return nil +} diff --git a/rpc/jsonrpc/client/encode.go b/rpc/jsonrpc/client/encode.go new file mode 100644 index 0000000..a6b81c0 --- /dev/null +++ b/rpc/jsonrpc/client/encode.go @@ -0,0 +1,46 @@ +package client + +import ( + "fmt" + "net/url" + "reflect" + + cmtjson "github.com/strangelove-ventures/cometbft-client/libs/json" +) + +func argsToURLValues(args map[string]interface{}) (url.Values, error) { + values := make(url.Values) + if len(args) == 0 { + return values, nil + } + + err := argsToJSON(args) + if err != nil { + return nil, err + } + + for key, val := range args { + values.Set(key, val.(string)) + } + + return values, nil +} + +func argsToJSON(args map[string]interface{}) error { + for k, v := range args { + rt := reflect.TypeOf(v) + isByteSlice := rt.Kind() == reflect.Slice && rt.Elem().Kind() == reflect.Uint8 + if isByteSlice { + bytes := reflect.ValueOf(v).Bytes() + args[k] = fmt.Sprintf("0x%X", bytes) + continue + } + + data, err := cmtjson.Marshal(v) + if err != nil { + return err + } + args[k] = string(data) + } + return nil +} diff --git a/rpc/jsonrpc/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go new file mode 100644 index 0000000..177f547 --- /dev/null +++ b/rpc/jsonrpc/client/http_json_client.go @@ -0,0 +1,419 @@ +package client + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "net/url" + "strings" + + cmtsync "github.com/strangelove-ventures/cometbft-client/libs/sync" + "github.com/strangelove-ventures/cometbft-client/rpc/jsonrpc/types" +) + +const ( + protoHTTP = "http" + protoHTTPS = "https" + protoWSS = "wss" + protoWS = "ws" + protoTCP = "tcp" + protoUNIX = "unix" +) + +//------------------------------------------------------------- + +// Parsed URL structure +type parsedURL struct { + url.URL + + isUnixSocket bool +} + +// Parse URL and set defaults +func newParsedURL(remoteAddr string) (*parsedURL, error) { + u, err := url.Parse(remoteAddr) + if err != nil { + return nil, err + } + + // default to tcp if nothing specified + if u.Scheme == "" { + u.Scheme = protoTCP + } + + pu := &parsedURL{ + URL: *u, + isUnixSocket: false, + } + + if u.Scheme == protoUNIX { + pu.isUnixSocket = true + } + + return pu, nil +} + +// Change protocol to HTTP for unknown protocols and TCP protocol - useful for RPC connections +func (u *parsedURL) SetDefaultSchemeHTTP() { + // protocol to use for http operations, to support both http and https + switch u.Scheme { + case protoHTTP, protoHTTPS, protoWS, protoWSS: + // known protocols not changed + default: + // default to http for unknown protocols (ex. 
tcp) + u.Scheme = protoHTTP + } +} + +// Get full address without the protocol - useful for Dialer connections +func (u parsedURL) GetHostWithPath() string { + // Remove protocol, userinfo and # fragment, assume opaque is empty + return u.Host + u.EscapedPath() +} + +// Get a trimmed address - useful for WS connections +func (u parsedURL) GetTrimmedHostWithPath() string { + // if it's not an unix socket we return the normal URL + if !u.isUnixSocket { + return u.GetHostWithPath() + } + // if it's a unix socket we replace the host slashes with a period + // this is because otherwise the http.Client would think that the + // domain is invalid. + return strings.ReplaceAll(u.GetHostWithPath(), "/", ".") +} + +// GetDialAddress returns the endpoint to dial for the parsed URL +func (u parsedURL) GetDialAddress() string { + // if it's not a unix socket we return the host, example: localhost:443 + if !u.isUnixSocket { + return u.Host + } + // otherwise we return the path of the unix socket, ex /tmp/socket + return u.GetHostWithPath() +} + +// Get a trimmed address with protocol - useful as address in RPC connections +func (u parsedURL) GetTrimmedURL() string { + return u.Scheme + "://" + u.GetTrimmedHostWithPath() +} + +//------------------------------------------------------------- + +// HTTPClient is a common interface for JSON-RPC HTTP clients. +type HTTPClient interface { + // Call calls the given method with the params and returns a result. + Call(ctx context.Context, method string, params map[string]interface{}, result interface{}) (interface{}, error) +} + +// Caller implementers can facilitate calling the JSON-RPC endpoint. +type Caller interface { + Call(ctx context.Context, method string, params map[string]interface{}, result interface{}) (interface{}, error) +} + +//------------------------------------------------------------- + +// Client is a JSON-RPC client, which sends POST HTTP requests to the +// remote server. +// +// Client is safe for concurrent use by multiple goroutines. +type Client struct { + address string + username string + password string + + client *http.Client + + mtx cmtsync.Mutex + nextReqID int +} + +var _ HTTPClient = (*Client)(nil) + +// Both Client and RequestBatch can facilitate calls to the JSON +// RPC endpoint. +var _ Caller = (*Client)(nil) +var _ Caller = (*RequestBatch)(nil) + +var _ fmt.Stringer = (*Client)(nil) + +// New returns a Client pointed at the given address. +// An error is returned on invalid remote. The function panics when remote is nil. +func New(remote string) (*Client, error) { + httpClient, err := DefaultHTTPClient(remote) + if err != nil { + return nil, err + } + return NewWithHTTPClient(remote, httpClient) +} + +// NewWithHTTPClient returns a Client pointed at the given +// address using a custom http client. An error is returned on invalid remote. +// The function panics when remote is nil. +func NewWithHTTPClient(remote string, client *http.Client) (*Client, error) { + if client == nil { + panic("nil http.Client provided") + } + + parsedURL, err := newParsedURL(remote) + if err != nil { + return nil, fmt.Errorf("invalid remote %s: %s", remote, err) + } + + parsedURL.SetDefaultSchemeHTTP() + + address := parsedURL.GetTrimmedURL() + username := parsedURL.User.Username() + password, _ := parsedURL.User.Password() + + rpcClient := &Client{ + address: address, + username: username, + password: password, + client: client, + } + + return rpcClient, nil +} + +// Call issues a POST HTTP request. Requests are JSON encoded. 
Content-Type: +// application/json. +func (c *Client) Call( + ctx context.Context, + method string, + params map[string]interface{}, + result interface{}, +) (interface{}, error) { + id := c.nextRequestID() + + request, err := types.MapToRequest(id, method, params) + if err != nil { + return nil, fmt.Errorf("failed to encode params: %w", err) + } + + requestBytes, err := json.Marshal(request) + if err != nil { + return nil, fmt.Errorf("failed to marshal request: %w", err) + } + + requestBuf := bytes.NewBuffer(requestBytes) + httpRequest, err := http.NewRequestWithContext(ctx, http.MethodPost, c.address, requestBuf) + if err != nil { + return nil, fmt.Errorf("request failed: %w", err) + } + + httpRequest.Header.Set("Content-Type", "application/json") + + if c.username != "" || c.password != "" { + httpRequest.SetBasicAuth(c.username, c.password) + } + + httpResponse, err := c.client.Do(httpRequest) + if err != nil { + return nil, fmt.Errorf("post failed: %w", err) + } + defer httpResponse.Body.Close() + + responseBytes, err := io.ReadAll(httpResponse.Body) + if err != nil { + return nil, fmt.Errorf("%s. Failed to read response body: %w", getHTTPRespErrPrefix(httpResponse), err) + } + + res, err := unmarshalResponseBytes(responseBytes, id, result) + if err != nil { + return nil, fmt.Errorf("%s. %w", getHTTPRespErrPrefix(httpResponse), err) + } + return res, nil +} + +func getHTTPRespErrPrefix(resp *http.Response) string { + return fmt.Sprintf("error in json rpc client, with http response metadata: (Status: %s, Protocol %s)", resp.Status, resp.Proto) +} + +func (c *Client) String() string { + return fmt.Sprintf("&Client{user=%v, addr=%v, client=%v, nextReqID=%v}", c.username, c.address, c.client, c.nextReqID) +} + +// NewRequestBatch starts a batch of requests for this client. 
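+//
+// A usage sketch (the method name and result type are illustrative; res is
+// populated only after Send returns):
+//
+//	batch := c.NewRequestBatch()
+//	var res MyResult
+//	_, _ = batch.Call(ctx, "status", map[string]interface{}{}, &res)
+//	results, err := batch.Send(ctx)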
+func (c *Client) NewRequestBatch() *RequestBatch { + return &RequestBatch{ + requests: make([]*jsonRPCBufferedRequest, 0), + client: c, + } +} + +func (c *Client) sendBatch(ctx context.Context, requests []*jsonRPCBufferedRequest) ([]interface{}, error) { + reqs := make([]types.RPCRequest, 0, len(requests)) + results := make([]interface{}, 0, len(requests)) + for _, req := range requests { + reqs = append(reqs, req.request) + results = append(results, req.result) + } + + // serialize the array of requests into a single JSON object + requestBytes, err := json.Marshal(reqs) + if err != nil { + return nil, fmt.Errorf("json marshal: %w", err) + } + + httpRequest, err := http.NewRequestWithContext(ctx, http.MethodPost, c.address, bytes.NewBuffer(requestBytes)) + if err != nil { + return nil, fmt.Errorf("new request: %w", err) + } + + httpRequest.Header.Set("Content-Type", "application/json") + + if c.username != "" || c.password != "" { + httpRequest.SetBasicAuth(c.username, c.password) + } + + httpResponse, err := c.client.Do(httpRequest) + if err != nil { + return nil, fmt.Errorf("post: %w", err) + } + + defer httpResponse.Body.Close() + + responseBytes, err := io.ReadAll(httpResponse.Body) + if err != nil { + return nil, fmt.Errorf("read response body: %w", err) + } + + // collect ids to check responses IDs in unmarshalResponseBytesArray + ids := make([]types.JSONRPCIntID, len(requests)) + for i, req := range requests { + ids[i] = req.request.ID.(types.JSONRPCIntID) + } + + return unmarshalResponseBytesArray(responseBytes, ids, results) +} + +func (c *Client) nextRequestID() types.JSONRPCIntID { + c.mtx.Lock() + id := c.nextReqID + c.nextReqID++ + c.mtx.Unlock() + return types.JSONRPCIntID(id) +} + +//------------------------------------------------------------------------------------ + +// jsonRPCBufferedRequest encapsulates a single buffered request, as well as its +// anticipated response structure. +type jsonRPCBufferedRequest struct { + request types.RPCRequest + result interface{} // The result will be deserialized into this object. +} + +// RequestBatch allows us to buffer multiple request/response structures +// into a single batch request. Note that this batch acts like a FIFO queue, and +// is thread-safe. +type RequestBatch struct { + client *Client + + mtx cmtsync.Mutex + requests []*jsonRPCBufferedRequest +} + +// Count returns the number of enqueued requests waiting to be sent. +func (b *RequestBatch) Count() int { + b.mtx.Lock() + defer b.mtx.Unlock() + return len(b.requests) +} + +func (b *RequestBatch) enqueue(req *jsonRPCBufferedRequest) { + b.mtx.Lock() + defer b.mtx.Unlock() + b.requests = append(b.requests, req) +} + +// Clear empties out the request batch. +func (b *RequestBatch) Clear() int { + b.mtx.Lock() + defer b.mtx.Unlock() + return b.clear() +} + +func (b *RequestBatch) clear() int { + count := len(b.requests) + b.requests = make([]*jsonRPCBufferedRequest, 0) + return count +} + +// Send will attempt to send the current batch of enqueued requests, and then +// will clear out the requests once done. On success, this returns the +// deserialized list of results from each of the enqueued requests. +func (b *RequestBatch) Send(ctx context.Context) ([]interface{}, error) { + b.mtx.Lock() + defer func() { + b.clear() + b.mtx.Unlock() + }() + return b.client.sendBatch(ctx, b.requests) +} + +// Call enqueues a request to call the given RPC method with the specified +// parameters, in the same way that the `Client.Call` function would. 
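+//
+// Note that the returned result is not yet populated: it is only filled in
+// once Send is invoked and the batched responses are deserialized.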
+func (b *RequestBatch) Call( + _ context.Context, + method string, + params map[string]interface{}, + result interface{}, +) (interface{}, error) { + id := b.client.nextRequestID() + request, err := types.MapToRequest(id, method, params) + if err != nil { + return nil, err + } + b.enqueue(&jsonRPCBufferedRequest{request: request, result: result}) + return result, nil +} + +//------------------------------------------------------------- + +func makeHTTPDialer(remoteAddr string) (func(string, string) (net.Conn, error), error) { + u, err := newParsedURL(remoteAddr) + if err != nil { + return nil, err + } + + protocol := u.Scheme + + // accept http(s) as an alias for tcp + switch protocol { + case protoHTTP, protoHTTPS: + protocol = protoTCP + } + + dialFn := func(proto, addr string) (net.Conn, error) { + return net.Dial(protocol, u.GetDialAddress()) + } + + return dialFn, nil +} + +// DefaultHTTPClient is used to create an http client with some default parameters. +// We overwrite the http.Client.Dial so we can do http over tcp or unix. +// remoteAddr should be fully featured (eg. with tcp:// or unix://). +// An error will be returned in case of invalid remoteAddr. +func DefaultHTTPClient(remoteAddr string) (*http.Client, error) { + dialFn, err := makeHTTPDialer(remoteAddr) + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: &http.Transport{ + // Set to true to prevent GZIP-bomb DoS attacks + DisableCompression: true, + Dial: dialFn, + }, + } + + return client, nil +} diff --git a/rpc/jsonrpc/client/http_json_client_test.go b/rpc/jsonrpc/client/http_json_client_test.go new file mode 100644 index 0000000..03134df --- /dev/null +++ b/rpc/jsonrpc/client/http_json_client_test.go @@ -0,0 +1,86 @@ +package client + +import ( + "io" + "log" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestHTTPClientMakeHTTPDialer(t *testing.T) { + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte("Hi!\n")) + }) + ts := httptest.NewServer(handler) + defer ts.Close() + + tsTLS := httptest.NewTLSServer(handler) + defer tsTLS.Close() + // This silences a TLS handshake error, caused by the dialer just immediately + // disconnecting, which we can just ignore. 
+ tsTLS.Config.ErrorLog = log.New(io.Discard, "", 0) + + for _, testURL := range []string{ts.URL, tsTLS.URL} { + u, err := newParsedURL(testURL) + require.NoError(t, err) + dialFn, err := makeHTTPDialer(testURL) + require.Nil(t, err) + + addr, err := dialFn(u.Scheme, u.GetHostWithPath()) + require.NoError(t, err) + require.NotNil(t, addr) + } +} + +func Test_parsedURL(t *testing.T) { + type test struct { + url string + expectedURL string + expectedHostWithPath string + expectedDialAddress string + } + + tests := map[string]test{ + "unix endpoint": { + url: "unix:///tmp/test", + expectedURL: "unix://.tmp.test", + expectedHostWithPath: "/tmp/test", + expectedDialAddress: "/tmp/test", + }, + + "http endpoint": { + url: "https://example.com", + expectedURL: "https://example.com", + expectedHostWithPath: "example.com", + expectedDialAddress: "example.com", + }, + + "http endpoint with port": { + url: "https://example.com:8080", + expectedURL: "https://example.com:8080", + expectedHostWithPath: "example.com:8080", + expectedDialAddress: "example.com:8080", + }, + + "http path routed endpoint": { + url: "https://example.com:8080/rpc", + expectedURL: "https://example.com:8080/rpc", + expectedHostWithPath: "example.com:8080/rpc", + expectedDialAddress: "example.com:8080", + }, + } + + for name, tt := range tests { + tt := tt // suppressing linter + t.Run(name, func(t *testing.T) { + parsed, err := newParsedURL(tt.url) + require.NoError(t, err) + require.Equal(t, tt.expectedDialAddress, parsed.GetDialAddress()) + require.Equal(t, tt.expectedURL, parsed.GetTrimmedURL()) + require.Equal(t, tt.expectedHostWithPath, parsed.GetHostWithPath()) + }) + } +} diff --git a/rpc/jsonrpc/client/http_uri_client.go b/rpc/jsonrpc/client/http_uri_client.go new file mode 100644 index 0000000..91ee07e --- /dev/null +++ b/rpc/jsonrpc/client/http_uri_client.go @@ -0,0 +1,85 @@ +package client + +import ( + "context" + "fmt" + "io" + "net/http" + "strings" + + types "github.com/strangelove-ventures/cometbft-client/rpc/jsonrpc/types" +) + +const ( + // URIClientRequestID in a request ID used by URIClient + URIClientRequestID = types.JSONRPCIntID(-1) +) + +// URIClient is a JSON-RPC client, which sends POST form HTTP requests to the +// remote server. +// +// URIClient is safe for concurrent use by multiple goroutines. +type URIClient struct { + address string + client *http.Client +} + +var _ HTTPClient = (*URIClient)(nil) + +// NewURI returns a new client. +// An error is returned on invalid remote. +// The function panics when remote is nil. +func NewURI(remote string) (*URIClient, error) { + parsedURL, err := newParsedURL(remote) + if err != nil { + return nil, err + } + + httpClient, err := DefaultHTTPClient(remote) + if err != nil { + return nil, err + } + + parsedURL.SetDefaultSchemeHTTP() + + uriClient := &URIClient{ + address: parsedURL.GetTrimmedURL(), + client: httpClient, + } + + return uriClient, nil +} + +// Call issues a POST form HTTP request. 
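+
+// A usage sketch for the URI client (the remote address is a placeholder).
+// Unlike the JSON-RPC client, parameters travel as a URL-encoded form body,
+// with one endpoint per method path:
+//
+//	uc, err := NewURI("tcp://localhost:26657")
+//	if err != nil {
+//		// handle invalid remote
+//	}
+//	var info json.RawMessage
+//	_, err = uc.Call(context.Background(), "abci_info", map[string]interface{}{}, &info)
+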
+func (c *URIClient) Call(ctx context.Context, method string, + params map[string]interface{}, result interface{}) (interface{}, error) { + + values, err := argsToURLValues(params) + if err != nil { + return nil, fmt.Errorf("failed to encode params: %w", err) + } + + req, err := http.NewRequestWithContext( + ctx, + http.MethodPost, + c.address+"/"+method, + strings.NewReader(values.Encode()), + ) + if err != nil { + return nil, fmt.Errorf("new request: %w", err) + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + resp, err := c.client.Do(req) + if err != nil { + return nil, fmt.Errorf("post: %w", err) + } + defer resp.Body.Close() + + responseBytes, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("read response body: %w", err) + } + + return unmarshalResponseBytes(responseBytes, URIClientRequestID, result) +} diff --git a/rpc/jsonrpc/client/integration_test.go b/rpc/jsonrpc/client/integration_test.go new file mode 100644 index 0000000..9e5823f --- /dev/null +++ b/rpc/jsonrpc/client/integration_test.go @@ -0,0 +1,69 @@ +//go:build release +// +build release + +// The code in here is comprehensive as an integration +// test and is long, hence is only run before releases. + +package client + +import ( + "bytes" + "errors" + "net" + "regexp" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/strangelove-ventures/cometbft-client/libs/log" +) + +func TestWSClientReconnectWithJitter(t *testing.T) { + n := 8 + maxReconnectAttempts := 3 + // Max wait time is ceil(1+0.999) + ceil(2+0.999) + ceil(4+0.999) + ceil(...) = 2 + 3 + 5 = 10s + ... + maxSleepTime := time.Second * time.Duration(((1< c.maxReconnectAttempts { + return fmt.Errorf("reached maximum reconnect attempts: %w", err) + } + } +} + +func (c *WSClient) startReadWriteRoutines() { + c.wg.Add(2) + c.readRoutineQuit = make(chan struct{}) + go c.readRoutine() + go c.writeRoutine() +} + +func (c *WSClient) processBacklog() error { + select { + case request := <-c.backlog: + if c.writeWait > 0 { + if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)); err != nil { + c.Logger.Error("failed to set write deadline", "err", err) + } + } + if err := c.conn.WriteJSON(request); err != nil { + c.Logger.Error("failed to resend request", "err", err) + c.reconnectAfter <- err + // requeue request + c.backlog <- request + return err + } + c.Logger.Info("resend a request", "req", request) + default: + } + return nil +} + +func (c *WSClient) reconnectRoutine() { + for { + select { + case originalError := <-c.reconnectAfter: + // wait until writeRoutine and readRoutine finish + c.wg.Wait() + if err := c.reconnect(); err != nil { + c.Logger.Error("failed to reconnect", "err", err, "original_err", originalError) + if err = c.Stop(); err != nil { + c.Logger.Error("failed to stop conn", "error", err) + } + + return + } + // drain reconnectAfter + LOOP: + for { + select { + case <-c.reconnectAfter: + default: + break LOOP + } + } + err := c.processBacklog() + if err == nil { + c.startReadWriteRoutines() + } + + case <-c.Quit(): + return + } + } +} + +// The client ensures that there is at most one writer to a connection by +// executing all writes from this goroutine. 
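+
+// The reconnect path above backs off exponentially with jitter. A sketch of
+// the schedule implied by the integration test's comment (1s, 2s, 4s, ...
+// plus up to ~1s of random jitter); this is an illustration, not a verbatim
+// copy of the implementation:
+//
+//	jitter := time.Duration(rand.Float64() * float64(time.Second)) //nolint:gosec
+//	backoff := jitter + time.Duration(1<<attempt)*time.Second
+//	time.Sleep(backoff)
+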
+func (c *WSClient) writeRoutine() { + var ticker *time.Ticker + if c.pingPeriod > 0 { + // ticker with a predefined period + ticker = time.NewTicker(c.pingPeriod) + } else { + // ticker that never fires + ticker = &time.Ticker{C: make(<-chan time.Time)} + } + + defer func() { + ticker.Stop() + c.conn.Close() + // err != nil { + // ignore error; it will trigger in tests + // likely because it's closing an already closed connection + // } + c.wg.Done() + }() + + for { + select { + case request := <-c.send: + if c.writeWait > 0 { + if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)); err != nil { + c.Logger.Error("failed to set write deadline", "err", err) + } + } + if err := c.conn.WriteJSON(request); err != nil { + c.Logger.Error("failed to send request", "err", err) + c.reconnectAfter <- err + // add request to the backlog, so we don't lose it + c.backlog <- request + return + } + case <-ticker.C: + if c.writeWait > 0 { + if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)); err != nil { + c.Logger.Error("failed to set write deadline", "err", err) + } + } + if err := c.conn.WriteMessage(websocket.PingMessage, []byte{}); err != nil { + c.Logger.Error("failed to write ping", "err", err) + c.reconnectAfter <- err + return + } + c.mtx.Lock() + c.sentLastPingAt = time.Now() + c.mtx.Unlock() + c.Logger.Debug("sent ping") + case <-c.readRoutineQuit: + return + case <-c.Quit(): + if err := c.conn.WriteMessage( + websocket.CloseMessage, + websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""), + ); err != nil { + c.Logger.Error("failed to write message", "err", err) + } + return + } + } +} + +// The client ensures that there is at most one reader to a connection by +// executing all reads from this goroutine. +func (c *WSClient) readRoutine() { + defer func() { + c.conn.Close() + // err != nil { + // ignore error; it will trigger in tests + // likely because it's closing an already closed connection + // } + c.wg.Done() + }() + + c.conn.SetPongHandler(func(string) error { + // gather latency stats + c.mtx.RLock() + t := c.sentLastPingAt + c.mtx.RUnlock() + c.PingPongLatencyTimer.UpdateSince(t) + + c.Logger.Debug("got pong") + return nil + }) + + for { + // reset deadline for every message type (control or data) + if c.readWait > 0 { + if err := c.conn.SetReadDeadline(time.Now().Add(c.readWait)); err != nil { + c.Logger.Error("failed to set read deadline", "err", err) + } + } + _, data, err := c.conn.ReadMessage() + if err != nil { + if !websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure) { + return + } + + c.Logger.Error("failed to read response", "err", err) + close(c.readRoutineQuit) + c.reconnectAfter <- err + return + } + + var response types.RPCResponse + err = json.Unmarshal(data, &response) + if err != nil { + c.Logger.Error("failed to parse response", "err", err, "data", string(data)) + continue + } + + if err = validateResponseID(response.ID); err != nil { + c.Logger.Error("error in response ID", "id", response.ID, "err", err) + continue + } + + // TODO: events resulting from /subscribe do not work with -> + // because they are implemented as responses with the subscribe request's + // ID. According to the spec, they should be notifications (requests + // without IDs). 
+ // https://github.com/tendermint/tendermint/issues/2949 + // c.mtx.Lock() + // if _, ok := c.sentIDs[response.ID.(types.JSONRPCIntID)]; !ok { + // c.Logger.Error("unsolicited response ID", "id", response.ID, "expected", c.sentIDs) + // c.mtx.Unlock() + // continue + // } + // delete(c.sentIDs, response.ID.(types.JSONRPCIntID)) + // c.mtx.Unlock() + // Combine a non-blocking read on BaseService.Quit with a non-blocking write on ResponsesCh to avoid blocking + // c.wg.Wait() in c.Stop(). Note we rely on Quit being closed so that it sends unlimited Quit signals to stop + // both readRoutine and writeRoutine + + c.Logger.Info("got response", "id", response.ID, "result", log.NewLazySprintf("%X", response.Result)) + + select { + case <-c.Quit(): + case c.ResponsesCh <- response: + } + } +} + +// Predefined methods + +// Subscribe to a query. Note the server must have a "subscribe" route +// defined. +func (c *WSClient) Subscribe(ctx context.Context, query string) error { + params := map[string]interface{}{"query": query} + return c.Call(ctx, "subscribe", params) +} + +// Unsubscribe from a query. Note the server must have a "unsubscribe" route +// defined. +func (c *WSClient) Unsubscribe(ctx context.Context, query string) error { + params := map[string]interface{}{"query": query} + return c.Call(ctx, "unsubscribe", params) +} + +// UnsubscribeAll from all. Note the server must have a "unsubscribe_all" route +// defined. +func (c *WSClient) UnsubscribeAll(ctx context.Context) error { + params := map[string]interface{}{} + return c.Call(ctx, "unsubscribe_all", params) +} diff --git a/rpc/jsonrpc/client/ws_client_test.go b/rpc/jsonrpc/client/ws_client_test.go new file mode 100644 index 0000000..8983688 --- /dev/null +++ b/rpc/jsonrpc/client/ws_client_test.go @@ -0,0 +1,233 @@ +package client + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "github.com/gorilla/websocket" + "github.com/stretchr/testify/require" + + "github.com/strangelove-ventures/cometbft-client/libs/log" + cmtsync "github.com/strangelove-ventures/cometbft-client/libs/sync" + types "github.com/strangelove-ventures/cometbft-client/rpc/jsonrpc/types" +) + +var wsCallTimeout = 5 * time.Second + +type myHandler struct { + closeConnAfterRead bool + mtx cmtsync.RWMutex +} + +var upgrader = websocket.Upgrader{ + ReadBufferSize: 1024, + WriteBufferSize: 1024, +} + +func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + panic(err) + } + defer conn.Close() + for { + messageType, in, err := conn.ReadMessage() + if err != nil { + return + } + + var req types.RPCRequest + err = json.Unmarshal(in, &req) + if err != nil { + panic(err) + } + + h.mtx.RLock() + if h.closeConnAfterRead { + if err := conn.Close(); err != nil { + panic(err) + } + } + h.mtx.RUnlock() + + res := json.RawMessage(`{}`) + emptyRespBytes, _ := json.Marshal(types.RPCResponse{Result: res, ID: req.ID}) + if err := conn.WriteMessage(messageType, emptyRespBytes); err != nil { + return + } + } +} + +func TestWSClientReconnectsAfterReadFailure(t *testing.T) { + var wg sync.WaitGroup + + // start server + h := &myHandler{} + s := httptest.NewServer(h) + defer s.Close() + + c := startClient(t, "//"+s.Listener.Addr().String()) + defer c.Stop() //nolint:errcheck // ignore for tests + + wg.Add(1) + go callWgDoneOnResult(t, c, &wg) + + h.mtx.Lock() + h.closeConnAfterRead = true + h.mtx.Unlock() + + // results in WS read error, no send retry 
because write succeeded + call(t, "a", c) + + // expect to reconnect almost immediately + time.Sleep(10 * time.Millisecond) + h.mtx.Lock() + h.closeConnAfterRead = false + h.mtx.Unlock() + + // should succeed + call(t, "b", c) + + wg.Wait() +} + +func TestWSClientReconnectsAfterWriteFailure(t *testing.T) { + var wg sync.WaitGroup + + // start server + h := &myHandler{} + s := httptest.NewServer(h) + + c := startClient(t, "//"+s.Listener.Addr().String()) + defer c.Stop() //nolint:errcheck // ignore for tests + + wg.Add(2) + go callWgDoneOnResult(t, c, &wg) + + // hacky way to abort the connection before write + if err := c.conn.Close(); err != nil { + t.Error(err) + } + + // results in WS write error, the client should resend on reconnect + call(t, "a", c) + + // expect to reconnect almost immediately + time.Sleep(10 * time.Millisecond) + + // should succeed + call(t, "b", c) + + wg.Wait() +} + +func TestWSClientReconnectFailure(t *testing.T) { + // start server + h := &myHandler{} + s := httptest.NewServer(h) + + c := startClient(t, "//"+s.Listener.Addr().String()) + defer c.Stop() //nolint:errcheck // ignore for tests + + go func() { + for { + select { + case <-c.ResponsesCh: + case <-c.Quit(): + return + } + } + }() + + // hacky way to abort the connection before write + if err := c.conn.Close(); err != nil { + t.Error(err) + } + s.Close() + + // results in WS write error + // provide timeout to avoid blocking + ctx, cancel := context.WithTimeout(context.Background(), wsCallTimeout) + defer cancel() + if err := c.Call(ctx, "a", make(map[string]interface{})); err != nil { + t.Error(err) + } + + // expect to reconnect almost immediately + time.Sleep(10 * time.Millisecond) + + done := make(chan struct{}) + go func() { + // client should block on this + call(t, "b", c) + close(done) + }() + + // test that client blocks on the second send + select { + case <-done: + t.Fatal("client should block on calling 'b' during reconnect") + case <-time.After(5 * time.Second): + t.Log("All good") + } +} + +func TestNotBlockingOnStop(t *testing.T) { + timeout := 2 * time.Second + s := httptest.NewServer(&myHandler{}) + c := startClient(t, "//"+s.Listener.Addr().String()) + c.Call(context.Background(), "a", make(map[string]interface{})) //nolint:errcheck // ignore for tests + // Let the readRoutine get around to blocking + time.Sleep(time.Second) + passCh := make(chan struct{}) + go func() { + // Unless we have a non-blocking write to ResponsesCh from readRoutine + // this blocks forever ont the waitgroup + err := c.Stop() + require.NoError(t, err) + passCh <- struct{}{} + }() + select { + case <-passCh: + // Pass + case <-time.After(timeout): + t.Fatalf("WSClient did failed to stop within %v seconds - is one of the read/write routines blocking?", + timeout.Seconds()) + } +} + +func startClient(t *testing.T, addr string) *WSClient { + c, err := NewWS(addr, "/websocket") + require.Nil(t, err) + err = c.Start() + require.Nil(t, err) + c.SetLogger(log.TestingLogger()) + return c +} + +func call(t *testing.T, method string, c *WSClient) { + err := c.Call(context.Background(), method, make(map[string]interface{})) + require.NoError(t, err) +} + +func callWgDoneOnResult(t *testing.T, c *WSClient, wg *sync.WaitGroup) { + for { + select { + case resp := <-c.ResponsesCh: + if resp.Error != nil { + t.Errorf("unexpected error: %v", resp.Error) + return + } + if resp.Result != nil { + wg.Done() + } + case <-c.Quit(): + return + } + } +} diff --git a/rpc/jsonrpc/server/http_json_handler.go 
b/rpc/jsonrpc/server/http_json_handler.go new file mode 100644 index 0000000..4da90a9 --- /dev/null +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -0,0 +1,258 @@ +package server + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "reflect" + "sort" + + cmtjson "github.com/strangelove-ventures/cometbft-client/libs/json" + "github.com/strangelove-ventures/cometbft-client/libs/log" + types "github.com/strangelove-ventures/cometbft-client/rpc/jsonrpc/types" +) + +// HTTP + JSON handler + +// jsonrpc calls grab the given method's function info and runs reflect.Call +func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + b, err := io.ReadAll(r.Body) + if err != nil { + res := types.RPCInvalidRequestError(nil, + fmt.Errorf("error reading request body: %w", err), + ) + if wErr := WriteRPCResponseHTTPError(w, http.StatusBadRequest, res); wErr != nil { + logger.Error("failed to write response", "err", wErr) + } + return + } + + // if its an empty request (like from a browser), just display a list of + // functions + if len(b) == 0 { + writeListOfEndpoints(w, r, funcMap) + return + } + + // first try to unmarshal the incoming request as an array of RPC requests + var ( + requests []types.RPCRequest + responses []types.RPCResponse + ) + if err := json.Unmarshal(b, &requests); err != nil { + // next, try to unmarshal as a single request + var request types.RPCRequest + if err := json.Unmarshal(b, &request); err != nil { + res := types.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err)) + if wErr := WriteRPCResponseHTTPError(w, http.StatusInternalServerError, res); wErr != nil { + logger.Error("failed to write response", "err", wErr) + } + return + } + requests = []types.RPCRequest{request} + } + + // Set the default response cache to true unless + // 1. Any RPC request error. + // 2. Any RPC request doesn't allow to be cached. + // 3. Any RPC request has the height argument and the value is 0 (the default). + cache := true + for _, request := range requests { + request := request + + // A Notification is a Request object without an "id" member. + // The Server MUST NOT reply to a Notification, including those that are within a batch request. + if request.ID == nil { + logger.Debug( + "HTTPJSONRPC received a notification, skipping... (please send a non-empty ID if you want to call a method)", + "req", request, + ) + continue + } + if len(r.URL.Path) > 1 { + responses = append( + responses, + types.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is invalid", r.URL.Path)), + ) + cache = false + continue + } + rpcFunc, ok := funcMap[request.Method] + if !ok || (rpcFunc.ws) { + responses = append(responses, types.RPCMethodNotFoundError(request.ID)) + cache = false + continue + } + ctx := &types.Context{JSONReq: &request, HTTPReq: r} + args := []reflect.Value{reflect.ValueOf(ctx)} + if len(request.Params) > 0 { + fnArgs, err := jsonParamsToArgs(rpcFunc, request.Params) + if err != nil { + responses = append( + responses, + types.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), + ) + cache = false + continue + } + args = append(args, fnArgs...) 
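+				// At this point args holds [ctx, decoded params...]; the
+				// cacheableWithArgs check below inspects the decoded values
+				// so that, e.g., a height argument left at its default of 0
+				// bypasses the response cache.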
+ } + + if cache && !rpcFunc.cacheableWithArgs(args) { + cache = false + } + + returns := rpcFunc.f.Call(args) + result, err := unreflectResult(returns) + if err != nil { + responses = append(responses, types.RPCInternalError(request.ID, err)) + continue + } + responses = append(responses, types.NewRPCSuccessResponse(request.ID, result)) + } + + if len(responses) > 0 { + var wErr error + if cache { + wErr = WriteCacheableRPCResponseHTTP(w, responses...) + } else { + wErr = WriteRPCResponseHTTP(w, responses...) + } + if wErr != nil { + logger.Error("failed to write responses", "err", wErr) + } + } + } +} + +func handleInvalidJSONRPCPaths(next http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // Since the pattern "/" matches all paths not matched by other registered patterns, + // we check whether the path is indeed "/", otherwise return a 404 error + if r.URL.Path != "/" { + http.NotFound(w, r) + return + } + + next(w, r) + } +} + +func mapParamsToArgs( + rpcFunc *RPCFunc, + params map[string]json.RawMessage, + argsOffset int, +) ([]reflect.Value, error) { + values := make([]reflect.Value, len(rpcFunc.argNames)) + for i, argName := range rpcFunc.argNames { + argType := rpcFunc.args[i+argsOffset] + + if p, ok := params[argName]; ok && p != nil && len(p) > 0 { + val := reflect.New(argType) + err := cmtjson.Unmarshal(p, val.Interface()) + if err != nil { + return nil, err + } + values[i] = val.Elem() + } else { // use default for that type + values[i] = reflect.Zero(argType) + } + } + + return values, nil +} + +func arrayParamsToArgs( + rpcFunc *RPCFunc, + params []json.RawMessage, + argsOffset int, +) ([]reflect.Value, error) { + if len(rpcFunc.argNames) != len(params) { + return nil, fmt.Errorf("expected %v parameters (%v), got %v (%v)", + len(rpcFunc.argNames), rpcFunc.argNames, len(params), params) + } + + values := make([]reflect.Value, len(params)) + for i, p := range params { + argType := rpcFunc.args[i+argsOffset] + val := reflect.New(argType) + err := cmtjson.Unmarshal(p, val.Interface()) + if err != nil { + return nil, err + } + values[i] = val.Elem() + } + return values, nil +} + +// raw is unparsed json (from json.RawMessage) encoding either a map or an +// array. +// +// Example: +// +// rpcFunc.args = [rpctypes.Context string] +// rpcFunc.argNames = ["arg"] +func jsonParamsToArgs(rpcFunc *RPCFunc, raw []byte) ([]reflect.Value, error) { + const argsOffset = 1 + + // TODO: Make more efficient, perhaps by checking the first character for '{' or '['? + // First, try to get the map. + var m map[string]json.RawMessage + err := json.Unmarshal(raw, &m) + if err == nil { + return mapParamsToArgs(rpcFunc, m, argsOffset) + } + + // Otherwise, try an array. + var a []json.RawMessage + err = json.Unmarshal(raw, &a) + if err == nil { + return arrayParamsToArgs(rpcFunc, a, argsOffset) + } + + // Otherwise, bad format, we cannot parse + return nil, fmt.Errorf("unknown type for JSON params: %v. Expected map or array", err) +} + +// writes a list of available rpc endpoints as an html page +func writeListOfEndpoints(w http.ResponseWriter, r *http.Request, funcMap map[string]*RPCFunc) { + noArgNames := []string{} + argNames := []string{} + for name, funcData := range funcMap { + if len(funcData.args) == 0 { + noArgNames = append(noArgNames, name) + } else { + argNames = append(argNames, name) + } + } + sort.Strings(noArgNames) + sort.Strings(argNames) + buf := new(bytes.Buffer) + buf.WriteString("") + buf.WriteString("
+<br>Available endpoints:<br>")
+
+	for _, name := range noArgNames {
+		link := fmt.Sprintf("//%s/%s", r.Host, name)
+		buf.WriteString(fmt.Sprintf("<a href=\"%s\">%s</a></br>", link, link))
+	}
+
+	buf.WriteString("<br>Endpoints that require arguments:<br>")
+	for _, name := range argNames {
+		link := fmt.Sprintf("//%s/%s?", r.Host, name)
+		funcData := funcMap[name]
+		for i, argName := range funcData.argNames {
+			link += argName + "=_"
+			if i < len(funcData.argNames)-1 {
+				link += "&"
+			}
+		}
+		buf.WriteString(fmt.Sprintf("<a href=\"%s\">%s</a></br>
", link, link)) + } + buf.WriteString("") + w.Header().Set("Content-Type", "text/html") + w.WriteHeader(200) + w.Write(buf.Bytes()) //nolint: errcheck +} diff --git a/rpc/jsonrpc/server/http_json_handler_test.go b/rpc/jsonrpc/server/http_json_handler_test.go new file mode 100644 index 0000000..341b041 --- /dev/null +++ b/rpc/jsonrpc/server/http_json_handler_test.go @@ -0,0 +1,278 @@ +package server + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/strangelove-ventures/cometbft-client/libs/log" + types "github.com/strangelove-ventures/cometbft-client/rpc/jsonrpc/types" +) + +func testMux() *http.ServeMux { + funcMap := map[string]*RPCFunc{ + "c": NewRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), + "block": NewRPCFunc(func(ctx *types.Context, h int) (string, error) { return "block", nil }, "height", Cacheable("height")), + } + mux := http.NewServeMux() + buf := new(bytes.Buffer) + logger := log.NewTMLogger(buf) + RegisterRPCFuncs(mux, funcMap, logger) + + return mux +} + +func statusOK(code int) bool { return code >= 200 && code <= 299 } + +// Ensure that nefarious/unintended inputs to `params` +// do not crash our RPC handlers. +// See Issue https://github.com/tendermint/tendermint/issues/708. +func TestRPCParams(t *testing.T) { + mux := testMux() + tests := []struct { + payload string + wantErr string + expectedID interface{} + }{ + // bad + {`{"jsonrpc": "2.0", "id": "0"}`, "Method not found", types.JSONRPCStringID("0")}, + {`{"jsonrpc": "2.0", "method": "y", "id": "0"}`, "Method not found", types.JSONRPCStringID("0")}, + // id not captured in JSON parsing failures + {`{"method": "c", "id": "0", "params": a}`, "invalid character", nil}, + {`{"method": "c", "id": "0", "params": ["a"]}`, "got 1", types.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": ["a", "b"]}`, "invalid character", types.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": [1, 1]}`, "of type string", types.JSONRPCStringID("0")}, + + // no ID - notification + // {`{"jsonrpc": "2.0", "method": "c", "params": ["a", "10"]}`, false, nil}, + + // good + {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": null}`, "", types.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": {}}`, "", types.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": ["a", "10"]}`, "", types.JSONRPCStringID("0")}, + } + + for i, tt := range tests { + req, _ := http.NewRequest("POST", "http://localhost/", strings.NewReader(tt.payload)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + res := rec.Result() + defer res.Body.Close() + // Always expecting back a JSONRPCResponse + assert.NotZero(t, res.StatusCode, "#%d: should always return code", i) + blob, err := io.ReadAll(res.Body) + if err != nil { + t.Errorf("#%d: err reading body: %v", i, err) + continue + } + + recv := new(types.RPCResponse) + assert.Nil(t, json.Unmarshal(blob, recv), "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) + assert.NotEqual(t, recv, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) + assert.Equal(t, tt.expectedID, recv.ID, "#%d: expected ID not matched in RPCResponse", i) + if tt.wantErr == "" { + assert.Nil(t, recv.Error, "#%d: not expecting an error", i) + } else { + assert.True(t, recv.Error.Code < 0, "#%d: not expecting a positive JSONRPC code", i) + // The wanted 
error is either in the message or the data + assert.Contains(t, recv.Error.Message+recv.Error.Data, tt.wantErr, "#%d: expected substring", i) + } + } +} + +func TestJSONRPCID(t *testing.T) { + mux := testMux() + tests := []struct { + payload string + wantErr bool + expectedID interface{} + }{ + // good id + {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": ["a", "10"]}`, false, types.JSONRPCStringID("0")}, + {`{"jsonrpc": "2.0", "method": "c", "id": "abc", "params": ["a", "10"]}`, false, types.JSONRPCStringID("abc")}, + {`{"jsonrpc": "2.0", "method": "c", "id": 0, "params": ["a", "10"]}`, false, types.JSONRPCIntID(0)}, + {`{"jsonrpc": "2.0", "method": "c", "id": 1, "params": ["a", "10"]}`, false, types.JSONRPCIntID(1)}, + {`{"jsonrpc": "2.0", "method": "c", "id": 1.3, "params": ["a", "10"]}`, false, types.JSONRPCIntID(1)}, + {`{"jsonrpc": "2.0", "method": "c", "id": -1, "params": ["a", "10"]}`, false, types.JSONRPCIntID(-1)}, + + // bad id + {`{"jsonrpc": "2.0", "method": "c", "id": {}, "params": ["a", "10"]}`, true, nil}, + {`{"jsonrpc": "2.0", "method": "c", "id": [], "params": ["a", "10"]}`, true, nil}, + } + + for i, tt := range tests { + req, _ := http.NewRequest("POST", "http://localhost/", strings.NewReader(tt.payload)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + res := rec.Result() + // Always expecting back a JSONRPCResponse + assert.NotZero(t, res.StatusCode, "#%d: should always return code", i) + blob, err := io.ReadAll(res.Body) + if err != nil { + t.Errorf("#%d: err reading body: %v", i, err) + continue + } + res.Body.Close() + + recv := new(types.RPCResponse) + err = json.Unmarshal(blob, recv) + assert.Nil(t, err, "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) + if !tt.wantErr { + assert.NotEqual(t, recv, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) + assert.Equal(t, tt.expectedID, recv.ID, "#%d: expected ID not matched in RPCResponse", i) + assert.Nil(t, recv.Error, "#%d: not expecting an error", i) + } else { + assert.True(t, recv.Error.Code < 0, "#%d: not expecting a positive JSONRPC code", i) + } + } +} + +func TestRPCNotification(t *testing.T) { + mux := testMux() + body := strings.NewReader(`{"jsonrpc": "2.0"}`) + req, _ := http.NewRequest("POST", "http://localhost/", body) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + res := rec.Result() + + // Always expecting back a JSONRPCResponse + require.True(t, statusOK(res.StatusCode), "should always return 2XX") + blob, err := io.ReadAll(res.Body) + res.Body.Close() + require.Nil(t, err, "reading from the body should not give back an error") + require.Equal(t, len(blob), 0, "a notification SHOULD NOT be responded to by the server") +} + +func TestRPCNotificationInBatch(t *testing.T) { + mux := testMux() + tests := []struct { + payload string + expectCount int + }{ + { + `[ + {"jsonrpc": "2.0"}, + {"jsonrpc": "2.0","method":"c","id":"abc","params":["a","10"]} + ]`, + 1, + }, + { + `[ + {"jsonrpc": "2.0"}, + {"jsonrpc": "2.0","method":"c","id":"abc","params":["a","10"]}, + {"jsonrpc": "2.0"}, + {"jsonrpc": "2.0","method":"c","id":"abc","params":["a","10"]} + ]`, + 2, + }, + } + for i, tt := range tests { + req, _ := http.NewRequest("POST", "http://localhost/", strings.NewReader(tt.payload)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + res := rec.Result() + // Always expecting back a JSONRPCResponse + assert.True(t, statusOK(res.StatusCode), "#%d: should always return 2XX", i) + blob, err := io.ReadAll(res.Body) + if err != nil 
{ + t.Errorf("#%d: err reading body: %v", i, err) + continue + } + res.Body.Close() + + var responses []types.RPCResponse + // try to unmarshal an array first + err = json.Unmarshal(blob, &responses) + if err != nil { + // if we were actually expecting an array, but got an error + if tt.expectCount > 1 { + t.Errorf("#%d: expected an array, couldn't unmarshal it\nblob: %s", i, blob) + continue + } + // we were expecting an error here, so let's unmarshal a single response + var response types.RPCResponse + err = json.Unmarshal(blob, &response) + if err != nil { + t.Errorf("#%d: expected successful parsing of an RPCResponse\nblob: %s", i, blob) + continue + } + // have a single-element result + responses = []types.RPCResponse{response} + } + if tt.expectCount != len(responses) { + t.Errorf("#%d: expected %d response(s), but got %d\nblob: %s", i, tt.expectCount, len(responses), blob) + continue + } + for _, response := range responses { + assert.NotEqual(t, response, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) + } + } +} + +func TestUnknownRPCPath(t *testing.T) { + mux := testMux() + req, _ := http.NewRequest("GET", "http://localhost/unknownrpcpath", nil) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + res := rec.Result() + + // Always expecting back a 404 error + require.Equal(t, http.StatusNotFound, res.StatusCode, "should always return 404") + res.Body.Close() +} + +func TestRPCResponseCache(t *testing.T) { + mux := testMux() + body := strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": ["1"]}`) + req, _ := http.NewRequest("Get", "http://localhost/", body) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + res := rec.Result() + + // Always expecting back a JSONRPCResponse + require.True(t, statusOK(res.StatusCode), "should always return 2XX") + require.Equal(t, "public, max-age=86400", res.Header.Get("Cache-control")) + + _, err := io.ReadAll(res.Body) + res.Body.Close() + require.Nil(t, err, "reading from the body should not give back an error") + + // send a request with default height. + body = strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": ["0"]}`) + req, _ = http.NewRequest("Get", "http://localhost/", body) + rec = httptest.NewRecorder() + mux.ServeHTTP(rec, req) + res = rec.Result() + + // Always expecting back a JSONRPCResponse + require.True(t, statusOK(res.StatusCode), "should always return 2XX") + require.Equal(t, "", res.Header.Get("Cache-control")) + + _, err = io.ReadAll(res.Body) + + res.Body.Close() + require.Nil(t, err, "reading from the body should not give back an error") + + // send a request with default height, but as empty set of parameters. 
+ body = strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": []}`) + req, _ = http.NewRequest("Get", "http://localhost/", body) + rec = httptest.NewRecorder() + mux.ServeHTTP(rec, req) + res = rec.Result() + + // Always expecting back a JSONRPCResponse + require.True(t, statusOK(res.StatusCode), "should always return 2XX") + require.Equal(t, "", res.Header.Get("Cache-control")) + + _, err = io.ReadAll(res.Body) + + res.Body.Close() + require.Nil(t, err, "reading from the body should not give back an error") +} diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go new file mode 100644 index 0000000..6fe2a2a --- /dev/null +++ b/rpc/jsonrpc/server/http_server.go @@ -0,0 +1,279 @@ +// Commons for HTTP handling +package server + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + "net" + "net/http" + "os" + "runtime/debug" + "strings" + "time" + + "golang.org/x/net/netutil" + + "github.com/strangelove-ventures/cometbft-client/libs/log" + types "github.com/strangelove-ventures/cometbft-client/rpc/jsonrpc/types" +) + +// Config is a RPC server configuration. +type Config struct { + // see netutil.LimitListener + MaxOpenConnections int + // mirrors http.Server#ReadTimeout + ReadTimeout time.Duration + // mirrors http.Server#WriteTimeout + WriteTimeout time.Duration + // MaxBodyBytes controls the maximum number of bytes the + // server will read parsing the request body. + MaxBodyBytes int64 + // mirrors http.Server#MaxHeaderBytes + MaxHeaderBytes int +} + +// DefaultConfig returns a default configuration. +func DefaultConfig() *Config { + return &Config{ + MaxOpenConnections: 0, // unlimited + ReadTimeout: 10 * time.Second, + WriteTimeout: 10 * time.Second, + MaxBodyBytes: int64(1000000), // 1MB + MaxHeaderBytes: 1 << 20, // same as the net/http default + } +} + +// Serve creates a http.Server and calls Serve with the given listener. It +// wraps handler with RecoverAndLogHandler and a handler, which limits the max +// body size to config.MaxBodyBytes. +// +// NOTE: This function blocks - you may want to call it in a go-routine. +func Serve(listener net.Listener, handler http.Handler, logger log.Logger, config *Config) error { + logger.Info("serve", "msg", log.NewLazySprintf("Starting RPC HTTP server on %s", listener.Addr())) + s := &http.Server{ + Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger), + ReadTimeout: config.ReadTimeout, + ReadHeaderTimeout: config.ReadTimeout, + WriteTimeout: config.WriteTimeout, + MaxHeaderBytes: config.MaxHeaderBytes, + } + err := s.Serve(listener) + logger.Info("RPC HTTP server stopped", "err", err) + return err +} + +// Serve creates a http.Server and calls ServeTLS with the given listener, +// certFile and keyFile. It wraps handler with RecoverAndLogHandler and a +// handler, which limits the max body size to config.MaxBodyBytes. +// +// NOTE: This function blocks - you may want to call it in a go-routine. 
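+
+// Wiring a plain-HTTP server together (sketch; the address, mux, and logger
+// are placeholders, and Serve blocks until the listener closes):
+//
+//	config := DefaultConfig()
+//	l, err := Listen("tcp://127.0.0.1:26657", config.MaxOpenConnections)
+//	if err != nil {
+//		// handle bad address
+//	}
+//	go Serve(l, mux, logger, config) //nolint:errcheck
+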
+func ServeTLS( + listener net.Listener, + handler http.Handler, + certFile, keyFile string, + logger log.Logger, + config *Config, +) error { + logger.Info("serve tls", "msg", log.NewLazySprintf("Starting RPC HTTPS server on %s (cert: %q, key: %q)", + listener.Addr(), certFile, keyFile)) + s := &http.Server{ + Handler: RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger), + ReadTimeout: config.ReadTimeout, + ReadHeaderTimeout: config.ReadTimeout, + WriteTimeout: config.WriteTimeout, + MaxHeaderBytes: config.MaxHeaderBytes, + } + err := s.ServeTLS(listener, certFile, keyFile) + + logger.Error("RPC HTTPS server stopped", "err", err) + return err +} + +// WriteRPCResponseHTTPError marshals res as JSON (with indent) and writes it +// to w. +// +// source: https://www.jsonrpc.org/historical/json-rpc-over-http.html +func WriteRPCResponseHTTPError( + w http.ResponseWriter, + httpCode int, + res types.RPCResponse, +) error { + if res.Error == nil { + panic("tried to write http error response without RPC error") + } + + jsonBytes, err := json.Marshal(res) + if err != nil { + return fmt.Errorf("json marshal: %w", err) + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(httpCode) + _, err = w.Write(jsonBytes) + return err +} + +// WriteRPCResponseHTTP marshals res as JSON (with indent) and writes it to w. +func WriteRPCResponseHTTP(w http.ResponseWriter, res ...types.RPCResponse) error { + return writeRPCResponseHTTP(w, []httpHeader{}, res...) +} + +// WriteCacheableRPCResponseHTTP marshals res as JSON (with indent) and writes +// it to w. Adds cache-control to the response header and sets the expiry to +// one day. +func WriteCacheableRPCResponseHTTP(w http.ResponseWriter, res ...types.RPCResponse) error { + return writeRPCResponseHTTP(w, []httpHeader{{"Cache-Control", "public, max-age=86400"}}, res...) +} + +type httpHeader struct { + name string + value string +} + +func writeRPCResponseHTTP(w http.ResponseWriter, headers []httpHeader, res ...types.RPCResponse) error { + var v interface{} + if len(res) == 1 { + v = res[0] + } else { + v = res + } + + jsonBytes, err := json.Marshal(v) + if err != nil { + return fmt.Errorf("json marshal: %w", err) + } + w.Header().Set("Content-Type", "application/json") + for _, header := range headers { + w.Header().Set(header.name, header.value) + } + w.WriteHeader(200) + _, err = w.Write(jsonBytes) + return err +} + +//----------------------------------------------------------------------------- + +// RecoverAndLogHandler wraps an HTTP handler, adding error logging. +// If the inner function panics, the outer function recovers, logs, sends an +// HTTP 500 error response. +func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Wrap the ResponseWriter to remember the status + rww := &responseWriterWrapper{-1, w} + begin := time.Now() + + rww.Header().Set("X-Server-Time", fmt.Sprintf("%v", begin.Unix())) + + defer func() { + // Handle any panics in the panic handler below. Does not use the logger, since we want + // to avoid any further panics. However, we try to return a 500, since it otherwise + // defaults to 200 and there is no other way to terminate the connection. 
If that + // should panic for whatever reason then the Go HTTP server will handle it and + // terminate the connection - panicing is the de-facto and only way to get the Go HTTP + // server to terminate the request and close the connection/stream: + // https://github.com/golang/go/issues/17790#issuecomment-258481416 + if e := recover(); e != nil { + fmt.Fprintf(os.Stderr, "Panic during RPC panic recovery: %v\n%v\n", e, string(debug.Stack())) + w.WriteHeader(500) + } + }() + + defer func() { + // Send a 500 error if a panic happens during a handler. + // Without this, Chrome & Firefox were retrying aborted ajax requests, + // at least to my localhost. + if e := recover(); e != nil { + // If RPCResponse + if res, ok := e.(types.RPCResponse); ok { + if wErr := WriteRPCResponseHTTP(rww, res); wErr != nil { + logger.Error("failed to write response", "err", wErr) + } + } else { + // Panics can contain anything, attempt to normalize it as an error. + var err error + switch e := e.(type) { + case error: + err = e + case string: + err = errors.New(e) + case fmt.Stringer: + err = errors.New(e.String()) + default: + } + + logger.Error("panic in RPC HTTP handler", "err", e, "stack", string(debug.Stack())) + + res := types.RPCInternalError(types.JSONRPCIntID(-1), err) + if wErr := WriteRPCResponseHTTPError(rww, http.StatusInternalServerError, res); wErr != nil { + logger.Error("failed to write response", "err", wErr) + } + } + } + + // Finally, log. + durationMS := time.Since(begin).Nanoseconds() / 1000000 + if rww.Status == -1 { + rww.Status = 200 + } + logger.Debug("served RPC HTTP response", + "method", r.Method, + "url", r.URL, + "status", rww.Status, + "duration", durationMS, + "remoteAddr", r.RemoteAddr, + ) + }() + + handler.ServeHTTP(rww, r) + }) +} + +// Remember the status for logging +type responseWriterWrapper struct { + Status int + http.ResponseWriter +} + +func (w *responseWriterWrapper) WriteHeader(status int) { + w.Status = status + w.ResponseWriter.WriteHeader(status) +} + +// implements http.Hijacker +func (w *responseWriterWrapper) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return w.ResponseWriter.(http.Hijacker).Hijack() +} + +type maxBytesHandler struct { + h http.Handler + n int64 +} + +func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + r.Body = http.MaxBytesReader(w, r.Body, h.n) + h.h.ServeHTTP(w, r) +} + +// Listen starts a new net.Listener on the given address. +// It returns an error if the address is invalid or the call to Listen() fails. 
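+
+// Accepted address forms (sketch; the port and socket path are placeholders):
+//
+//	Listen("tcp://0.0.0.0:26657", 0)      // TCP, unlimited open connections
+//	Listen("unix:///var/run/rpc.sock", 5) // unix socket, capped at 5 connections
+//	Listen("127.0.0.1:26657", 0)          // error: missing tcp:// or unix:// prefix
+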
+func Listen(addr string, maxOpenConnections int) (listener net.Listener, err error) { + parts := strings.SplitN(addr, "://", 2) + if len(parts) != 2 { + return nil, fmt.Errorf( + "invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", + addr, + ) + } + proto, addr := parts[0], parts[1] + listener, err = net.Listen(proto, addr) + if err != nil { + return nil, fmt.Errorf("failed to listen on %v: %v", addr, err) + } + if maxOpenConnections > 0 { + listener = netutil.LimitListener(listener, maxOpenConnections) + } + + return listener, nil +} diff --git a/rpc/jsonrpc/server/http_server_test.go b/rpc/jsonrpc/server/http_server_test.go new file mode 100644 index 0000000..089c1a2 --- /dev/null +++ b/rpc/jsonrpc/server/http_server_test.go @@ -0,0 +1,155 @@ +package server + +import ( + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/http/httptest" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/strangelove-ventures/cometbft-client/libs/log" + types "github.com/strangelove-ventures/cometbft-client/rpc/jsonrpc/types" +) + +type sampleResult struct { + Value string `json:"value"` +} + +func TestMaxOpenConnections(t *testing.T) { + const max = 5 // max simultaneous connections + + // Start the server. + var open int32 + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + if n := atomic.AddInt32(&open, 1); n > int32(max) { + t.Errorf("%d open connections, want <= %d", n, max) + } + defer atomic.AddInt32(&open, -1) + time.Sleep(10 * time.Millisecond) + fmt.Fprint(w, "some body") + }) + config := DefaultConfig() + l, err := Listen("tcp://127.0.0.1:0", max) + require.NoError(t, err) + defer l.Close() + go Serve(l, mux, log.TestingLogger(), config) //nolint:errcheck // ignore for tests + + // Make N GET calls to the server. + attempts := max * 2 + var wg sync.WaitGroup + var failed int32 + for i := 0; i < attempts; i++ { + wg.Add(1) + go func() { + defer wg.Done() + c := http.Client{Timeout: 3 * time.Second} + r, err := c.Get("http://" + l.Addr().String()) + if err != nil { + atomic.AddInt32(&failed, 1) + return + } + defer r.Body.Close() + }() + } + wg.Wait() + + // We expect some Gets to fail as the server's accept queue is filled, + // but most should succeed. 
+ if int(failed) >= attempts/2 { + t.Errorf("%d requests failed within %d attempts", failed, attempts) + } +} + +func TestServeTLS(t *testing.T) { + ln, err := net.Listen("tcp", "localhost:0") + require.NoError(t, err) + defer ln.Close() + + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, "some body") + }) + + chErr := make(chan error, 1) + go func() { + // FIXME This goroutine leaks + chErr <- ServeTLS(ln, mux, "test.crt", "test.key", log.TestingLogger(), DefaultConfig()) + }() + + select { + case err := <-chErr: + require.NoError(t, err) + case <-time.After(100 * time.Millisecond): + } + + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + c := &http.Client{Transport: tr} + res, err := c.Get("https://" + ln.Addr().String()) + require.NoError(t, err) + defer res.Body.Close() + assert.Equal(t, http.StatusOK, res.StatusCode) + + body, err := io.ReadAll(res.Body) + require.NoError(t, err) + assert.Equal(t, []byte("some body"), body) +} + +func TestWriteRPCResponseHTTP(t *testing.T) { + id := types.JSONRPCIntID(-1) + + // one argument + w := httptest.NewRecorder() + err := WriteCacheableRPCResponseHTTP(w, types.NewRPCSuccessResponse(id, &sampleResult{"hello"})) + require.NoError(t, err) + resp := w.Result() + body, err := io.ReadAll(resp.Body) + _ = resp.Body.Close() + require.NoError(t, err) + assert.Equal(t, 200, resp.StatusCode) + assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) + assert.Equal(t, "public, max-age=86400", resp.Header.Get("Cache-control")) + assert.Equal(t, `{"jsonrpc":"2.0","id":-1,"result":{"value":"hello"}}`, string(body)) + + // multiple arguments + w = httptest.NewRecorder() + err = WriteRPCResponseHTTP(w, + types.NewRPCSuccessResponse(id, &sampleResult{"hello"}), + types.NewRPCSuccessResponse(id, &sampleResult{"world"})) + require.NoError(t, err) + resp = w.Result() + body, err = io.ReadAll(resp.Body) + _ = resp.Body.Close() + require.NoError(t, err) + + assert.Equal(t, 200, resp.StatusCode) + assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) + assert.Equal(t, `[{"jsonrpc":"2.0","id":-1,"result":{"value":"hello"}},{"jsonrpc":"2.0","id":-1,"result":{"value":"world"}}]`, string(body)) +} + +func TestWriteRPCResponseHTTPError(t *testing.T) { + w := httptest.NewRecorder() + err := WriteRPCResponseHTTPError( + w, + http.StatusInternalServerError, + types.RPCInternalError(types.JSONRPCIntID(-1), errors.New("foo"))) + require.NoError(t, err) + resp := w.Result() + body, err := io.ReadAll(resp.Body) + _ = resp.Body.Close() + require.NoError(t, err) + assert.Equal(t, http.StatusInternalServerError, resp.StatusCode) + assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) + assert.Equal(t, `{"jsonrpc":"2.0","id":-1,"error":{"code":-32603,"message":"Internal error","data":"foo"}}`, string(body)) +} diff --git a/rpc/jsonrpc/server/http_uri_handler.go b/rpc/jsonrpc/server/http_uri_handler.go new file mode 100644 index 0000000..91b3e31 --- /dev/null +++ b/rpc/jsonrpc/server/http_uri_handler.go @@ -0,0 +1,218 @@ +package server + +import ( + "encoding/hex" + "fmt" + "net/http" + "reflect" + "regexp" + "strings" + + cmtjson "github.com/strangelove-ventures/cometbft-client/libs/json" + "github.com/strangelove-ventures/cometbft-client/libs/log" + types "github.com/strangelove-ventures/cometbft-client/rpc/jsonrpc/types" +) + +// HTTP + URI handler + +var reInt = regexp.MustCompile(`^-?[0-9]+$`) + +// convert from a function name to the 
http handler +func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWriter, *http.Request) { + // Always return -1 as there's no ID here. + dummyID := types.JSONRPCIntID(-1) // URIClientRequestID + + // Exception for websocket endpoints + if rpcFunc.ws { + return func(w http.ResponseWriter, r *http.Request) { + res := types.RPCMethodNotFoundError(dummyID) + if wErr := WriteRPCResponseHTTPError(w, http.StatusNotFound, res); wErr != nil { + logger.Error("failed to write response", "err", wErr) + } + } + } + + // All other endpoints + return func(w http.ResponseWriter, r *http.Request) { + logger.Debug("HTTP HANDLER", "req", r) + + ctx := &types.Context{HTTPReq: r} + args := []reflect.Value{reflect.ValueOf(ctx)} + + fnArgs, err := httpParamsToArgs(rpcFunc, r) + if err != nil { + res := types.RPCInvalidParamsError(dummyID, + fmt.Errorf("error converting http params to arguments: %w", err), + ) + if wErr := WriteRPCResponseHTTPError(w, http.StatusInternalServerError, res); wErr != nil { + logger.Error("failed to write response", "err", wErr) + } + return + } + args = append(args, fnArgs...) + + returns := rpcFunc.f.Call(args) + + logger.Debug("HTTPRestRPC", "method", r.URL.Path, "args", args, "returns", returns) + result, err := unreflectResult(returns) + if err != nil { + if err := WriteRPCResponseHTTPError(w, http.StatusInternalServerError, + types.RPCInternalError(dummyID, err)); err != nil { + logger.Error("failed to write response", "err", err) + return + } + return + } + + resp := types.NewRPCSuccessResponse(dummyID, result) + if rpcFunc.cacheableWithArgs(args) { + err = WriteCacheableRPCResponseHTTP(w, resp) + } else { + err = WriteRPCResponseHTTP(w, resp) + } + if err != nil { + logger.Error("failed to write response", "err", err) + return + } + } +} + +// Covert an http query to a list of properly typed values. +// To be properly decoded the arg must be a concrete type from CometBFT (if its an interface). +func httpParamsToArgs(rpcFunc *RPCFunc, r *http.Request) ([]reflect.Value, error) { + // skip types.Context + const argsOffset = 1 + + values := make([]reflect.Value, len(rpcFunc.argNames)) + + for i, name := range rpcFunc.argNames { + argType := rpcFunc.args[i+argsOffset] + + values[i] = reflect.Zero(argType) // set default for that type + + arg := getParam(r, name) + // log.Notice("param to arg", "argType", argType, "name", name, "arg", arg) + + if arg == "" { + continue + } + + v, ok, err := nonJSONStringToArg(argType, arg) + if err != nil { + return nil, err + } + if ok { + values[i] = v + continue + } + + values[i], err = jsonStringToArg(argType, arg) + if err != nil { + return nil, err + } + } + + return values, nil +} + +func jsonStringToArg(rt reflect.Type, arg string) (reflect.Value, error) { + rv := reflect.New(rt) + err := cmtjson.Unmarshal([]byte(arg), rv.Interface()) + if err != nil { + return rv, err + } + rv = rv.Elem() + return rv, nil +} + +func nonJSONStringToArg(rt reflect.Type, arg string) (reflect.Value, bool, error) { + if rt.Kind() == reflect.Ptr { + rv1, ok, err := nonJSONStringToArg(rt.Elem(), arg) + switch { + case err != nil: + return reflect.Value{}, false, err + case ok: + rv := reflect.New(rt.Elem()) + rv.Elem().Set(rv1) + return rv, true, nil + default: + return reflect.Value{}, false, nil + } + } else { + return _nonJSONStringToArg(rt, arg) + } +} + +// NOTE: rt.Kind() isn't a pointer. 
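+
+// How query parameters coerce into typed arguments (sketch, derived from the
+// rules below; the values are illustrative):
+//
+//	height=7      -> int 7              bare integers are quoted, then unmarshaled
+//	height="7"    -> int 7              quoted integers decode the same way
+//	hash=0xDEAD   -> []byte{0xDE, 0xAD} hex input for string/[]byte arguments
+//	name="bob"    -> string "bob"       everything else goes through cmtjson.Unmarshal
+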
+func _nonJSONStringToArg(rt reflect.Type, arg string) (reflect.Value, bool, error) { + isIntString := reInt.Match([]byte(arg)) + isQuotedString := strings.HasPrefix(arg, `"`) && strings.HasSuffix(arg, `"`) + isHexString := strings.HasPrefix(strings.ToLower(arg), "0x") + + var expectingString, expectingByteSlice, expectingInt bool + switch rt.Kind() { + case reflect.Int, + reflect.Uint, + reflect.Int8, + reflect.Uint8, + reflect.Int16, + reflect.Uint16, + reflect.Int32, + reflect.Uint32, + reflect.Int64, + reflect.Uint64: + expectingInt = true + case reflect.String: + expectingString = true + case reflect.Slice: + expectingByteSlice = rt.Elem().Kind() == reflect.Uint8 + } + + if isIntString && expectingInt { + qarg := `"` + arg + `"` + rv, err := jsonStringToArg(rt, qarg) + if err != nil { + return rv, false, err + } + + return rv, true, nil + } + + if isHexString { + if !expectingString && !expectingByteSlice { + err := fmt.Errorf("got a hex string arg, but expected '%s'", + rt.Kind().String()) + return reflect.ValueOf(nil), false, err + } + + var value []byte + value, err := hex.DecodeString(arg[2:]) + if err != nil { + return reflect.ValueOf(nil), false, err + } + if rt.Kind() == reflect.String { + return reflect.ValueOf(string(value)), true, nil + } + return reflect.ValueOf(value), true, nil + } + + if isQuotedString && expectingByteSlice { + v := reflect.New(reflect.TypeOf("")) + err := cmtjson.Unmarshal([]byte(arg), v.Interface()) + if err != nil { + return reflect.ValueOf(nil), false, err + } + v = v.Elem() + return reflect.ValueOf([]byte(v.String())), true, nil + } + + return reflect.ValueOf(nil), false, nil +} + +func getParam(r *http.Request, param string) string { + s := r.URL.Query().Get(param) + if s == "" { + s = r.FormValue(param) + } + return s +} diff --git a/rpc/jsonrpc/server/parse_test.go b/rpc/jsonrpc/server/parse_test.go new file mode 100644 index 0000000..a920024 --- /dev/null +++ b/rpc/jsonrpc/server/parse_test.go @@ -0,0 +1,213 @@ +package server + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/strangelove-ventures/cometbft-client/libs/bytes" + types "github.com/strangelove-ventures/cometbft-client/rpc/jsonrpc/types" +) + +func TestParseJSONMap(t *testing.T) { + input := []byte(`{"value":"1234","height":22}`) + + // naive is float,string + var p1 map[string]interface{} + err := json.Unmarshal(input, &p1) + if assert.Nil(t, err) { + h, ok := p1["height"].(float64) + if assert.True(t, ok, "%#v", p1["height"]) { + assert.EqualValues(t, 22, h) + } + v, ok := p1["value"].(string) + if assert.True(t, ok, "%#v", p1["value"]) { + assert.EqualValues(t, "1234", v) + } + } + + // preloading map with values doesn't help + tmp := 0 + p2 := map[string]interface{}{ + "value": &bytes.HexBytes{}, + "height": &tmp, + } + err = json.Unmarshal(input, &p2) + if assert.Nil(t, err) { + h, ok := p2["height"].(float64) + if assert.True(t, ok, "%#v", p2["height"]) { + assert.EqualValues(t, 22, h) + } + v, ok := p2["value"].(string) + if assert.True(t, ok, "%#v", p2["value"]) { + assert.EqualValues(t, "1234", v) + } + } + + // preload here with *pointers* to the desired types + // struct has unknown types, but hard-coded keys + tmp = 0 + p3 := struct { + Value interface{} `json:"value"` + Height interface{} `json:"height"` + }{ + Height: &tmp, + Value: &bytes.HexBytes{}, + } + err = json.Unmarshal(input, &p3) + if assert.Nil(t, err) { + h, ok := p3.Height.(*int) + if assert.True(t, ok, "%#v", p3.Height) { + 
			assert.Equal(t, 22, *h)
+		}
+		v, ok := p3.Value.(*bytes.HexBytes)
+		if assert.True(t, ok, "%#v", p3.Value) {
+			assert.EqualValues(t, []byte{0x12, 0x34}, *v)
+		}
+	}
+
+	// simplest solution, but hard-coded
+	p4 := struct {
+		Value  bytes.HexBytes `json:"value"`
+		Height int            `json:"height"`
+	}{}
+	err = json.Unmarshal(input, &p4)
+	if assert.Nil(t, err) {
+		assert.EqualValues(t, 22, p4.Height)
+		assert.EqualValues(t, []byte{0x12, 0x34}, p4.Value)
+	}
+
+	// so, let's use this trick...
+	// dynamic keys on map, and we can deserialize to the desired types
+	var p5 map[string]*json.RawMessage
+	err = json.Unmarshal(input, &p5)
+	if assert.Nil(t, err) {
+		var h int
+		err = json.Unmarshal(*p5["height"], &h)
+		if assert.Nil(t, err) {
+			assert.Equal(t, 22, h)
+		}
+
+		var v bytes.HexBytes
+		err = json.Unmarshal(*p5["value"], &v)
+		if assert.Nil(t, err) {
+			assert.Equal(t, bytes.HexBytes{0x12, 0x34}, v)
+		}
+	}
+}
+
+func TestParseJSONArray(t *testing.T) {
+	input := []byte(`["1234",22]`)
+
+	// naive is float,string
+	var p1 []interface{}
+	err := json.Unmarshal(input, &p1)
+	if assert.Nil(t, err) {
+		v, ok := p1[0].(string)
+		if assert.True(t, ok, "%#v", p1[0]) {
+			assert.EqualValues(t, "1234", v)
+		}
+		h, ok := p1[1].(float64)
+		if assert.True(t, ok, "%#v", p1[1]) {
+			assert.EqualValues(t, 22, h)
+		}
+	}
+
+	// preloading map with values helps here (unlike map - p2 above)
+	tmp := 0
+	p2 := []interface{}{&bytes.HexBytes{}, &tmp}
+	err = json.Unmarshal(input, &p2)
+	if assert.Nil(t, err) {
+		v, ok := p2[0].(*bytes.HexBytes)
+		if assert.True(t, ok, "%#v", p2[0]) {
+			assert.EqualValues(t, []byte{0x12, 0x34}, *v)
+		}
+		h, ok := p2[1].(*int)
+		if assert.True(t, ok, "%#v", p2[1]) {
+			assert.EqualValues(t, 22, *h)
+		}
+	}
+}
+
+func TestParseJSONRPC(t *testing.T) {
+	demo := func(ctx *types.Context, height int, name string) {}
+	call := NewRPCFunc(demo, "height,name")
+
+	cases := []struct {
+		raw    string
+		height int64
+		name   string
+		fail   bool
+	}{
+		// should parse
+		{`["7", "flew"]`, 7, "flew", false},
+		{`{"name": "john", "height": "22"}`, 22, "john", false},
+		// defaults
+		{`{"name": "solo", "unused": "stuff"}`, 0, "solo", false},
+		// should fail - wrong types/length
+		{`["flew", 7]`, 0, "", true},
+		{`[7,"flew",100]`, 0, "", true},
+		{`{"name": -12, "height": "fred"}`, 0, "", true},
+	}
+	for idx, tc := range cases {
+		i := strconv.Itoa(idx)
+		data := []byte(tc.raw)
+		vals, err := jsonParamsToArgs(call, data)
+		if tc.fail {
+			assert.NotNil(t, err, i)
+		} else {
+			assert.Nil(t, err, "%s: %+v", i, err)
+			if assert.Equal(t, 2, len(vals), i) {
+				assert.Equal(t, tc.height, vals[0].Int(), i)
+				assert.Equal(t, tc.name, vals[1].String(), i)
+			}
+		}
+
+	}
+}
+
+func TestParseURI(t *testing.T) {
+	demo := func(ctx *types.Context, height int, name string) {}
+	call := NewRPCFunc(demo, "height,name")
+
+	cases := []struct {
+		raw    []string
+		height int64
+		name   string
+		fail   bool
+	}{
+		// can parse numbers unquoted and strings quoted
+		{[]string{"7", `"flew"`}, 7, "flew", false},
+		{[]string{"22", `"john"`}, 22, "john", false},
+		{[]string{"-10", `"bob"`}, -10, "bob", false},
+		// can parse numbers quoted, too
+		{[]string{`"7"`, `"flew"`}, 7, "flew", false},
+		{[]string{`"-10"`, `"bob"`}, -10, "bob", false},
+		// can't parse strings unquoted
+		{[]string{`"-10"`, `bob`}, -10, "bob", true},
+	}
+	for idx, tc := range cases {
+		i := strconv.Itoa(idx)
+		// data := []byte(tc.raw)
+		url := fmt.Sprintf(
+			"test.com/method?height=%v&name=%v",
+			tc.raw[0], tc.raw[1])
+		req, err := http.NewRequest("GET", url, nil)
+		assert.NoError(t, err)
+		vals, err := httpParamsToArgs(call, req)
+		if tc.fail {
+			assert.NotNil(t, err, i)
+		} else {
+			assert.Nil(t, err, "%s: %+v", i, err)
+			if assert.Equal(t, 2, len(vals), i) {
+				assert.Equal(t, tc.height, vals[0].Int(), i)
+				assert.Equal(t, tc.name, vals[1].String(), i)
+			}
+		}
+
+	}
+}
diff --git a/rpc/jsonrpc/server/rpc_func.go b/rpc/jsonrpc/server/rpc_func.go
new file mode 100644
index 0000000..217c27a
--- /dev/null
+++ b/rpc/jsonrpc/server/rpc_func.go
@@ -0,0 +1,154 @@
+package server
+
+import (
+	"fmt"
+	"net/http"
+	"reflect"
+	"strings"
+
+	"github.com/strangelove-ventures/cometbft-client/libs/log"
+)
+
+// RegisterRPCFuncs adds a route for each function in the funcMap, as well as
+// general jsonrpc and websocket handlers for all functions. "result" is the
+// interface on which the result objects are registered, and is populated with
+// every RPCResponse.
+func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger log.Logger) {
+	// HTTP endpoints
+	for funcName, rpcFunc := range funcMap {
+		mux.HandleFunc("/"+funcName, makeHTTPHandler(rpcFunc, logger))
+	}
+
+	// JSONRPC endpoints
+	mux.HandleFunc("/", handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, logger)))
+}
+
+type Option func(*RPCFunc)
+
+// Cacheable enables returning a cache control header from RPC functions to
+// which it is applied.
+//
+// `noCacheDefArgs` is a list of argument names that, if omitted or set to
+// their defaults when calling the RPC function, will skip the response
+// caching.
+func Cacheable(noCacheDefArgs ...string) Option {
+	return func(r *RPCFunc) {
+		r.cacheable = true
+		r.noCacheDefArgs = make(map[string]interface{})
+		for _, arg := range noCacheDefArgs {
+			r.noCacheDefArgs[arg] = nil
+		}
+	}
+}
+
+// Ws enables WebSocket communication.
+func Ws() Option {
+	return func(r *RPCFunc) {
+		r.ws = true
+	}
+}
+
+// RPCFunc contains the introspected type information for a function
+type RPCFunc struct {
+	f              reflect.Value          // underlying rpc function
+	args           []reflect.Type         // type of each function arg
+	returns        []reflect.Type         // type of each return arg
+	argNames       []string               // name of each argument
+	cacheable      bool                   // enable cache control
+	ws             bool                   // enable websocket communication
+	noCacheDefArgs map[string]interface{} // a lookup table of args that, if not supplied or are set to default values, cause us to not cache
+}
+
+// NewRPCFunc wraps a function for introspection.
+// f is the function, args are comma-separated argument names.
+func NewRPCFunc(f interface{}, args string, options ...Option) *RPCFunc {
+	return newRPCFunc(f, args, options...)
+}
+
+// NewWSRPCFunc wraps a function for introspection and use in websockets.
+func NewWSRPCFunc(f interface{}, args string, options ...Option) *RPCFunc {
+	options = append(options, Ws())
+	return newRPCFunc(f, args, options...)
+}
+
+// cacheableWithArgs returns whether or not a call to this function is cacheable,
+// given the specified arguments.
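// For instance (sketch; fetchBlock and the "height" parameter are invented
// for illustration):
//
//	// With Cacheable("height"), calls that omit height or pass its zero
//	// value are never cached; calls with an explicit height may be.
//	fn := NewRPCFunc(fetchBlock, "height", Cacheable("height"))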
+func (f *RPCFunc) cacheableWithArgs(args []reflect.Value) bool { + if !f.cacheable { + return false + } + // Skip the context variable common to all RPC functions + for i := 1; i < len(f.args); i++ { + // f.argNames does not include the context variable + argName := f.argNames[i-1] + if _, hasDefault := f.noCacheDefArgs[argName]; hasDefault { + // Argument with default value was not supplied + if i >= len(args) { + return false + } + // Argument with default value is set to its zero value + if args[i].IsZero() { + return false + } + } + } + return true +} + +func newRPCFunc(f interface{}, args string, options ...Option) *RPCFunc { + var argNames []string + if args != "" { + argNames = strings.Split(args, ",") + } + + r := &RPCFunc{ + f: reflect.ValueOf(f), + args: funcArgTypes(f), + returns: funcReturnTypes(f), + argNames: argNames, + } + + for _, opt := range options { + opt(r) + } + + return r +} + +// return a function's argument types +func funcArgTypes(f interface{}) []reflect.Type { + t := reflect.TypeOf(f) + n := t.NumIn() + typez := make([]reflect.Type, n) + for i := 0; i < n; i++ { + typez[i] = t.In(i) + } + return typez +} + +// return a function's return types +func funcReturnTypes(f interface{}) []reflect.Type { + t := reflect.TypeOf(f) + n := t.NumOut() + typez := make([]reflect.Type, n) + for i := 0; i < n; i++ { + typez[i] = t.Out(i) + } + return typez +} + +//------------------------------------------------------------- + +// NOTE: assume returns is result struct and error. If error is not nil, return it +func unreflectResult(returns []reflect.Value) (interface{}, error) { + errV := returns[1] + if errV.Interface() != nil { + return nil, fmt.Errorf("%v", errV.Interface()) + } + rv := returns[0] + // the result is a registered interface, + // we need a pointer to it so we can marshal with type byte + rvp := reflect.New(rv.Type()) + rvp.Elem().Set(rv) + return rvp.Interface(), nil +} diff --git a/rpc/jsonrpc/server/test.crt b/rpc/jsonrpc/server/test.crt new file mode 100644 index 0000000..e4ab196 --- /dev/null +++ b/rpc/jsonrpc/server/test.crt @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEODCCAiCgAwIBAgIQWDHUrd4tOM2xExWhzOEJ7DANBgkqhkiG9w0BAQsFADAZ +MRcwFQYDVQQDEw50ZW5kZXJtaW50LmNvbTAeFw0xOTA2MDIxMTAyMDdaFw0yMDEy +MDIxMTAyMDRaMBExDzANBgNVBAMTBnNlcnZlcjCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBANBaa6dc9GZcIhAHWqVrx0LONYf+IlbvTP7yrV45ws0ix8TX +1NUOiDY1cwzKH8ay/HYX45e2fFLrtLidc9h+apsC55k3Vdcy00+Ksr/adjR8D4A/ +GpnTS+hVDHTlqINe9a7USok34Zr1rc3fh4Imu5RxEurjMwkA/36k6+OpXMp2qlKY +S1fGqwn2KGhXkp/yTWZILEMXBazNxGx4xfqYXzWm6boeyJAXpM2DNkv7dtwa/CWY +WacUQJApNInwn5+B8LLoo+pappkfZOjAD9/aHKsyFTSWmmWeg7V//ouB3u5vItqf +GP+3xmPgeYeEyOIe/P2f8bRuQs+GGwSCmi6F1GUCAwEAAaOBgzCBgDAOBgNVHQ8B +Af8EBAMCA7gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQW +BBSpBFIMbkBR4xVYQZtUJQQwzPmbHjAfBgNVHSMEGDAWgBTUkz3u+N2iMe6yKb5+ +R1d4CeM9YTAPBgNVHREECDAGhwR/AAABMA0GCSqGSIb3DQEBCwUAA4ICAQBCqdzS +tPHkMYWjYs6aREwob9whjyG8a4Qp6IkP1SYHCwpzsTeWLi9ybEcDRb3jZ4iRxbZg +7GFxjqHoWgBZHAIyICMsHupOJEtXq5hx86NuMwk/12bx1eNj0yTIAnVOA+em/ZtB +zR38OwB8xXmjKd0Ow1Y7zCh5zE2gU+sR0JOJSfxXUZrJvwDNrbcmZPQ+kwuq4cyv +fxZnvZf/owbyOLQFdbiPQbbiZ7JSv8q7GCMleULCEygrsWClYkULUByhKykCHJIU +wfq1owge9EqG/4CDCCjB9vBFmUyv3FJhgWnzd6tPQckFoHSoD0Bjsv/pQFcsGLcg ++e/Mm6hZgCXXwI2WHYbxqz5ToOaRQQYo6N77jWejOBMecOZmPDyQ2nz73aJd11GW +NiDT7pyMlBJA8W4wAvVP4ow2ugqsPjqZ6EyismIGFUTqMp+NtXOsLPK+sEMhKhJ9 +ulczRpPEf25roBt6aEk2fTAfAPmbpvNamBLSbBU23mzJ38RmfhxLOlOgCGbBBX4d +kE+/+En8UJO4X8CKaKRo/c5G2UZ6++2cjp6SPrsGENDMW5yBGegrDw+ow8/bLxIr 
+OjWpSe2cygovy3aHE6UBOgkxw9KIaSEqFgjQZ0i+xO6l6qQoljQgUGXfecVMR+7C +4KsyVVTMlK9/thA7Zfc8a5z8ZCtIKkT52XsJhw== +-----END CERTIFICATE----- diff --git a/rpc/jsonrpc/server/test.key b/rpc/jsonrpc/server/test.key new file mode 100644 index 0000000..bb9af06 --- /dev/null +++ b/rpc/jsonrpc/server/test.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEoQIBAAKCAQEA0Fprp1z0ZlwiEAdapWvHQs41h/4iVu9M/vKtXjnCzSLHxNfU +1Q6INjVzDMofxrL8dhfjl7Z8Uuu0uJ1z2H5qmwLnmTdV1zLTT4qyv9p2NHwPgD8a +mdNL6FUMdOWog171rtRKiTfhmvWtzd+Hgia7lHES6uMzCQD/fqTr46lcynaqUphL +V8arCfYoaFeSn/JNZkgsQxcFrM3EbHjF+phfNabpuh7IkBekzYM2S/t23Br8JZhZ +pxRAkCk0ifCfn4Hwsuij6lqmmR9k6MAP39ocqzIVNJaaZZ6DtX/+i4He7m8i2p8Y +/7fGY+B5h4TI4h78/Z/xtG5Cz4YbBIKaLoXUZQIDAQABAoH/NodzpVmunRt/zrIe +By0t+U3+tJjOY/I9NHxO41o6oXV40wupqBkljQpwEejUaCxv5nhaGFqqLwmBQs/y +gbaUL/2Sn4bb8HZc13R1U8DZLuNJK0dYrumd9DBOEkoI0FkJ87ebyk3VvbiOxFK8 +JFP+w9rUGKVdtf2M4JhJJEwu/M2Yawx9/8CrCIY2G6ufaylrIysLeQMsxrogF8n4 +hq7fyqveWRzxhqUxS2fp9Ynpx4jnd1lMzv+z3i8eEsW+gB9yke7UkXZMbtZg1xfB +JjiEfcDVfSwSihhgOYttgQ9hkIdohDUak7OzRSWVBuoxWUhMfrQxw/HZlgZJL9Vf +rGdlAoGBANOGmgEGky+acV33WTWGV5OdAw6B/SlBEoORJbj6UzQiUz3hFH/Tgpbj +JOKHWGbGd8OtOYbt9JoofGlNgHA/4nAEYAc2HGa+q0fBwMUflU0DudAxXis4jDmE +D76moGmyJoSgwVrp1W/vwNixA5RpcZ3Wst2nf9RKLr+DxypHTit/AoGBAPwpDeqc +rwXOTl0KR/080Nc11Z03VIVZAGfA59J73HmADF9bBVlmReQdkwX0lERchdzD0lfa +XqbqBLr4FS5Uqyn5f3DCaMnOeKfvtGw2z6LnY+w03mii4PEW/vNKLlB18NdduPwL +KeAc08Zh+qJFMKD1PoEQOH+Y7NybBbaQL8IbAoGAfPPUYaq6o7I+Kd4FysKTVVW5 +CobrP8V65FGH0R++qttkBPfDHkeZqvx/O3nsVLoE4YigpP5IMhCcfbAUoTp7zuQm +vdvPJzqW/4qLD2c60QXUbBHdqPZ8jzVd/6d6tzVP36T+02+yb69XYiofDTrErRK5 +EorxzjwMJYH40xbQLI0CgYBh7d/FucwPSSwN3ixPIQtKSVIImLBuiT4rDTP6/reF +SEGF1ueg7KNAEGxE59OdKQGj1zkdfWU9Fa14n1g6gg9nYcoolJf1qAYb0nAThsXk +0lBwL6ggowERIIkrGygZf3Rlb7SjzgIZU5i7dtnLo2tbV2NK5G3MwCtdEaeKWzzw ++QKBgQC7+JPHoqbnNgis2vCGLKMOU3HpJK/rYEU/8ZUegc9lshEFZYsRbtKQQJQs +nqsChrG8UoK84frujEBkO/Nzsil85p8ar79wZguGnVvswTWaTuKvl8H/qQQ/JSHZ +OHGQD4qwTCkdRr8Vf8NfuCoZlJDnHncLJZNWjrb5feqCnJ/YIQ== +-----END RSA PRIVATE KEY----- diff --git a/rpc/jsonrpc/server/ws_handler.go b/rpc/jsonrpc/server/ws_handler.go new file mode 100644 index 0000000..80d7d5e --- /dev/null +++ b/rpc/jsonrpc/server/ws_handler.go @@ -0,0 +1,458 @@ +package server + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "reflect" + "runtime/debug" + "time" + + "github.com/gorilla/websocket" + + "github.com/strangelove-ventures/cometbft-client/libs/log" + "github.com/strangelove-ventures/cometbft-client/libs/service" + types "github.com/strangelove-ventures/cometbft-client/rpc/jsonrpc/types" +) + +// WebSocket handler + +const ( + defaultWSWriteChanCapacity = 100 + defaultWSWriteWait = 10 * time.Second + defaultWSReadWait = 30 * time.Second + defaultWSPingPeriod = (defaultWSReadWait * 9) / 10 +) + +// WebsocketManager provides a WS handler for incoming connections and passes a +// map of functions along with any additional params to new connections. +// NOTE: The websocket path is defined externally, e.g. in node/node.go +type WebsocketManager struct { + websocket.Upgrader + + funcMap map[string]*RPCFunc + logger log.Logger + wsConnOptions []func(*wsConnection) +} + +// NewWebsocketManager returns a new WebsocketManager that passes a map of +// functions, connection options and logger to new WS connections. 
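// Typical wiring looks like this (sketch; the "/websocket" path and funcMap
// contents are assumptions, mirrored by ws_handler_test.go below):
//
//	wm := NewWebsocketManager(funcMap, ReadLimit(1<<20))
//	wm.SetLogger(logger)
//	mux := http.NewServeMux()
//	mux.HandleFunc("/websocket", wm.WebsocketHandler)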
+func NewWebsocketManager( + funcMap map[string]*RPCFunc, + wsConnOptions ...func(*wsConnection), +) *WebsocketManager { + return &WebsocketManager{ + funcMap: funcMap, + Upgrader: websocket.Upgrader{ + CheckOrigin: func(r *http.Request) bool { + // TODO ??? + // + // The default behavior would be relevant to browser-based clients, + // afaik. I suppose having a pass-through is a workaround for allowing + // for more complex security schemes, shifting the burden of + // AuthN/AuthZ outside the CometBFT RPC. + // I can't think of other uses right now that would warrant a TODO + // though. The real backstory of this TODO shall remain shrouded in + // mystery + return true + }, + }, + logger: log.NewNopLogger(), + wsConnOptions: wsConnOptions, + } +} + +// SetLogger sets the logger. +func (wm *WebsocketManager) SetLogger(l log.Logger) { + wm.logger = l +} + +// WebsocketHandler upgrades the request/response (via http.Hijack) and starts +// the wsConnection. +func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Request) { + wsConn, err := wm.Upgrade(w, r, nil) + if err != nil { + // TODO - return http error + wm.logger.Error("Failed to upgrade connection", "err", err) + return + } + defer func() { + if err := wsConn.Close(); err != nil { + wm.logger.Error("Failed to close connection", "err", err) + } + }() + + // register connection + con := newWSConnection(wsConn, wm.funcMap, wm.wsConnOptions...) + con.SetLogger(wm.logger.With("remote", wsConn.RemoteAddr())) + wm.logger.Info("New websocket connection", "remote", con.remoteAddr) + err = con.Start() // BLOCKING + if err != nil { + wm.logger.Error("Failed to start connection", "err", err) + return + } + if err := con.Stop(); err != nil { + wm.logger.Error("error while stopping connection", "error", err) + } +} + +// WebSocket connection + +// A single websocket connection contains listener id, underlying ws +// connection, and the event switch for subscribing to events. +// +// In case of an error, the connection is stopped. +type wsConnection struct { + service.BaseService + + remoteAddr string + baseConn *websocket.Conn + // writeChan is never closed, to allow WriteRPCResponse() to fail. + writeChan chan types.RPCResponse + + // chan, which is closed when/if readRoutine errors + // used to abort writeRoutine + readRoutineQuit chan struct{} + + funcMap map[string]*RPCFunc + + // write channel capacity + writeChanCapacity int + + // each write times out after this. + writeWait time.Duration + + // Connection times out if we haven't received *anything* in this long, not even pings. + readWait time.Duration + + // Send pings to server with this period. Must be less than readWait, but greater than zero. + pingPeriod time.Duration + + // Maximum message size. + readLimit int64 + + // callback which is called upon disconnect + onDisconnect func(remoteAddr string) + + ctx context.Context + cancel context.CancelFunc +} + +// NewWSConnection wraps websocket.Conn. +// +// See the commentary on the func(*wsConnection) functions for a detailed +// description of how to configure ping period and pong wait time. NOTE: if the +// write buffer is full, pongs may be dropped, which may cause clients to +// disconnect. 
see https://github.com/gorilla/websocket/issues/97 +func newWSConnection( + baseConn *websocket.Conn, + funcMap map[string]*RPCFunc, + options ...func(*wsConnection), +) *wsConnection { + wsc := &wsConnection{ + remoteAddr: baseConn.RemoteAddr().String(), + baseConn: baseConn, + funcMap: funcMap, + writeWait: defaultWSWriteWait, + writeChanCapacity: defaultWSWriteChanCapacity, + readWait: defaultWSReadWait, + pingPeriod: defaultWSPingPeriod, + readRoutineQuit: make(chan struct{}), + } + for _, option := range options { + option(wsc) + } + wsc.baseConn.SetReadLimit(wsc.readLimit) + wsc.BaseService = *service.NewBaseService(nil, "wsConnection", wsc) + return wsc +} + +// OnDisconnect sets a callback which is used upon disconnect - not +// Goroutine-safe. Nop by default. +func OnDisconnect(onDisconnect func(remoteAddr string)) func(*wsConnection) { + return func(wsc *wsConnection) { + wsc.onDisconnect = onDisconnect + } +} + +// WriteWait sets the amount of time to wait before a websocket write times out. +// It should only be used in the constructor - not Goroutine-safe. +func WriteWait(writeWait time.Duration) func(*wsConnection) { + return func(wsc *wsConnection) { + wsc.writeWait = writeWait + } +} + +// WriteChanCapacity sets the capacity of the websocket write channel. +// It should only be used in the constructor - not Goroutine-safe. +func WriteChanCapacity(cap int) func(*wsConnection) { + return func(wsc *wsConnection) { + wsc.writeChanCapacity = cap + } +} + +// ReadWait sets the amount of time to wait before a websocket read times out. +// It should only be used in the constructor - not Goroutine-safe. +func ReadWait(readWait time.Duration) func(*wsConnection) { + return func(wsc *wsConnection) { + wsc.readWait = readWait + } +} + +// PingPeriod sets the duration for sending websocket pings. +// It should only be used in the constructor - not Goroutine-safe. +func PingPeriod(pingPeriod time.Duration) func(*wsConnection) { + return func(wsc *wsConnection) { + wsc.pingPeriod = pingPeriod + } +} + +// ReadLimit sets the maximum size for reading message. +// It should only be used in the constructor - not Goroutine-safe. +func ReadLimit(readLimit int64) func(*wsConnection) { + return func(wsc *wsConnection) { + wsc.readLimit = readLimit + } +} + +// OnStart implements service.Service by starting the read and write routines. It +// blocks until there's some error. +func (wsc *wsConnection) OnStart() error { + wsc.writeChan = make(chan types.RPCResponse, wsc.writeChanCapacity) + + // Read subscriptions/unsubscriptions to events + go wsc.readRoutine() + // Write responses, BLOCKING. + wsc.writeRoutine() + + return nil +} + +// OnStop implements service.Service by unsubscribing remoteAddr from all +// subscriptions. +func (wsc *wsConnection) OnStop() { + if wsc.onDisconnect != nil { + wsc.onDisconnect(wsc.remoteAddr) + } + + if wsc.ctx != nil { + wsc.cancel() + } +} + +// GetRemoteAddr returns the remote address of the underlying connection. +// It implements WSRPCConnection +func (wsc *wsConnection) GetRemoteAddr() string { + return wsc.remoteAddr +} + +// WriteRPCResponse pushes a response to the writeChan, and blocks until it is +// accepted. +// It implements WSRPCConnection. It is Goroutine-safe. 
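// For example (sketch; the one-second timeout is arbitrary, and id/result
// stand in for a request ID and call result):
//
//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
//	defer cancel()
//	if err := wsc.WriteRPCResponse(ctx, types.NewRPCSuccessResponse(id, result)); err != nil {
//		// the connection was stopped or ctx expired before the response was queued
//	}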
+func (wsc *wsConnection) WriteRPCResponse(ctx context.Context, resp types.RPCResponse) error { + select { + case <-wsc.Quit(): + return errors.New("connection was stopped") + case <-ctx.Done(): + return ctx.Err() + case wsc.writeChan <- resp: + return nil + } +} + +// TryWriteRPCResponse attempts to push a response to the writeChan, but does +// not block. +// It implements WSRPCConnection. It is Goroutine-safe +func (wsc *wsConnection) TryWriteRPCResponse(resp types.RPCResponse) bool { + select { + case <-wsc.Quit(): + return false + case wsc.writeChan <- resp: + return true + default: + return false + } +} + +// Context returns the connection's context. +// The context is canceled when the client's connection closes. +func (wsc *wsConnection) Context() context.Context { + if wsc.ctx != nil { + return wsc.ctx + } + wsc.ctx, wsc.cancel = context.WithCancel(context.Background()) + return wsc.ctx +} + +// Read from the socket and subscribe to or unsubscribe from events +func (wsc *wsConnection) readRoutine() { + // readRoutine will block until response is written or WS connection is closed + writeCtx := context.Background() + + defer func() { + if r := recover(); r != nil { + err, ok := r.(error) + if !ok { + err = fmt.Errorf("WSJSONRPC: %v", r) + } + wsc.Logger.Error("Panic in WSJSONRPC handler", "err", err, "stack", string(debug.Stack())) + if err := wsc.WriteRPCResponse(writeCtx, types.RPCInternalError(types.JSONRPCIntID(-1), err)); err != nil { + wsc.Logger.Error("Error writing RPC response", "err", err) + } + go wsc.readRoutine() + } + }() + + wsc.baseConn.SetPongHandler(func(m string) error { + return wsc.baseConn.SetReadDeadline(time.Now().Add(wsc.readWait)) + }) + + for { + select { + case <-wsc.Quit(): + return + default: + // reset deadline for every type of message (control or data) + if err := wsc.baseConn.SetReadDeadline(time.Now().Add(wsc.readWait)); err != nil { + wsc.Logger.Error("failed to set read deadline", "err", err) + } + + _, r, err := wsc.baseConn.NextReader() + if err != nil { + if websocket.IsCloseError(err, websocket.CloseNormalClosure) { + wsc.Logger.Info("Client closed the connection") + } else { + wsc.Logger.Error("Failed to read request", "err", err) + } + if err := wsc.Stop(); err != nil { + wsc.Logger.Error("Error closing websocket connection", "err", err) + } + close(wsc.readRoutineQuit) + return + } + + dec := json.NewDecoder(r) + var request types.RPCRequest + err = dec.Decode(&request) + if err != nil { + if err := wsc.WriteRPCResponse(writeCtx, + types.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err))); err != nil { + wsc.Logger.Error("Error writing RPC response", "err", err) + } + continue + } + + // A Notification is a Request object without an "id" member. + // The Server MUST NOT reply to a Notification, including those that are within a batch request. + if request.ID == nil { + wsc.Logger.Debug( + "WSJSONRPC received a notification, skipping... (please send a non-empty ID if you want to call a method)", + "req", request, + ) + continue + } + + // Now, fetch the RPCFunc and execute it. 
+ rpcFunc := wsc.funcMap[request.Method] + if rpcFunc == nil { + if err := wsc.WriteRPCResponse(writeCtx, types.RPCMethodNotFoundError(request.ID)); err != nil { + wsc.Logger.Error("Error writing RPC response", "err", err) + } + continue + } + + ctx := &types.Context{JSONReq: &request, WSConn: wsc} + args := []reflect.Value{reflect.ValueOf(ctx)} + if len(request.Params) > 0 { + fnArgs, err := jsonParamsToArgs(rpcFunc, request.Params) + if err != nil { + if err := wsc.WriteRPCResponse(writeCtx, + types.RPCInternalError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), + ); err != nil { + wsc.Logger.Error("Error writing RPC response", "err", err) + } + continue + } + args = append(args, fnArgs...) + } + + returns := rpcFunc.f.Call(args) + + // TODO: Need to encode args/returns to string if we want to log them + wsc.Logger.Info("WSJSONRPC", "method", request.Method) + + result, err := unreflectResult(returns) + if err != nil { + if err := wsc.WriteRPCResponse(writeCtx, types.RPCInternalError(request.ID, err)); err != nil { + wsc.Logger.Error("Error writing RPC response", "err", err) + } + continue + } + + if err := wsc.WriteRPCResponse(writeCtx, types.NewRPCSuccessResponse(request.ID, result)); err != nil { + wsc.Logger.Error("Error writing RPC response", "err", err) + } + } + } +} + +// receives on a write channel and writes out on the socket +func (wsc *wsConnection) writeRoutine() { + pingTicker := time.NewTicker(wsc.pingPeriod) + defer pingTicker.Stop() + + // https://github.com/gorilla/websocket/issues/97 + pongs := make(chan string, 1) + wsc.baseConn.SetPingHandler(func(m string) error { + select { + case pongs <- m: + default: + } + return nil + }) + + for { + select { + case <-wsc.Quit(): + return + case <-wsc.readRoutineQuit: // error in readRoutine + return + case m := <-pongs: + err := wsc.writeMessageWithDeadline(websocket.PongMessage, []byte(m)) + if err != nil { + wsc.Logger.Info("Failed to write pong (client may disconnect)", "err", err) + } + case <-pingTicker.C: + err := wsc.writeMessageWithDeadline(websocket.PingMessage, []byte{}) + if err != nil { + wsc.Logger.Error("Failed to write ping", "err", err) + return + } + case msg := <-wsc.writeChan: + // Use json.MarshalIndent instead of Marshal for pretty output. + // Pretty output not necessary, since most consumers of WS events are + // automated processes, not humans. + jsonBytes, err := json.Marshal(msg) + if err != nil { + wsc.Logger.Error("Failed to marshal RPCResponse to JSON", "err", err) + continue + } + if err = wsc.writeMessageWithDeadline(websocket.TextMessage, jsonBytes); err != nil { + wsc.Logger.Error("Failed to write response", "err", err, "msg", msg) + return + } + } + } +} + +// All writes to the websocket must (re)set the write deadline. 
+// If some writes don't set it while others do, they may timeout incorrectly +// (https://github.com/tendermint/tendermint/issues/553) +func (wsc *wsConnection) writeMessageWithDeadline(msgType int, msg []byte) error { + if err := wsc.baseConn.SetWriteDeadline(time.Now().Add(wsc.writeWait)); err != nil { + return err + } + return wsc.baseConn.WriteMessage(msgType, msg) +} diff --git a/rpc/jsonrpc/server/ws_handler_test.go b/rpc/jsonrpc/server/ws_handler_test.go new file mode 100644 index 0000000..901250a --- /dev/null +++ b/rpc/jsonrpc/server/ws_handler_test.go @@ -0,0 +1,56 @@ +package server + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/gorilla/websocket" + "github.com/stretchr/testify/require" + + "github.com/strangelove-ventures/cometbft-client/libs/log" + types "github.com/strangelove-ventures/cometbft-client/rpc/jsonrpc/types" +) + +func TestWebsocketManagerHandler(t *testing.T) { + s := newWSServer() + defer s.Close() + + // check upgrader works + d := websocket.Dialer{} + c, dialResp, err := d.Dial("ws://"+s.Listener.Addr().String()+"/websocket", nil) + require.NoError(t, err) + + if got, want := dialResp.StatusCode, http.StatusSwitchingProtocols; got != want { + t.Errorf("dialResp.StatusCode = %q, want %q", got, want) + } + + // check basic functionality works + req, err := types.MapToRequest( + types.JSONRPCStringID("TestWebsocketManager"), + "c", + map[string]interface{}{"s": "a", "i": 10}, + ) + require.NoError(t, err) + err = c.WriteJSON(req) + require.NoError(t, err) + + var resp types.RPCResponse + err = c.ReadJSON(&resp) + require.NoError(t, err) + require.Nil(t, resp.Error) + dialResp.Body.Close() +} + +func newWSServer() *httptest.Server { + funcMap := map[string]*RPCFunc{ + "c": NewWSRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), + } + wm := NewWebsocketManager(funcMap) + wm.SetLogger(log.TestingLogger()) + + mux := http.NewServeMux() + mux.HandleFunc("/websocket", wm.WebsocketHandler) + + return httptest.NewServer(mux) +} diff --git a/rpc/jsonrpc/types/types.go b/rpc/jsonrpc/types/types.go new file mode 100644 index 0000000..b4845d0 --- /dev/null +++ b/rpc/jsonrpc/types/types.go @@ -0,0 +1,327 @@ +package types + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "strings" + + cmtjson "github.com/strangelove-ventures/cometbft-client/libs/json" +) + +// a wrapper to emulate a sum type: jsonrpcid = string | int +// TODO: refactor when Go 2.0 arrives https://github.com/golang/go/issues/19412 +type jsonrpcid interface { + isJSONRPCID() +} + +// JSONRPCStringID a wrapper for JSON-RPC string IDs +type JSONRPCStringID string + +func (JSONRPCStringID) isJSONRPCID() {} +func (id JSONRPCStringID) String() string { return string(id) } + +// JSONRPCIntID a wrapper for JSON-RPC integer IDs +type JSONRPCIntID int + +func (JSONRPCIntID) isJSONRPCID() {} +func (id JSONRPCIntID) String() string { return fmt.Sprintf("%d", id) } + +func idFromInterface(idInterface interface{}) (jsonrpcid, error) { + switch id := idInterface.(type) { + case string: + return JSONRPCStringID(id), nil + case float64: + // json.Unmarshal uses float64 for all numbers + // (https://golang.org/pkg/encoding/json/#Unmarshal), + // but the JSONRPC2.0 spec says the id SHOULD NOT contain + // decimals - so we truncate the decimals here. 
+ return JSONRPCIntID(int(id)), nil + default: + typ := reflect.TypeOf(id) + return nil, fmt.Errorf("json-rpc ID (%v) is of unknown type (%v)", id, typ) + } +} + +//---------------------------------------- +// REQUEST + +type RPCRequest struct { + JSONRPC string `json:"jsonrpc"` + ID jsonrpcid `json:"id,omitempty"` + Method string `json:"method"` + Params json.RawMessage `json:"params"` // must be map[string]interface{} or []interface{} +} + +// UnmarshalJSON custom JSON unmarshalling due to jsonrpcid being string or int +func (req *RPCRequest) UnmarshalJSON(data []byte) error { + unsafeReq := struct { + JSONRPC string `json:"jsonrpc"` + ID interface{} `json:"id,omitempty"` + Method string `json:"method"` + Params json.RawMessage `json:"params"` // must be map[string]interface{} or []interface{} + }{} + + err := json.Unmarshal(data, &unsafeReq) + if err != nil { + return err + } + + if unsafeReq.ID == nil { // notification + return nil + } + + req.JSONRPC = unsafeReq.JSONRPC + req.Method = unsafeReq.Method + req.Params = unsafeReq.Params + id, err := idFromInterface(unsafeReq.ID) + if err != nil { + return err + } + req.ID = id + + return nil +} + +func NewRPCRequest(id jsonrpcid, method string, params json.RawMessage) RPCRequest { + return RPCRequest{ + JSONRPC: "2.0", + ID: id, + Method: method, + Params: params, + } +} + +func (req RPCRequest) String() string { + return fmt.Sprintf("RPCRequest{%s %s/%X}", req.ID, req.Method, req.Params) +} + +func MapToRequest(id jsonrpcid, method string, params map[string]interface{}) (RPCRequest, error) { + var paramsMap = make(map[string]json.RawMessage, len(params)) + for name, value := range params { + valueJSON, err := cmtjson.Marshal(value) + if err != nil { + return RPCRequest{}, err + } + paramsMap[name] = valueJSON + } + + payload, err := json.Marshal(paramsMap) + if err != nil { + return RPCRequest{}, err + } + + return NewRPCRequest(id, method, payload), nil +} + +func ArrayToRequest(id jsonrpcid, method string, params []interface{}) (RPCRequest, error) { + var paramsMap = make([]json.RawMessage, len(params)) + for i, value := range params { + valueJSON, err := cmtjson.Marshal(value) + if err != nil { + return RPCRequest{}, err + } + paramsMap[i] = valueJSON + } + + payload, err := json.Marshal(paramsMap) + if err != nil { + return RPCRequest{}, err + } + + return NewRPCRequest(id, method, payload), nil +} + +//---------------------------------------- +// RESPONSE + +type RPCError struct { + Code int `json:"code"` + Message string `json:"message"` + Data string `json:"data,omitempty"` +} + +func (err RPCError) Error() string { + const baseFormat = "RPC error %v - %s" + if err.Data != "" { + return fmt.Sprintf(baseFormat+": %s", err.Code, err.Message, err.Data) + } + return fmt.Sprintf(baseFormat, err.Code, err.Message) +} + +type RPCResponse struct { + JSONRPC string `json:"jsonrpc"` + ID jsonrpcid `json:"id,omitempty"` + Result json.RawMessage `json:"result,omitempty"` + Error *RPCError `json:"error,omitempty"` +} + +// UnmarshalJSON custom JSON unmarshalling due to jsonrpcid being string or int +func (resp *RPCResponse) UnmarshalJSON(data []byte) error { + unsafeResp := &struct { + JSONRPC string `json:"jsonrpc"` + ID interface{} `json:"id,omitempty"` + Result json.RawMessage `json:"result,omitempty"` + Error *RPCError `json:"error,omitempty"` + }{} + err := json.Unmarshal(data, &unsafeResp) + if err != nil { + return err + } + resp.JSONRPC = unsafeResp.JSONRPC + resp.Error = unsafeResp.Error + resp.Result = unsafeResp.Result + if 
unsafeResp.ID == nil { + return nil + } + id, err := idFromInterface(unsafeResp.ID) + if err != nil { + return err + } + resp.ID = id + return nil +} + +func NewRPCSuccessResponse(id jsonrpcid, res interface{}) RPCResponse { + var rawMsg json.RawMessage + + if res != nil { + var js []byte + js, err := cmtjson.Marshal(res) + if err != nil { + return RPCInternalError(id, fmt.Errorf("error marshaling response: %w", err)) + } + rawMsg = json.RawMessage(js) + } + + return RPCResponse{JSONRPC: "2.0", ID: id, Result: rawMsg} +} + +func NewRPCErrorResponse(id jsonrpcid, code int, msg string, data string) RPCResponse { + return RPCResponse{ + JSONRPC: "2.0", + ID: id, + Error: &RPCError{Code: code, Message: msg, Data: data}, + } +} + +func (resp RPCResponse) String() string { + if resp.Error == nil { + return fmt.Sprintf("RPCResponse{%s %X}", resp.ID, resp.Result) + } + return fmt.Sprintf("RPCResponse{%s %v}", resp.ID, resp.Error) +} + +// From the JSON-RPC 2.0 spec: +// +// If there was an error in detecting the id in the Request object (e.g. Parse +// error/Invalid Request), it MUST be Null. +func RPCParseError(err error) RPCResponse { + return NewRPCErrorResponse(nil, -32700, "Parse error. Invalid JSON", err.Error()) +} + +// From the JSON-RPC 2.0 spec: +// +// If there was an error in detecting the id in the Request object (e.g. Parse +// error/Invalid Request), it MUST be Null. +func RPCInvalidRequestError(id jsonrpcid, err error) RPCResponse { + return NewRPCErrorResponse(id, -32600, "Invalid Request", err.Error()) +} + +func RPCMethodNotFoundError(id jsonrpcid) RPCResponse { + return NewRPCErrorResponse(id, -32601, "Method not found", "") +} + +func RPCInvalidParamsError(id jsonrpcid, err error) RPCResponse { + return NewRPCErrorResponse(id, -32602, "Invalid params", err.Error()) +} + +func RPCInternalError(id jsonrpcid, err error) RPCResponse { + return NewRPCErrorResponse(id, -32603, "Internal error", err.Error()) +} + +func RPCServerError(id jsonrpcid, err error) RPCResponse { + return NewRPCErrorResponse(id, -32000, "Server error", err.Error()) +} + +//---------------------------------------- + +// WSRPCConnection represents a websocket connection. +type WSRPCConnection interface { + // GetRemoteAddr returns a remote address of the connection. + GetRemoteAddr() string + // WriteRPCResponse writes the response onto connection (BLOCKING). + WriteRPCResponse(context.Context, RPCResponse) error + // TryWriteRPCResponse tries to write the response onto connection (NON-BLOCKING). + TryWriteRPCResponse(RPCResponse) bool + // Context returns the connection's context. + Context() context.Context +} + +// Context is the first parameter for all functions. It carries a json-rpc +// request, http request and websocket connection. +// +// - JSONReq is non-nil when JSONRPC is called over websocket or HTTP. +// - WSConn is non-nil when we're connected via a websocket. +// - HTTPReq is non-nil when URI or JSONRPC is called over HTTP. +type Context struct { + // json-rpc request + JSONReq *RPCRequest + // websocket connection + WSConn WSRPCConnection + // http request + HTTPReq *http.Request +} + +// RemoteAddr returns the remote address (usually a string "IP:port"). +// If neither HTTPReq nor WSConn is set, an empty string is returned. 
+// HTTP:
+//
+//	http.Request#RemoteAddr
+//
+// WS:
+//
+//	result of GetRemoteAddr
+func (ctx *Context) RemoteAddr() string {
+	if ctx.HTTPReq != nil {
+		return ctx.HTTPReq.RemoteAddr
+	} else if ctx.WSConn != nil {
+		return ctx.WSConn.GetRemoteAddr()
+	}
+	return ""
+}
+
+// Context returns the request's context.
+// The returned context is always non-nil; it defaults to the background context.
+// HTTP:
+//
+//	The context is canceled when the client's connection closes, the request
+//	is canceled (with HTTP/2), or when the ServeHTTP method returns.
+//
+// WS:
+//
+//	The context is canceled when the client's connection closes.
+func (ctx *Context) Context() context.Context {
+	if ctx.HTTPReq != nil {
+		return ctx.HTTPReq.Context()
+	} else if ctx.WSConn != nil {
+		return ctx.WSConn.Context()
+	}
+	return context.Background()
+}
+
+//----------------------------------------
+// SOCKETS
+
+// Determine if it's a unix or tcp socket.
+// If tcp, must specify the port; `0.0.0.0` will return incorrectly as "unix" since there's no port
+// TODO: deprecate
+func SocketType(listenAddr string) string {
+	socketType := "unix"
+	if len(strings.Split(listenAddr, ":")) >= 2 {
+		socketType = "tcp"
+	}
+	return socketType
+}
diff --git a/rpc/jsonrpc/types/types_test.go b/rpc/jsonrpc/types/types_test.go
new file mode 100644
index 0000000..8434f20
--- /dev/null
+++ b/rpc/jsonrpc/types/types_test.go
@@ -0,0 +1,83 @@
+package types
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+type SampleResult struct {
+	Value string
+}
+
+type responseTest struct {
+	id       jsonrpcid
+	expected string
+}
+
+var responseTests = []responseTest{
+	{JSONRPCStringID("1"), `"1"`},
+	{JSONRPCStringID("alphabet"), `"alphabet"`},
+	{JSONRPCStringID(""), `""`},
+	{JSONRPCStringID("àáâ"), `"àáâ"`},
+	{JSONRPCIntID(-1), "-1"},
+	{JSONRPCIntID(0), "0"},
+	{JSONRPCIntID(1), "1"},
+	{JSONRPCIntID(100), "100"},
+}
+
+func TestResponses(t *testing.T) {
+	assert := assert.New(t)
+	for _, tt := range responseTests {
+		jsonid := tt.id
+		a := NewRPCSuccessResponse(jsonid, &SampleResult{"hello"})
+		b, _ := json.Marshal(a)
+		s := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, tt.expected)
+		assert.Equal(s, string(b))
+
+		d := RPCParseError(errors.New("hello world"))
+		e, _ := json.Marshal(d)
+		f := `{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error. 
Invalid JSON","data":"hello world"}}` + assert.Equal(f, string(e)) + + g := RPCMethodNotFoundError(jsonid) + h, _ := json.Marshal(g) + i := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"error":{"code":-32601,"message":"Method not found"}}`, tt.expected) + assert.Equal(string(h), i) + } +} + +func TestUnmarshallResponses(t *testing.T) { + assert := assert.New(t) + for _, tt := range responseTests { + response := &RPCResponse{} + err := json.Unmarshal( + []byte(fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, tt.expected)), + response, + ) + assert.Nil(err) + a := NewRPCSuccessResponse(tt.id, &SampleResult{"hello"}) + assert.Equal(*response, a) + } + response := &RPCResponse{} + err := json.Unmarshal([]byte(`{"jsonrpc":"2.0","id":true,"result":{"Value":"hello"}}`), response) + assert.NotNil(err) +} + +func TestRPCError(t *testing.T) { + assert.Equal(t, "RPC error 12 - Badness: One worse than a code 11", + fmt.Sprintf("%v", &RPCError{ + Code: 12, + Message: "Badness", + Data: "One worse than a code 11", + })) + + assert.Equal(t, "RPC error 12 - Badness", + fmt.Sprintf("%v", &RPCError{ + Code: 12, + Message: "Badness", + })) +} diff --git a/types/block.go b/types/block.go new file mode 100644 index 0000000..e106e4b --- /dev/null +++ b/types/block.go @@ -0,0 +1,134 @@ +package types + +import ( + "time" + + cmtbytes "github.com/strangelove-ventures/cometbft-client/libs/bytes" + cmtsync "github.com/strangelove-ventures/cometbft-client/libs/sync" +) + +// Block defines the atomic unit of a CometBFT blockchain. +type Block struct { + mtx cmtsync.Mutex + + Header `json:"header"` + Data `json:"data"` + Evidence EvidenceData `json:"evidence"` + LastCommit *Commit `json:"last_commit"` +} + +//----------------------------------------------------------------------------- + +// Header defines the structure of a CometBFT block header. +// NOTE: changes to the Header should be duplicated in: +// - header.Hash() +// - abci.Header +// - https://github.com/cometbft/cometbft/blob/v0.38.x/spec/blockchain/blockchain.md +type Header struct { + // basic block info + //Version cmtversion.Consensus `json:"version"` + ChainID string `json:"chain_id"` + Height int64 `json:"height"` + Time time.Time `json:"time"` + + // prev block info + LastBlockID BlockID `json:"last_block_id"` + + // hashes of block data + LastCommitHash cmtbytes.HexBytes `json:"last_commit_hash"` // commit from validators from the last block + DataHash cmtbytes.HexBytes `json:"data_hash"` // transactions + + // hashes from the app output from the prev block + ValidatorsHash cmtbytes.HexBytes `json:"validators_hash"` // validators for the current block + NextValidatorsHash cmtbytes.HexBytes `json:"next_validators_hash"` // validators for the next block + ConsensusHash cmtbytes.HexBytes `json:"consensus_hash"` // consensus params for current block + AppHash cmtbytes.HexBytes `json:"app_hash"` // state after txs from the previous block + // root hash of all results from the txs from the previous block + // see `deterministicExecTxResult` to understand which parts of a tx is hashed into here + LastResultsHash cmtbytes.HexBytes `json:"last_results_hash"` + + // consensus info + EvidenceHash cmtbytes.HexBytes `json:"evidence_hash"` // evidence included in the block + ProposerAddress Address `json:"proposer_address"` // original proposer of the block +} + +//------------------------------------- + +// BlockIDFlag indicates which BlockID the signature is for. +type BlockIDFlag byte + +// CommitSig is a part of the Vote included in a Commit. 
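// On the wire it decodes JSON of roughly this shape (abbreviated, invented
// values; validator_address is hex, signature is base64):
//
//	{
//	  "block_id_flag": 2,
//	  "validator_address": "01AB23CD",
//	  "timestamp": "2024-01-01T00:00:00Z",
//	  "signature": "c2lnbmF0dXJl"
//	}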
+type CommitSig struct { + BlockIDFlag BlockIDFlag `json:"block_id_flag"` + ValidatorAddress Address `json:"validator_address"` + Timestamp time.Time `json:"timestamp"` + Signature []byte `json:"signature"` +} + +//------------------------------------- + +// ExtendedCommitSig contains a commit signature along with its corresponding +// vote extension and vote extension signature. +type ExtendedCommitSig struct { + CommitSig // Commit signature + Extension []byte // Vote extension + ExtensionSignature []byte // Vote extension signature +} + +//------------------------------------- + +// Commit contains the evidence that a block was committed by a set of validators. +// NOTE: Commit is empty for height 1, but never nil. +type Commit struct { + // NOTE: The signatures are in order of address to preserve the bonded + // ValidatorSet order. + // Any peer with a block can gossip signatures by index with a peer without + // recalculating the active ValidatorSet. + Height int64 `json:"height"` + Round int32 `json:"round"` + BlockID BlockID `json:"block_id"` + Signatures []CommitSig `json:"signatures"` +} + +//------------------------------------- + +// ExtendedCommit is similar to Commit, except that its signatures also retain +// their corresponding vote extensions and vote extension signatures. +type ExtendedCommit struct { + Height int64 + Round int32 + BlockID BlockID + ExtendedSignatures []ExtendedCommitSig +} + +//------------------------------------- + +// Data contains the set of transactions included in the block +type Data struct { + // Txs that will be applied by state @ block.Height+1. + // NOTE: not all txs here are valid. We're just agreeing on the order first. + // This means that block.AppHash does not include these txs. + Txs Txs `json:"txs"` + + // Volatile + hash cmtbytes.HexBytes +} + +//----------------------------------------------------------------------------- + +// EvidenceData contains any evidence of malicious wrong-doing by validators +type EvidenceData struct { + Evidence EvidenceList `json:"evidence"` + + // Volatile. Used as cache + hash cmtbytes.HexBytes + byteSize int64 +} + +//-------------------------------------------------------------------------------- + +// BlockID +type BlockID struct { + Hash cmtbytes.HexBytes `json:"hash"` + PartSetHeader PartSetHeader `json:"parts"` +} diff --git a/types/block_meta.go b/types/block_meta.go new file mode 100644 index 0000000..31d5ba4 --- /dev/null +++ b/types/block_meta.go @@ -0,0 +1,9 @@ +package types + +// BlockMeta contains meta information. 
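// For example, decoding one entry of a block-listing RPC response
// (sketch; raw is an assumed JSON payload):
//
//	var bm BlockMeta
//	if err := json.Unmarshal(raw, &bm); err != nil {
//		return err
//	}
//	fmt.Println(bm.Header.Height, bm.NumTxs)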
+type BlockMeta struct { + BlockID BlockID `json:"block_id"` + BlockSize int `json:"block_size"` + Header Header `json:"header"` + NumTxs int `json:"num_txs"` +} diff --git a/types/encoding_helper.go b/types/encoding_helper.go new file mode 100644 index 0000000..19a7f0f --- /dev/null +++ b/types/encoding_helper.go @@ -0,0 +1,47 @@ +package types + +import ( + gogotypes "github.com/cosmos/gogoproto/types" + + "github.com/strangelove-ventures/cometbft-client/libs/bytes" +) + +// cdcEncode returns nil if the input is nil, otherwise returns +// proto.Marshal(Value{Value: item}) +func cdcEncode(item interface{}) []byte { + if item != nil && !isTypedNil(item) && !isEmpty(item) { + switch item := item.(type) { + case string: + i := gogotypes.StringValue{ + Value: item, + } + bz, err := i.Marshal() + if err != nil { + return nil + } + return bz + case int64: + i := gogotypes.Int64Value{ + Value: item, + } + bz, err := i.Marshal() + if err != nil { + return nil + } + return bz + case bytes.HexBytes: + i := gogotypes.BytesValue{ + Value: item, + } + bz, err := i.Marshal() + if err != nil { + return nil + } + return bz + default: + return nil + } + } + + return nil +} diff --git a/types/events.go b/types/events.go new file mode 100644 index 0000000..8cd35a4 --- /dev/null +++ b/types/events.go @@ -0,0 +1,6 @@ +package types + +// TMEventData implements events.EventData. +type TMEventData interface { + // empty interface +} diff --git a/types/evidence.go b/types/evidence.go new file mode 100644 index 0000000..b2c6f3d --- /dev/null +++ b/types/evidence.go @@ -0,0 +1,44 @@ +package types + +import ( + "fmt" + "time" + + "github.com/strangelove-ventures/cometbft-client/crypto/merkle" +) + +// Evidence represents any provable malicious activity by a validator. +// Verification logic for each evidence is part of the evidence module. +type Evidence interface { + Bytes() []byte // bytes which comprise the evidence + Hash() []byte // hash of the evidence + Height() int64 // height of the infraction + String() string // string format of the evidence + Time() time.Time // time of the infraction + ValidateBasic() error // basic consistency check +} + +// EvidenceList is a list of Evidence. Evidences is not a word. +type EvidenceList []Evidence + +// Hash returns the simple merkle root hash of the EvidenceList. +func (evl EvidenceList) Hash() []byte { + // These allocations are required because Evidence is not of type Bytes, and + // golang slices can't be typed cast. This shouldn't be a performance problem since + // the Evidence size is capped. + evidenceBzs := make([][]byte, len(evl)) + for i := 0; i < len(evl); i++ { + // TODO: We should change this to the hash. 
Using bytes contains some unexported data that
+	// may cause different hashes
+		evidenceBzs[i] = evl[i].Bytes()
+	}
+	return merkle.HashFromByteSlices(evidenceBzs)
+}
+
+func (evl EvidenceList) String() string {
+	s := ""
+	for _, e := range evl {
+		s += fmt.Sprintf("%s\t\t", e)
+	}
+	return s
+}
diff --git a/types/genesis.go b/types/genesis.go
new file mode 100644
index 0000000..29f587c
--- /dev/null
+++ b/types/genesis.go
@@ -0,0 +1,106 @@
+package types
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/strangelove-ventures/cometbft-client/crypto"
+	cmtbytes "github.com/strangelove-ventures/cometbft-client/libs/bytes"
+	cmtjson "github.com/strangelove-ventures/cometbft-client/libs/json"
+	cmtos "github.com/strangelove-ventures/cometbft-client/libs/os"
+	cmttime "github.com/strangelove-ventures/cometbft-client/types/time"
+)
+
+const (
+	// MaxChainIDLen is a maximum length of the chain ID.
+	MaxChainIDLen = 50
+)
+
+//------------------------------------------------------------
+// core types for a genesis definition
+// NOTE: any changes to the genesis definition should
+// be reflected in the documentation:
+// docs/core/using-cometbft.md
+
+// GenesisValidator is an initial validator.
+type GenesisValidator struct {
+	Address Address       `json:"address"`
+	PubKey  crypto.PubKey `json:"pub_key"`
+	Power   int64         `json:"power"`
+	Name    string        `json:"name"`
+}
+
+// GenesisDoc defines the initial conditions for a CometBFT blockchain, in particular its validator set.
+type GenesisDoc struct {
+	GenesisTime     time.Time          `json:"genesis_time"`
+	ChainID         string             `json:"chain_id"`
+	InitialHeight   int64              `json:"initial_height"`
+	ConsensusParams *ConsensusParams   `json:"consensus_params,omitempty"`
+	Validators      []GenesisValidator `json:"validators,omitempty"`
+	AppHash         cmtbytes.HexBytes  `json:"app_hash"`
+	AppState        json.RawMessage    `json:"app_state,omitempty"`
+}
+
+// SaveAs is a utility method for saving GenesisDoc as a JSON file.
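// For example (sketch; the chain ID and file name are arbitrary):
//
//	genDoc := GenesisDoc{ChainID: "test-chain"}
//	if err := genDoc.ValidateAndComplete(); err != nil {
//		return err
//	}
//	if err := genDoc.SaveAs("genesis.json"); err != nil {
//		return err
//	}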
+func (genDoc *GenesisDoc) SaveAs(file string) error { + genDocBytes, err := cmtjson.MarshalIndent(genDoc, "", " ") + if err != nil { + return err + } + return cmtos.WriteFile(file, genDocBytes, 0644) +} + +// ValidatorHash returns the hash of the validator set contained in the GenesisDoc +func (genDoc *GenesisDoc) ValidatorHash() []byte { + //vals := make([]*Validator, len(genDoc.Validators)) + //for i, v := range genDoc.Validators { + // vals[i] = NewValidator(v.PubKey, v.Power) + //} + //vset := NewValidatorSet(vals) + //return vset.Hash() + return []byte{} +} + +// ValidateAndComplete checks that all necessary fields are present +// and fills in defaults for optional fields left empty +func (genDoc *GenesisDoc) ValidateAndComplete() error { + if genDoc.ChainID == "" { + return errors.New("genesis doc must include non-empty chain_id") + } + if len(genDoc.ChainID) > MaxChainIDLen { + return fmt.Errorf("chain_id in genesis doc is too long (max: %d)", MaxChainIDLen) + } + if genDoc.InitialHeight < 0 { + return fmt.Errorf("initial_height cannot be negative (got %v)", genDoc.InitialHeight) + } + if genDoc.InitialHeight == 0 { + genDoc.InitialHeight = 1 + } + + if genDoc.ConsensusParams == nil { + genDoc.ConsensusParams = DefaultConsensusParams() + } else if err := genDoc.ConsensusParams.ValidateBasic(); err != nil { + return err + } + + for i, v := range genDoc.Validators { + if v.Power == 0 { + return fmt.Errorf("the genesis file cannot contain validators with no voting power: %v", v) + } + if len(v.Address) > 0 && !bytes.Equal(v.PubKey.Address(), v.Address) { + return fmt.Errorf("incorrect address for validator %v in the genesis file, should be %v", v, v.PubKey.Address()) + } + if len(v.Address) == 0 { + genDoc.Validators[i].Address = v.PubKey.Address() + } + } + + if genDoc.GenesisTime.IsZero() { + genDoc.GenesisTime = cmttime.Now() + } + + return nil +} diff --git a/types/light.go b/types/light.go new file mode 100644 index 0000000..5202b1a --- /dev/null +++ b/types/light.go @@ -0,0 +1,15 @@ +package types + +// LightBlock is a SignedHeader and a ValidatorSet. +// It is the basis of the light client +type LightBlock struct { + *SignedHeader `json:"signed_header"` + ValidatorSet *ValidatorSet `json:"validator_set"` +} + +// SignedHeader is a header along with the commits that prove it. +type SignedHeader struct { + *Header `json:"header"` + + Commit *Commit `json:"commit"` +} diff --git a/types/params.go b/types/params.go new file mode 100644 index 0000000..751f409 --- /dev/null +++ b/types/params.go @@ -0,0 +1,198 @@ +package types + +import ( + "errors" + "fmt" + "time" + + "github.com/strangelove-ventures/cometbft-client/crypto/ed25519" + "github.com/strangelove-ventures/cometbft-client/crypto/secp256k1" +) + +const ( + // MaxBlockSizeBytes is the maximum permitted size of the blocks. + MaxBlockSizeBytes = 104857600 // 100MB + + ABCIPubKeyTypeEd25519 = ed25519.KeyType + ABCIPubKeyTypeSecp256k1 = secp256k1.KeyType +) + +var ABCIPubKeyTypesToNames = map[string]string{ + ABCIPubKeyTypeEd25519: ed25519.PubKeyName, + ABCIPubKeyTypeSecp256k1: secp256k1.PubKeyName, +} + +// ConsensusParams contains consensus critical parameters that determine the +// validity of blocks. +type ConsensusParams struct { + Block BlockParams `json:"block"` + Evidence EvidenceParams `json:"evidence"` + Validator ValidatorParams `json:"validator"` + Version VersionParams `json:"version"` + ABCI ABCIParams `json:"abci"` +} + +// BlockParams define limits on the block size and gas plus minimum time +// between blocks. 
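// For instance (sketch), the defaults declared further down:
//
//	p := DefaultBlockParams() // MaxBytes: 22020096 (21MB), MaxGas: -1 (no limit)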
+type BlockParams struct { + MaxBytes int64 `json:"max_bytes"` + MaxGas int64 `json:"max_gas"` +} + +// EvidenceParams determine how we handle evidence of malfeasance. +type EvidenceParams struct { + MaxAgeNumBlocks int64 `json:"max_age_num_blocks"` // only accept new evidence more recent than this + MaxAgeDuration time.Duration `json:"max_age_duration"` + MaxBytes int64 `json:"max_bytes"` +} + +// ValidatorParams restrict the public key types validators can use. +// NOTE: uses ABCI pubkey naming, not Amino names. +type ValidatorParams struct { + PubKeyTypes []string `json:"pub_key_types"` +} + +type VersionParams struct { + App uint64 `json:"app"` +} + +// ABCIParams configure ABCI functionality specific to the Application Blockchain +// Interface. +type ABCIParams struct { + VoteExtensionsEnableHeight int64 `json:"vote_extensions_enable_height"` +} + +// VoteExtensionsEnabled returns true if vote extensions are enabled at height h +// and false otherwise. +func (a ABCIParams) VoteExtensionsEnabled(h int64) bool { + if h < 1 { + panic(fmt.Errorf("cannot check if vote extensions enabled for height %d (< 1)", h)) + } + if a.VoteExtensionsEnableHeight == 0 { + return false + } + return a.VoteExtensionsEnableHeight <= h +} + +// DefaultConsensusParams returns a default ConsensusParams. +func DefaultConsensusParams() *ConsensusParams { + return &ConsensusParams{ + Block: DefaultBlockParams(), + Evidence: DefaultEvidenceParams(), + Validator: DefaultValidatorParams(), + Version: DefaultVersionParams(), + ABCI: DefaultABCIParams(), + } +} + +// DefaultBlockParams returns a default BlockParams. +func DefaultBlockParams() BlockParams { + return BlockParams{ + MaxBytes: 22020096, // 21MB + MaxGas: -1, + } +} + +// DefaultEvidenceParams returns a default EvidenceParams. +func DefaultEvidenceParams() EvidenceParams { + return EvidenceParams{ + MaxAgeNumBlocks: 100000, // 27.8 hrs at 1block/s + MaxAgeDuration: 48 * time.Hour, + MaxBytes: 1048576, // 1MB + } +} + +// DefaultValidatorParams returns a default ValidatorParams, which allows +// only ed25519 pubkeys. +func DefaultValidatorParams() ValidatorParams { + return ValidatorParams{ + PubKeyTypes: []string{ABCIPubKeyTypeEd25519}, + } +} + +func DefaultVersionParams() VersionParams { + return VersionParams{ + App: 0, + } +} + +func DefaultABCIParams() ABCIParams { + return ABCIParams{ + // When set to 0, vote extensions are not required. + VoteExtensionsEnableHeight: 0, + } +} + +func IsValidPubkeyType(params ValidatorParams, pubkeyType string) bool { + for i := 0; i < len(params.PubKeyTypes); i++ { + if params.PubKeyTypes[i] == pubkeyType { + return true + } + } + return false +} + +// Validate validates the ConsensusParams to ensure all values are within their +// allowed limits, and returns an error if they are not. +func (params ConsensusParams) ValidateBasic() error { + if params.Block.MaxBytes == 0 { + return fmt.Errorf("block.MaxBytes cannot be 0") + } + if params.Block.MaxBytes < -1 { + return fmt.Errorf("block.MaxBytes must be -1 or greater than 0. Got %d", + + params.Block.MaxBytes) + } + if params.Block.MaxBytes > MaxBlockSizeBytes { + return fmt.Errorf("block.MaxBytes is too big. %d > %d", + params.Block.MaxBytes, MaxBlockSizeBytes) + } + + if params.Block.MaxGas < -1 { + return fmt.Errorf("block.MaxGas must be greater or equal to -1. Got %d", + params.Block.MaxGas) + } + + if params.Evidence.MaxAgeNumBlocks <= 0 { + return fmt.Errorf("evidence.MaxAgeNumBlocks must be greater than 0. 
Got %d",
+			params.Evidence.MaxAgeNumBlocks)
+	}
+
+	if params.Evidence.MaxAgeDuration <= 0 {
+		return fmt.Errorf("evidence.MaxAgeDuration must be greater than 0 if provided. Got %v",
+			params.Evidence.MaxAgeDuration)
+	}
+
+	maxBytes := params.Block.MaxBytes
+	if maxBytes == -1 {
+		maxBytes = int64(MaxBlockSizeBytes)
+	}
+	if params.Evidence.MaxBytes > maxBytes {
+		return fmt.Errorf("evidence.MaxBytesEvidence is greater than upper bound, %d > %d",
+			params.Evidence.MaxBytes, params.Block.MaxBytes)
+	}
+
+	if params.Evidence.MaxBytes < 0 {
+		return fmt.Errorf("evidence.MaxBytes must be non-negative. Got: %d",
+			params.Evidence.MaxBytes)
+	}
+
+	if params.ABCI.VoteExtensionsEnableHeight < 0 {
+		return fmt.Errorf("ABCI.VoteExtensionsEnableHeight cannot be negative. Got: %d", params.ABCI.VoteExtensionsEnableHeight)
+	}
+
+	if len(params.Validator.PubKeyTypes) == 0 {
+		return errors.New("len(Validator.PubKeyTypes) must be greater than 0")
+	}
+
+	// Check if keyType is a known ABCIPubKeyType
+	for i := 0; i < len(params.Validator.PubKeyTypes); i++ {
+		keyType := params.Validator.PubKeyTypes[i]
+		if _, ok := ABCIPubKeyTypesToNames[keyType]; !ok {
+			return fmt.Errorf("params.Validator.PubKeyTypes[%d], %s, is an unknown pubkey type",
+				i, keyType)
+		}
+	}
+
+	return nil
+}
diff --git a/types/part_set.go b/types/part_set.go
new file mode 100644
index 0000000..f8a5fc8
--- /dev/null
+++ b/types/part_set.go
@@ -0,0 +1,29 @@
+package types
+
+import (
+	"bytes"
+	"fmt"
+
+	cmtbytes "github.com/strangelove-ventures/cometbft-client/libs/bytes"
+)
+
+type PartSetHeader struct {
+	Total uint32            `json:"total"`
+	Hash  cmtbytes.HexBytes `json:"hash"`
+}
+
+// String returns a string representation of PartSetHeader.
+//
+// 1. total number of parts
+// 2. first 6 bytes of the hash
+func (psh PartSetHeader) String() string {
+	return fmt.Sprintf("%v:%X", psh.Total, cmtbytes.Fingerprint(psh.Hash))
+}
+
+func (psh PartSetHeader) IsZero() bool {
+	return psh.Total == 0 && len(psh.Hash) == 0
+}
+
+func (psh PartSetHeader) Equals(other PartSetHeader) bool {
+	return psh.Total == other.Total && bytes.Equal(psh.Hash, other.Hash)
+}
diff --git a/types/results.go b/types/results.go
new file mode 100644
index 0000000..283ff32
--- /dev/null
+++ b/types/results.go
@@ -0,0 +1,8 @@
+package types
+
+import (
+	abci "github.com/strangelove-ventures/cometbft-client/abci/types"
+)
+
+// ABCIResults wraps the deliver tx results to return a proof.
+type ABCIResults []*abci.ExecTxResult
diff --git a/types/signable.go b/types/signable.go
new file mode 100644
index 0000000..140e41e
--- /dev/null
+++ b/types/signable.go
@@ -0,0 +1,23 @@
+package types
+
+import (
+	"github.com/strangelove-ventures/cometbft-client/crypto/ed25519"
+	cmtmath "github.com/strangelove-ventures/cometbft-client/libs/math"
+)
+
+var (
+	// MaxSignatureSize is a maximum allowed signature size for the Proposal
+	// and Vote.
+	// XXX: secp256k1 does not have Size nor MaxSize defined.
+	MaxSignatureSize = cmtmath.MaxInt(ed25519.SignatureSize, 64)
+)
+
+// Signable is an interface for all signable things.
+// It typically removes signatures before serializing.
+// SignBytes returns the bytes to be signed.
+// NOTE: chainIDs are part of the SignBytes but not
+// necessarily the objects themselves.
+// NOTE: Expected to panic if there is an error marshaling.
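// A toy implementation for illustration (invented, not part of this changeset):
//
//	type memo struct{ msg string }
//
//	func (m memo) SignBytes(chainID string) []byte {
//		return []byte(chainID + "/" + m.msg)
//	}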
+type Signable interface { + SignBytes(chainID string) []byte +} diff --git a/types/time/time.go b/types/time/time.go new file mode 100644 index 0000000..022bdf5 --- /dev/null +++ b/types/time/time.go @@ -0,0 +1,58 @@ +package time + +import ( + "sort" + "time" +) + +// Now returns the current time in UTC with no monotonic component. +func Now() time.Time { + return Canonical(time.Now()) +} + +// Canonical returns UTC time with no monotonic component. +// Stripping the monotonic component is for time equality. +// See https://github.com/tendermint/tendermint/pull/2203#discussion_r215064334 +func Canonical(t time.Time) time.Time { + return t.Round(0).UTC() +} + +// WeightedTime for computing a median. +type WeightedTime struct { + Time time.Time + Weight int64 +} + +// NewWeightedTime with time and weight. +func NewWeightedTime(time time.Time, weight int64) *WeightedTime { + return &WeightedTime{ + Time: time, + Weight: weight, + } +} + +// WeightedMedian computes weighted median time for a given array of WeightedTime and the total voting power. +func WeightedMedian(weightedTimes []*WeightedTime, totalVotingPower int64) (res time.Time) { + median := totalVotingPower / 2 + + sort.Slice(weightedTimes, func(i, j int) bool { + if weightedTimes[i] == nil { + return false + } + if weightedTimes[j] == nil { + return true + } + return weightedTimes[i].Time.UnixNano() < weightedTimes[j].Time.UnixNano() + }) + + for _, weightedTime := range weightedTimes { + if weightedTime != nil { + if median <= weightedTime.Weight { + res = weightedTime.Time + break + } + median -= weightedTime.Weight + } + } + return +} diff --git a/types/time/time_test.go b/types/time/time_test.go new file mode 100644 index 0000000..1b1a30e --- /dev/null +++ b/types/time/time_test.go @@ -0,0 +1,56 @@ +package time + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestWeightedMedian(t *testing.T) { + m := make([]*WeightedTime, 3) + + t1 := Now() + t2 := t1.Add(5 * time.Second) + t3 := t1.Add(10 * time.Second) + + m[2] = NewWeightedTime(t1, 33) // faulty processes + m[0] = NewWeightedTime(t2, 40) // correct processes + m[1] = NewWeightedTime(t3, 27) // correct processes + totalVotingPower := int64(100) + + median := WeightedMedian(m, totalVotingPower) + assert.Equal(t, t2, median) + // median always returns value between values of correct processes + assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && + (median.Before(t3) || median.Equal(t3))) + + m[1] = NewWeightedTime(t1, 40) // correct processes + m[2] = NewWeightedTime(t2, 27) // correct processes + m[0] = NewWeightedTime(t3, 33) // faulty processes + totalVotingPower = int64(100) + + median = WeightedMedian(m, totalVotingPower) + assert.Equal(t, t2, median) + // median always returns value between values of correct processes + assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && + (median.Before(t2) || median.Equal(t2))) + + m = make([]*WeightedTime, 8) + t4 := t1.Add(15 * time.Second) + t5 := t1.Add(60 * time.Second) + + m[3] = NewWeightedTime(t1, 10) // correct processes + m[1] = NewWeightedTime(t2, 10) // correct processes + m[5] = NewWeightedTime(t2, 10) // correct processes + m[4] = NewWeightedTime(t3, 23) // faulty processes + m[0] = NewWeightedTime(t4, 20) // correct processes + m[7] = NewWeightedTime(t5, 10) // faulty processes + totalVotingPower = int64(83) + + median = WeightedMedian(m, totalVotingPower) + assert.Equal(t, t3, median) + // median always returns value between values of correct processes + 
assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) &&
+		(median.Before(t4) || median.Equal(t4)))
+}
diff --git a/types/tx.go b/types/tx.go
new file mode 100644
index 0000000..9cba24b
--- /dev/null
+++ b/types/tx.go
@@ -0,0 +1,155 @@
+package types
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"errors"
+	"fmt"
+
+	"github.com/strangelove-ventures/cometbft-client/crypto/merkle"
+	"github.com/strangelove-ventures/cometbft-client/crypto/tmhash"
+	cmtbytes "github.com/strangelove-ventures/cometbft-client/libs/bytes"
+)
+
+// TxKeySize is the size of the transaction key index.
+const TxKeySize = sha256.Size
+
+type (
+	// Tx is an arbitrary byte array.
+	// NOTE: Tx has no types at this level, so when wire encoded it's just length-prefixed.
+	// Might we want types here?
+	Tx []byte
+
+	// TxKey is the fixed-length array key used as an index.
+	TxKey [TxKeySize]byte
+)
+
+// Hash computes the TMHASH hash of the wire-encoded transaction.
+func (tx Tx) Hash() []byte {
+	return tmhash.Sum(tx)
+}
+
+// Key returns the sha256 hash of the transaction, usable as a fixed-length index key.
+func (tx Tx) Key() TxKey {
+	return sha256.Sum256(tx)
+}
+
+// String returns the hex-encoded transaction as a string.
+func (tx Tx) String() string {
+	return fmt.Sprintf("Tx{%X}", []byte(tx))
+}
+
+// Txs is a slice of Tx.
+type Txs []Tx
+
+// Hash returns the Merkle root hash of the transaction hashes,
+// i.e. the leaves of the tree are the hashes of the txs.
+func (txs Txs) Hash() []byte {
+	hl := txs.hashList()
+	return merkle.HashFromByteSlices(hl)
+}
+
+// Index returns the index of this transaction in the list, or -1 if not found.
+func (txs Txs) Index(tx Tx) int {
+	for i := range txs {
+		if bytes.Equal(txs[i], tx) {
+			return i
+		}
+	}
+	return -1
+}
+
+// IndexByHash returns the index of this transaction hash in the list, or -1 if not found.
+func (txs Txs) IndexByHash(hash []byte) int {
+	for i := range txs {
+		if bytes.Equal(txs[i].Hash(), hash) {
+			return i
+		}
+	}
+	return -1
+}
+
+// Proof returns a Merkle proof of inclusion for the transaction at index i.
+func (txs Txs) Proof(i int) TxProof {
+	hl := txs.hashList()
+	root, proofs := merkle.ProofsFromByteSlices(hl)
+
+	return TxProof{
+		RootHash: root,
+		Data:     txs[i],
+		Proof:    *proofs[i],
+	}
+}
+
+func (txs Txs) hashList() [][]byte {
+	hl := make([][]byte, len(txs))
+	for i := 0; i < len(txs); i++ {
+		hl[i] = txs[i].Hash()
+	}
+	return hl
+}
+
+// Txs implements sort.Interface; sorting a Txs value orders the transactions
+// lexicographically.
+func (txs Txs) Len() int      { return len(txs) }
+func (txs Txs) Swap(i, j int) { txs[i], txs[j] = txs[j], txs[i] }
+func (txs Txs) Less(i, j int) bool {
+	return bytes.Compare(txs[i], txs[j]) == -1
+}
+
+// ToTxs converts a slice of raw byte slices into a Txs.
+func ToTxs(txl [][]byte) Txs {
+	txs := make([]Tx, 0, len(txl))
+	for _, tx := range txl {
+		txs = append(txs, tx)
+	}
+	return txs
+}
+
+// Validate checks that the combined size of the transactions does not exceed maxSizeBytes.
+func (txs Txs) Validate(maxSizeBytes int64) error {
+	var size int64
+	for _, tx := range txs {
+		size += int64(len(tx))
+		if size > maxSizeBytes {
+			return fmt.Errorf("transaction data size exceeds maximum %d", maxSizeBytes)
+		}
+	}
+	return nil
+}
+
+// ToSliceOfBytes converts a Txs to a slice of byte slices.
+func (txs Txs) ToSliceOfBytes() [][]byte {
+	txBzs := make([][]byte, len(txs))
+	for i := 0; i < len(txs); i++ {
+		txBzs[i] = txs[i]
+	}
+	return txBzs
+}
+
+// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree.
+type TxProof struct {
+	RootHash cmtbytes.HexBytes `json:"root_hash"`
+	Data     Tx                `json:"data"`
+	Proof    merkle.Proof      `json:"proof"`
+}
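A minimal sketch of how these proof helpers fit together (illustrative only, not part of the patch; it assumes the surrounding types package and uses Validate, defined just below):

// exampleTxProof is a hypothetical snippet, not part of this diff.
func exampleTxProof() {
	txs := Txs{Tx("tx0"), Tx("tx1"), Tx("tx2")}
	root := txs.Hash()    // Merkle root over the transaction hashes
	proof := txs.Proof(1) // inclusion proof for txs[1]

	if err := proof.Validate(root); err != nil {
		panic(err) // the proof does not match the claimed root
	}
}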
+// Leaf returns the hash(tx), the leaf in the Merkle tree that this proof refers to.
+func (tp TxProof) Leaf() []byte {
+	return tp.Data.Hash()
+}
+
+// Validate verifies the proof. It returns nil if the RootHash matches the dataHash argument,
+// and if the proof is internally consistent. Otherwise, it returns a sensible error.
+func (tp TxProof) Validate(dataHash []byte) error {
+	if !bytes.Equal(dataHash, tp.RootHash) {
+		return errors.New("proof matches different data hash")
+	}
+	if tp.Proof.Index < 0 {
+		return errors.New("proof index cannot be negative")
+	}
+	if tp.Proof.Total <= 0 {
+		return errors.New("proof total must be positive")
+	}
+	if err := tp.Proof.Verify(tp.RootHash, tp.Leaf()); err != nil {
+		return errors.New("proof is not internally consistent")
+	}
+	return nil
+}
diff --git a/types/utils.go b/types/utils.go
new file mode 100644
index 0000000..60e82fe
--- /dev/null
+++ b/types/utils.go
@@ -0,0 +1,29 @@
+package types
+
+import "reflect"
+
+// isTypedNil reports whether o is an interface value holding a nil pointer,
+// channel, func, map, or slice — a "typed nil" that does not compare equal to nil.
+// Go lacks a simple and safe way to see if something is a typed nil.
+// See:
+// - https://dave.cheney.net/2017/08/09/typed-nils-in-go-2
+// - https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I/discussion
+// - https://github.com/golang/go/issues/21538
+func isTypedNil(o interface{}) bool {
+	rv := reflect.ValueOf(o)
+	switch rv.Kind() {
+	case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice:
+		return rv.IsNil()
+	default:
+		return false
+	}
+}
+
+// isEmpty returns true if the value has zero length.
+func isEmpty(o interface{}) bool {
+	rv := reflect.ValueOf(o)
+	switch rv.Kind() {
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
+		return rv.Len() == 0
+	default:
+		return false
+	}
+}
diff --git a/types/validator.go b/types/validator.go
new file mode 100644
index 0000000..1effe57
--- /dev/null
+++ b/types/validator.go
@@ -0,0 +1,14 @@
+package types
+
+import "github.com/strangelove-ventures/cometbft-client/crypto"
+
+// Validator holds the volatile state for each validator.
+// NOTE: The ProposerPriority is not included in Validator.Hash();
+// make sure to update that method if changes are made here.
+type Validator struct {
+	Address     Address       `json:"address"`
+	PubKey      crypto.PubKey `json:"pub_key"`
+	VotingPower int64         `json:"voting_power"`
+
+	ProposerPriority int64 `json:"proposer_priority"`
+}
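The typed-nil helper above guards against a classic Go pitfall; a minimal sketch (illustrative only, not part of the patch; assumes the surrounding types package and an fmt import):

// exampleTypedNil is a hypothetical snippet, not part of this diff.
func exampleTypedNil() {
	var v *Validator           // nil pointer
	var i interface{} = v      // interface now holds a typed nil
	fmt.Println(i == nil)      // false: the interface carries type information
	fmt.Println(isTypedNil(i)) // true: the underlying pointer is nil
}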
diff --git a/types/validator_set.go b/types/validator_set.go
new file mode 100644
index 0000000..16d2f5f
--- /dev/null
+++ b/types/validator_set.go
@@ -0,0 +1,23 @@
+package types
+
+// ValidatorSet represents a set of *Validator at a given height.
+//
+// The validators can be fetched by address or index.
+// The index is in order of .VotingPower, so the indices are fixed for all
+// rounds of a given blockchain height - ie. the validators are sorted by their
+// voting power (descending). Secondary index - .Address (ascending).
+//
+// On the other hand, the .ProposerPriority of each validator and the
+// designated .GetProposer() of a set change every round, upon calling
+// .IncrementProposerPriority().
+//
+// NOTE: Not goroutine-safe.
+// NOTE: All get/set to validators should copy the value for safety.
+type ValidatorSet struct {
+	// NOTE: persisted via reflect, must be exported.
+	Validators []*Validator `json:"validators"`
+	Proposer   *Validator   `json:"proposer"`
+
+	// cached (unexported)
+	totalVotingPower int64
+}
diff --git a/types/vote.go b/types/vote.go
new file mode 100644
index 0000000..12ae6fb
--- /dev/null
+++ b/types/vote.go
@@ -0,0 +1,6 @@
+package types
+
+import "github.com/strangelove-ventures/cometbft-client/crypto"
+
+// Address is hex bytes.
+type Address = crypto.Address
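Finally, a minimal sketch of the WeightedMedian helper from types/time in use (illustrative only, not part of the patch; the cmttime alias and the timestamps are assumptions):

// Hypothetical snippet, not part of this diff.
package main

import (
	"fmt"
	"time"

	cmttime "github.com/strangelove-ventures/cometbft-client/types/time"
)

func main() {
	t1 := cmttime.Now()
	t2 := t1.Add(2 * time.Second)
	t3 := t1.Add(4 * time.Second)

	wts := []*cmttime.WeightedTime{
		cmttime.NewWeightedTime(t1, 40),
		cmttime.NewWeightedTime(t2, 35),
		cmttime.NewWeightedTime(t3, 25),
	}

	// With total power 100, the median is the first timestamp (in time order)
	// at which the cumulative weight reaches at least half the total — here t2.
	fmt.Println(cmttime.WeightedMedian(wts, 100))
}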