diff --git a/.github/workflows/copy-sdk.yml b/.github/workflows/copy-sdk.yml new file mode 100644 index 000000000..e6a30b474 --- /dev/null +++ b/.github/workflows/copy-sdk.yml @@ -0,0 +1,50 @@ +name: Copy SDK Files + +on: + workflow_dispatch: + schedule: + # Runs every day at 6pm PST (2am UST) + - cron: '0 2 * * *' + +jobs: + sync-docs-and-create-pr: + runs-on: ubuntu-latest + permissions: + contents: write + + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + with: + ref: sync-docs + fetch-depth: 0 + + - name: Set Up Node.js + uses: actions/setup-node@v3 + with: + node-version: 20 + cache: yarn + + - name: Install Dependencies + run: | + yarn install --frozen-lockfile + + - name: Run Sync Script + run: | + chmod +x sync_script.sh + ./sync_script.sh + + - name: Create Pull Request + uses: peter-evans/create-pull-request@v5 + with: + token: ${{ secrets.GITHUB_TOKEN }} + commit-message: Updated SDK spec changes from provenance/x + branch: update-sdk-docs + delete-branch: true + title: 'Update SDK docs' + labels: | + automated pr + assignees: webbushka + reviewers: peter-evans + team-reviewers: | + blockchain-core diff --git a/.gitignore b/.gitignore index 855bb5a03..81ef98e0c 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ # Dependencies /node_modules +/provenance # Production /build diff --git a/docs/sdk/README.md b/docs/sdk/README.md new file mode 100644 index 000000000..af055f45b --- /dev/null +++ b/docs/sdk/README.md @@ -0,0 +1,22 @@ +--- +sidebar_position: 0 +--- + +# List of Modules + +Modules are the code components of the Provenance Blockchain that execute the majority of the business logic for applications. The [Cosmos SDK](https://docs.cosmos.network/v0.47) enables developers to build modules that utilize the core structure of the SDK to allow the modules to function together. To read more about creating modules, refer to the [Cosmos documentation on modules](https://docs.cosmos.network/v0.47/building-modules/intro). + +Provenance uses inherited modules from the Cosmos SDK, and has also developed modules that are specific to Provenance. + +* [Inherited Cosmos modules](https://docs.cosmos.network/v0.47/build/modules) +* [Attribute](./attribute/README.md) - Functions as a blockchain registry for storing \ pairs. +* [Exchange](./exchange/README.md) - Facilitates the trading of on-chain assets. +* [Hold](./hold/README.md) - Keeps track of funds in an account that have a hold placed on them. +* [ibchooks](./ibchooks/README.md) - Forked from https://github.com/osmosis-labs/osmosis/tree/main/x/ibchooks +* [Marker](./marker/README.md) - Allows for the creation of fungible tokens. +* [Metadata](./metadata/README.md) - Provides a system for referencing off-chain information. +* [msgfees](./msgfees/README.md) - Manages additional fees that can be applied to tx msgs. +* [Name](./name/README.md) - Provides a system for providing human-readable names as aliases for addresses. +* [Oracle](./oracle/README.md) - Provides the capability to dynamically expose query endpoints. +* [Reward](./reward/README.md) - Provides a system for distributing rewards to accounts. +* [Trigger](./trigger/README.md) - Provides a system for triggering transactions based on predeterminded events. diff --git a/docs/sdk/attribute/01_state.md b/docs/sdk/attribute/01_state.md new file mode 100644 index 000000000..f38860b3b --- /dev/null +++ b/docs/sdk/attribute/01_state.md @@ -0,0 +1,64 @@ +# State +The attribute module inserts all attributes into a basic state store. 
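+
+As a rough illustration of the [key layout](#key-layout) described below, the following Go sketch assembles such a key from the `0x02` prefix, the address, the attribute name, and a hash of the attribute value. The hash choice (SHA-256) and the flat concatenation are assumptions made for illustration only; the module's actual key encoding may length-prefix segments or hash the value differently.
+
+```go
+package main
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+)
+
+// attributePrefix is the leading type byte for attribute records.
+const attributePrefix = 0x02
+
+// attributeKey sketches how an attribute record key could be assembled:
+// [0x02][address][attribute name][hash of value]. Illustrative only.
+func attributeKey(address []byte, name string, value []byte) []byte {
+	valueHash := sha256.Sum256(value)
+	key := []byte{attributePrefix}
+	key = append(key, address...)
+	key = append(key, []byte(name)...)
+	key = append(key, valueHash[:]...)
+	return key
+}
+
+func main() {
+	// A hypothetical account and attribute, purely for illustration.
+	addr := []byte("example-address")
+	key := attributeKey(addr, "kyc.provenance.io", []byte(`{"passed":true}`))
+	fmt.Println(hex.EncodeToString(key))
+}
+```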
+ + + - [Attribute KV-Store](#attribute-kv-store) + - [Key layout](#key-layout) + - [Attribute Record](#attribute-record) + - [Attribute Type](#attribute-type) + + + +## Attribute KV-Store + +The attribute module takes in an attribute supplied by the user and generates a key for it. This key is generated +by combinining the attribute prefix, address, attribute name, and a hashed value of the attribute value. This +can then be used to either store a marshalled attribute record, or retrieve the value it points to in the store. + +### Key layout +[0x02][address][attribute name][hashvalue] + +### Attribute Record +``` +// Attribute holds a typed key/value structure for data associated with an account +type Attribute struct { + // The attribute name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + + // The attribute value. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + + // The attribute value type. + AttributeType AttributeType `protobuf:"varint,3,opt,name=attribute_type,json=attributeType,proto3,enum=provenance.attribute.v1.AttributeType" json:"attribute_type,omitempty"` + + // The address the attribute is bound to + Address string `protobuf:"bytes,4,opt,name=address,proto3" json:"address,omitempty"` +} +``` + +### Attribute Type +``` +// AttributeType defines the type of the data stored in the attribute value +type AttributeType int32 + +const ( + // ATTRIBUTE_TYPE_UNSPECIFIED defines an unknown/invalid type + AttributeType_Unspecified AttributeType = 0 + // ATTRIBUTE_TYPE_UUID defines an attribute value that contains a string value representation of a V4 uuid + AttributeType_UUID AttributeType = 1 + // ATTRIBUTE_TYPE_JSON defines an attribute value that contains a byte string containing json data + AttributeType_JSON AttributeType = 2 + // ATTRIBUTE_TYPE_STRING defines an attribute value that contains a generic string value + AttributeType_String AttributeType = 3 + // ATTRIBUTE_TYPE_URI defines an attribute value that contains a URI + AttributeType_Uri AttributeType = 4 + // ATTRIBUTE_TYPE_INT defines an attribute value that contains an integer (cast as int64) + AttributeType_Int AttributeType = 5 + // ATTRIBUTE_TYPE_FLOAT defines an attribute value that contains a float + AttributeType_Float AttributeType = 6 + // ATTRIBUTE_TYPE_PROTO defines an attribute value that contains a serialized proto value in bytes + AttributeType_Proto AttributeType = 7 + // ATTRIBUTE_TYPE_BYTES defines an attribute value that contains an untyped array of bytes + AttributeType_Bytes AttributeType = 8 +) +``` diff --git a/docs/sdk/attribute/02_messages.md b/docs/sdk/attribute/02_messages.md new file mode 100644 index 000000000..ffb33a130 --- /dev/null +++ b/docs/sdk/attribute/02_messages.md @@ -0,0 +1,199 @@ +# Messages + +In this section we describe the processing of the staking messages and the corresponding updates to the state. + + + - [MsgAddAttributeRequest](#msgaddattributerequest) + - [MsgUpdateAttributeRequest](#msgupdateattributerequest) + - [MsgUpdateAttributeExpirationRequest](#msgupdateattributeexpirationrequest) + - [MsgDeleteAttributeRequest](#msgdeleteattributerequest) + - [MsgDeleteDistinctAttributeRequest](#msgdeletedistinctattributerequest) + - [MsgSetAccountDataRequest](#msgsetaccountdatarequest) + + + +## MsgAddAttributeRequest + +An attribute record is created using the `MsgAddAttributeRequest` message. + +```proto +// MsgAddAttributeRequest defines an sdk.Msg type that is used to add a new attribute to an account. 
+// Attributes may only be set in an account by the account that the attribute name resolves to. +message MsgAddAttributeRequest { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + option (gogoproto.stringer) = false; + option (gogoproto.goproto_getters) = false; + + // The attribute name. + string name = 1; + // The attribute value. + bytes value = 2; + // The attribute value type. + AttributeType attribute_type = 3; + // The account to add the attribute to. + string account = 4; + // The address that the name must resolve to. + string owner = 5; + // Time that an attribute will expire. + google.protobuf.Timestamp expiration_date = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = true]; +} +``` + +This message is expected to fail if: +- Any components of the request do not pass basic integrity and format checks +- Attribute value exceeds the maximum length +- Unable to normalize the name +- The account does not exist +- The name does not resolve to the owner address + +If successful, an attribute record will be created for the account. + +## MsgUpdateAttributeRequest + +The update attribute request method allows an existing attribute record to replace its value with a new one. + +```proto +// MsgUpdateAttributeRequest defines an sdk.Msg type that is used to update an existing attribute to an account. +// Attributes may only be set in an account by the account that the attribute name resolves to. +message MsgUpdateAttributeRequest { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + option (gogoproto.stringer) = false; + option (gogoproto.goproto_getters) = false; + + // The attribute name. + string name = 1; + // The original attribute value. + bytes original_value = 2; + // The update attribute value. + bytes update_value = 3; + // The original attribute value type. + AttributeType original_attribute_type = 4; + // The update attribute value type. + AttributeType update_attribute_type = 5; + // The account to add the attribute to. + string account = 6; + // The address that the name must resolve to. + string owner = 7; +} +``` + +This message is expected to fail if: +- Any components of the request do not pass basic integrity and format checks +- Updated attribute value exceeds the maximum length +- Unable to normalize the original or updated attribute name +- Updated name and the original name don't match +- The owner account does not exist +- The updated name does not resolve to the owner address +- The original attribute does not exist + +If successful, the value of an attribute will be updated. + +## MsgUpdateAttributeExpirationRequest + +The update attribute expiration request method updates the attribute's expiration date. + +```proto +// MsgUpdateAttributeExpirationRequest defines an sdk.Msg type that is used to update an existing attribute's expiration +// date +message MsgUpdateAttributeExpirationRequest { + option (gogoproto.equal) = true; + option (gogoproto.stringer) = true; + option (gogoproto.goproto_stringer) = false; + + // The attribute name. + string name = 1; + // The original attribute value. + bytes value = 2; + // Time that an attribute will expire. + google.protobuf.Timestamp expiration_date = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = true]; + // The account to add the attribute to. + string account = 4; + // The address that the name must resolve to. 
+ string owner = 5; +} +``` + +This message is expected to fail if: +- Any components of the request do not pass basic integrity and format checks +- The owner account does not exist +- The name does not resolve to the owner address +- The attribute does not exist +- The expiration date is before current block height + +## MsgDeleteAttributeRequest + +The delete distinct attribute request method removes an existing account attribute. + +```proto +// MsgDeleteAttributeRequest defines a message to delete an attribute from an account +// Attributes may only be removed from an account by the account that the attribute name resolves to. +message MsgDeleteAttributeRequest { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + option (gogoproto.stringer) = false; + option (gogoproto.goproto_getters) = false; + + // The attribute name. + string name = 1; + // The account to add the attribute to. + string account = 2; + // The address that the name must resolve to. + string owner = 3; +} +``` + +This message is expected to fail if: +- Any components of the request do not pass basic integrity and format checks +- The owner account does not exist +- The name does not resolve to the owner address +- The attribute does not exist + +## MsgDeleteDistinctAttributeRequest + +The delete distinct attribute request method removes an existing account attribute with a specific value. + +```proto +// MsgDeleteDistinctAttributeRequest defines a message to delete an attribute with matching name, value, and type from +// an account. Attributes may only be removed from an account by the account that the attribute name resolves to. +message MsgDeleteDistinctAttributeRequest { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + option (gogoproto.stringer) = false; + option (gogoproto.goproto_getters) = false; + + // The attribute name. + string name = 1; + // The attribute value. + bytes value = 2; + // The account to add the attribute to. + string account = 3; + // The address that the name must resolve to. + string owner = 4; +} +``` + +This message is expected to fail if: +- Any components of the request do not pass basic integrity and format checks +- The owner account does not exist +- The name does not resolve to the owner address +- The attribute does not exist + +## MsgSetAccountDataRequest + +The set account data request method associates some data (a string) with an account. + +```protobuf +// MsgSetAccountDataRequest defines a message to set an account's accountdata attribute. +message MsgSetAccountDataRequest { + option (cosmos.msg.v1.signer) = "account"; + + string value = 1; + string account = 2; +} +``` + +This message is expected to fail if: +- The value is too long (as defined in attribute module params). +- The message is not signed by the provided account. diff --git a/docs/sdk/attribute/03_events.md b/docs/sdk/attribute/03_events.md new file mode 100644 index 000000000..ec85c5762 --- /dev/null +++ b/docs/sdk/attribute/03_events.md @@ -0,0 +1,111 @@ +# Events + +The attribute module emits the following events: + + + - [Attribute Added](#attribute-added) + - [Attribute Updated](#attribute-updated) + - [Attribute Expiration Updated](#attribute-expiration-updated) + - [Attribute Deleted](#attribute-deleted) + - [Distinct Attribute Deleted](#distinct-attribute-deleted) + - [Attribute Expired](#attribute-expired) + - [Account Data Updated](#account-data-updated) + +--- +## Attribute Added + +Fires when an attribute is successfully added. 
+ +| Type | Attribute Key | Attribute Value | +|-------------------|---------------|------------------------| +| EventAttributeAdd | Name | \{name string\} | +| EventAttributeAdd | Value | \{attribute value\} | +| EventAttributeAdd | Type | \{attribute value type\} | +| EventAttributeAdd | Account | \{account address\} | +| EventAttributeAdd | Owner | \{owner address\} | +| EventAttributeAdd | Expiration | \{expiration date/time\} | + +`provenance.attribute.v1.EventAttributeAdd` + +--- +## Attribute Updated + +Fires when an existing attribute is successfully updated. + +| Type | Attribute Key | Attribute Value | +|----------------------|---------------|----------------------------| +| EventAttributeUpdate | Name | \{name string\} | +| EventAttributeUpdate | OriginalValue | \{attribute value\} | +| EventAttributeUpdate | OriginalType | \{attribute value type\} | +| EventAttributeUpdate | UpdateValue | \{new attribute value\} | +| EventAttributeUpdate | UpdateType | \{new attribute value type\} | +| EventAttributeUpdate | Account | \{account address\} | +| EventAttributeUpdate | Owner | \{owner address\} | + +`provenance.attribute.v1.EventAttributeUpdate` + +--- +## Attribute Expiration Updated + +Fires when an existing attribute's expiration is successfully updated. + +| Type | Attribute Key | Attribute Value | +|--------------------------------|--------------------|----------------------------| +| EventAttributeExpirationUpdate | Name | \{name string\} | +| EventAttributeExpirationUpdate | Value | \{attribute value\} | +| EventAttributeExpirationUpdate | Account | \{account address\} | +| EventAttributeExpirationUpdate | Owner | \{owner address\} | +| EventAttributeExpirationUpdate | OriginalExpiration | \{old expiration date/time\} | +| EventAttributeExpirationUpdate | UpdatedExpiration | \{new expiration date/time\} | + + +--- +## Attribute Deleted + +Fires when an existing attribute is deleted. + +| Type | Attribute Key | Attribute Value | +|----------------------|---------------|-------------------| +| EventAttributeDelete | Name | \{name string\} | +| EventAttributeDelete | Account | \{account address\} | +| EventAttributeDelete | Owner | \{owner address\} | + +`provenance.attribute.v1.EventAttributeDelete` + +--- +## Distinct Attribute Deleted + +Fires when an existing attribute is deleted distinctly. + +| Type | Attribute Key | Attribute Value | +|------------------------------|---------------|------------------------| +| EventAttributeDistinctDelete | Name | \{name string\} | +| EventAttributeDistinctDelete | Value | \{attribute value\} | +| EventAttributeDistinctDelete | AttributeType | \{attribute value type\} | +| EventAttributeDistinctDelete | Owner | \{owner address\} | +| EventAttributeDistinctDelete | Account | \{account address\} | + +`provenance.attribute.v1.EventAttributeDistinctDelete` + +--- +## Attribute Expired + +Fires when an attribute's expriration date/time has been reached and the attribute has been deleted. + +| Type | Attribute Key | Attribute Value | +|-----------------------|---------------|------------------------| +| EventAttributeExpired | Name | \{name string\} | +| EventAttributeExpired | Value | \{attribute value\} | +| EventAttributeExpired | AttributeType | \{attribute value type\} | +| EventAttributeExpired | Account | \{account address\} | +| EventAttributeExpired | Owner | \{owner address\} | +| EventAttributeExpired | Expiration | \{expiration date/time\} | + +--- +## Account Data Updated + +Fires when account data is updated for an account. 
+ +| Type | Attribute Key | Attribute Value | +|-------------------------|---------------|------------------------| +| EventAccountDataUpdated | Account | \{account address\} | diff --git a/docs/sdk/attribute/04_params.md b/docs/sdk/attribute/04_params.md new file mode 100644 index 000000000..33d435b13 --- /dev/null +++ b/docs/sdk/attribute/04_params.md @@ -0,0 +1,7 @@ +# Parameters + +The attribute module contains the following parameters: + +| Key | Type | Example | +|------------------------|--------|---------| +| MaxValueLength | uint32 | 32 | \ No newline at end of file diff --git a/docs/sdk/attribute/README.md b/docs/sdk/attribute/README.md new file mode 100644 index 000000000..06051b9ad --- /dev/null +++ b/docs/sdk/attribute/README.md @@ -0,0 +1,17 @@ +# `x/attribute` + +## Abstract + +The purpose of the Attributes Module is to act as a registry that allows an Address to store `` pairs. +Every Name must be registered by the Name Service, and a Name have multiple Values associated with it. Values are required to have a type, and they can be set or retrieved by Name. + +This feature provides the blockchain with the capability to store and retrieve values by Name. It plays a major +part in some of our components such as smart contract creation process. It allows an Address to create and store +a named smart contract on the blockchain. + +## Contents + +1. **[State](01_state.md)** +1. **[Messages](02_messages.md)** +1. **[Events](03_events.md)** +1. **[Params](04_params.md)** \ No newline at end of file diff --git a/docs/sdk/exchange/01_concepts.md b/docs/sdk/exchange/01_concepts.md new file mode 100644 index 000000000..501768fb6 --- /dev/null +++ b/docs/sdk/exchange/01_concepts.md @@ -0,0 +1,336 @@ +# Exchange Concepts + +The `x/exchange` module facilitates the trading of on-chain assets. + +Markets provide fee structures and are responsible for identifying and triggering settlements. +Orders are created by users to indicate a desire to trade on-chain funds in a market. +The exchange module defines a portion of market fees to be paid to the chain (distributed like gas fees). + +--- + + - [Markets](#markets) + - [Required Attributes](#required-attributes) + - [Market Permissions](#market-permissions) + - [Settlement](#settlement) + - [Orders](#orders) + - [Ask Orders](#ask-orders) + - [Bid Orders](#bid-orders) + - [Partial Orders](#partial-orders) + - [External IDs](#external-ids) + - [Fees](#fees) + - [Order Creation Fees](#order-creation-fees) + - [Settlement Flat Fees](#settlement-flat-fees) + - [Settlement Ratio Fees](#settlement-ratio-fees) + - [Exchange Fees](#exchange-fees) + + +## Markets + +A market is a combination of on-chain setup and off-chain processes. +They are created by a governance proposal using the [MsgGovCreateMarketRequest](03_messages.md#msggovcreatemarketrequest) message. +Most aspects of the market are then manageable using permissioned endpoints. +Fees can only be managed with a governance proposal using the [MsgGovManageFeesRequest](03_messages.md#msggovmanagefeesrequest) message. + +Each market has a set of optional details designed for human-use, e.g. name, description, website url. + +A market is responsible (off-chain) for identifying order matches and triggering (on-chain) settlement. + +A market receives fees for order creation and order settlement. It also defines what fees are required and what is acceptable as payments. 
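+
+As a rough illustration of the wildcard matching used for the required creation attributes described under [Required Attributes](#required-attributes) below (the helper name and exact logic are a sketch, not the module's actual implementation):
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+// matchesRequiredAttribute sketches the matching rule: a required attribute may
+// start with "*." to match any account attribute that ends with the designated
+// base and has at least one extra name level; otherwise it must match exactly.
+// Illustrative only.
+func matchesRequiredAttribute(required, accountAttr string) bool {
+	if strings.HasPrefix(required, "*.") {
+		base := required[1:] // keep the leading "." so the bare base won't match
+		return strings.HasSuffix(accountAttr, base) && len(accountAttr) > len(base)
+	}
+	return required == accountAttr
+}
+
+func main() {
+	req := "*.kyc.pb"
+	for _, attr := range []string{"buyer.kyc.pb", "special.seller.kyc.pb", "buyer.xkyc.pb", "kyc.pb"} {
+		fmt.Printf("%-25s matches %s: %v\n", attr, req, matchesRequiredAttribute(req, attr))
+	}
+}
+```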
+ +A market can delegate various [permissions](#market-permissions) to other accounts, allowing those accounts to use specific endpoints on behalf of the market. + +Markets can restrict who can create orders with them by defining account attributes that are required to create orders. See [Required Attributes](#required-attributes). + +Markets can control whether user-settlement is allowed. +When user-settlement is allowed, the [FillBids](03_messages.md#fillbids) and [FillAsks](03_messages.md#fillasks) endpoints can be used for orders in the market. + +A market can also control whether orders can be created for it. +When order creation is not allowed, any existing orders can still be settled or cancelled, but no new ones can be made (in that market). + +The fees collected by a market are kept in the market's account, and can be accessed using the [MarketWithdraw](03_messages.md#marketwithdraw) endpoint. + +See also: [Market](03_messages.md#market). + +### Required Attributes + +There is a separate list of attributes required to create each order type. +If one or more attributes are required to create an order of a certain type, the order creator (buyer or seller) must have all of them on their account. + +Required attributes can have a wildcard at the start to indicate that any attribute with the designated base and one (or more) level(s) is applicable. +The only place a wildcard `*` is allowed is at the start of the string and must be immediately followed by a period. +For example, a required attribute of `*.kyc.pb` would match an account attribute of `buyer.kyc.pb` or `special.seller.kyc.pb`, but not `buyer.xkyc.pb` (wrong base) or `kyc.pb` (no extra level). + +Attributes are defined using the [x/name](/x/name/spec/README.md) module, and are managed on accounts using the [x/attributes](/x/attribute/spec/README.md) module. + +### Market Permissions + +The different available permissions are defined by the [Permission](03_messages.md#permission) proto enum message. + +Each market manages its own set of [AccessGrants](03_messages.md#accessgrant), which confer specific permissions to specific addresses. + +* `PERMISSION_UNSPECIFIED`: it is an error to try to use this permission for anything. +* `PERMISSION_SETTLE`: accounts with this permission can use the [MarketSettle](03_messages.md#marketsettle) endpoint for a market. +* `PERMISSION_SET_IDS`: accounts with this permission can use the [MarketSetOrderExternalID](03_messages.md#marketsetorderexternalid) endpoint for a market. +* `PERMISSION_CANCEL`: accounts with this permission can use the [CancelOrder](03_messages.md#cancelorder) endpoint to cancel orders in a market. +* `PERMISSION_WITHDRAW`: accounts with this permission can use the [MarketWithdraw](03_messages.md#marketwithdraw) endpoint for a market. +* `PERMISSION_UPDATE`: accounts with this permission can use the [MarketUpdateDetails](03_messages.md#marketupdatedetails), [MarketUpdateEnabled](03_messages.md#marketupdateenabled), and [MarketUpdateUserSettle](03_messages.md#marketupdateusersettle) endpoints for a market. +* `PERMISSION_PERMISSIONS`: accounts with this permission can use the [MarketManagePermissions](03_messages.md#marketmanagepermissions) endpoint for a market. +* `PERMISSION_ATTRIBUTES`: accounts with this permission can use the [MarketManageReqAttrs](03_messages.md#marketmanagereqattrs) endpoint for a market. + + +### Settlement + +Each market is responsible for the settlement of its orders. +To do this, it must first identify a matching set of asks and bids. 
+The [MarketSettle](03_messages.md#marketsettle) endpoint is then used to settle and clear orders. +If the market allows, users can also settlement orders with their own funds using the [FillBids](03_messages.md#fillbids) or [FillAsks](03_messages.md#fillasks) endpoints. + +During settlement, at most one order can be partially filled, and it must be the last order in its list (in [MsgMarketSettleRequest](03_messages.md#msgmarketsettlerequest)). +That order must allow partial settlement (defined at order creation) and be evenly divisible (see [Partial Orders](#partial-orders)). +The market must also set the `expect_partial` field to `true` in the request. +If all of the orders are being filled in full, the `expect_partial` field must be `false`. + +All orders in a settlement must have the same `assets` denoms, and also the same `price` denoms, but the fees can be anything. +The total bid price must be at least the total ask price (accounting for partial fulfillment if applicable). + +During settlement: + +1. The `assets` are transferred directly from the seller(s) to the buyer(s). +2. The `price` funds are transferred directly from the buyer(s) to the seller(s). +3. All settlement fees are transferred directly from the seller(s) and buyer(s) to the market. +4. The exchange's portion of the fees is transferred from the market to the chain's fee collector. + +With complex settlements, it's possible that an ask order's `assets` go to a different account than the `price` funds come from, and vice versa for bid orders. + +Transfers of the `assets` and `price` bypass the quarantine module since order creation can be viewed as acceptance of those funds. + +Transfers do not bypass any other send-restrictions (e.g. `x/marker` or `x/sanction` module restrictions). +E.g. If an order's funds are in a sanctioned account, settlement of that order will fail since those funds cannot be removed from that account. +Or, if a marker has required attributes, but the recipient does not have those attributes, settlement will fail. + + +## Orders + +Orders are created by users that want to trade assets in a market. + +When an order is created, a hold is placed on the applicable funds. +Those funds will remain in the user's account until the order is settled or cancelled. +The holds ensure that the required funds are available at settlement without the need of an intermediary holding/clearing account. +During settlement, the funds get transferred directly between the buyers and sellers, and fees are paid from the buyers and sellers directly to the market. + +Orders can be cancelled by either the user or the market. + +Once an order is created, it cannot be modified except in these specific ways: + +1. When an order is partially filled, the amounts in it will be reduced accordingly. +2. An order's external id can be changed by the market. +3. Cancelling an order will release the held funds and delete the order. +4. Settling an order in full will delete the order. + + +### Ask Orders + +Ask orders represent a desire to sell some specific `assets` at a minimum `price`. +When an ask order is created, a hold is placed on the `assets` being sold. +If the denom of the `seller_settlement_flat_fee` is different from the denom of the price, a hold is placed on that flat fee too. +It's possible for an ask order to be filled at a larger `price` than initially defined. + +The `seller_settlement_flat_fee` is verified at the time of order creation, but only paid during settlement. 
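+
+A minimal sketch of the hold described above, placed when an ask order is created: the `assets` are always held, and the `seller_settlement_flat_fee` is held as well only when its denom differs from the `price` denom (otherwise it can later come out of the price funds the seller receives). Types and the function name are illustrative stand-ins, not the module's actual code.
+
+```go
+package main
+
+import "fmt"
+
+// Coin is a simplified stand-in for a denom/amount pair.
+type Coin struct {
+	Denom  string
+	Amount int64
+}
+
+// askOrderHold sketches which funds are held for a new ask order.
+func askOrderHold(assets, price, sellerFlatFee Coin) []Coin {
+	hold := []Coin{assets}
+	if sellerFlatFee.Amount > 0 && sellerFlatFee.Denom != price.Denom {
+		hold = append(hold, sellerFlatFee)
+	}
+	return hold
+}
+
+func main() {
+	// Selling 2cow for at least 15chicken with a 1chicken flat fee:
+	// only the 2cow are held, since the fee shares the price denom.
+	fmt.Println(askOrderHold(Coin{"cow", 2}, Coin{"chicken", 15}, Coin{"chicken", 1}))
+
+	// If the flat fee were 1hen instead, it would be held as well.
+	fmt.Println(askOrderHold(Coin{"cow", 2}, Coin{"chicken", 15}, Coin{"hen", 1}))
+}
+```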
+ +When an ask order is settled, the `assets` are transferred directly to the buyer(s) and the `price` is transferred directly from the buyer(s). +Then the seller settlement fees are transferred from the seller to the market. + +During settlement, the seller settlement fee ratio with the appropriate `price` denom is applied to the price the seller is receiving. +That result is then added to the ask order's `seller_settlement_flat_fee` to get the total settlement fee to be paid for the ask order. +In this way, the seller's settlement ratio fee can be taken out of the `price` funds that the seller is receiving. +If the `seller_settlement_flat_fee` is the same denom as the price, it can come out of the `price` funds too. + +Because the fees can come out of the `price` funds, it's possible (probable) that the total `price` funds that the seller ends up with, is less than their requested price. + +For example, a user creates an ask order to sell `2cow` (the `assets`) and wants at least `15chicken` (the `price`). +The market finds a way to settle that order where the seller will get `16chicken`, but the seller's settlement fee will end up being `2chicken`. +During settlement, the `2cow` are transferred from the seller to the buyer, and `16chicken` are transferred from the buyer to the seller. +Then, `2chicken` are transferred from the seller to the market. +So the seller ends up with `14chicken` for their `2cow`. + +See also: [AskOrder](03_messages.md#askorder). + + +### Bid Orders + +Bid orders represent a desire to buy some specific `assets` at a specific `price`. +When a bid order is created, a hold is placed on the order's `price` and `buyer_settlement_fees`. + +When a bid order is settled, the `price` is transferred directly to the seller(s) and the assets are transferred directly from the seller(s). +Then, the buyer settlement fees are transferred from the buyer to the market. + +The `buyer_settlement_fees` are verified at the time of order creation, but only paid during settlement. +They are paid in addition to the `price` the buyer is paying. + +See also: [BidOrder](03_messages.md#bidorder). + + +### Partial Orders + +Both Ask orders and Bid orders can optionally allow partial fulfillment by setting the `allow_partial` field to `true` when creating the order. + +When an order is partially filled, the order's same `assets:price` and `assets:settlement-fees` ratios are maintained. + +Since only whole numbers are allowed, this means that: + +* ` * / ` must be a whole number. +* ` * / ` must also be a whole number. + +When an ask order is partially filled, it's `price` and `seller_settlement_flat_fee` are reduced at the same rate as the assets, even if the seller is receiving a higher price than requested. +E.g. If an ask order selling `2cow` for `10chicken` is partially settled for `1cow` at a price of `6chicken`, the seller will receive the `6chicken` but the updated ask order will list that there's still `1cow` for sale for `5chicken`. + +When an order is partially filled, its amounts are updated to reflect what hasn't yet been filled. + +An order that allows partial fulfillment can be partially filled multiple times (as long as the numbers allow for it). + +Settlement will fail if an order is being partially filled that either doesn't allow it, or cannot be evenly split at the needed `assets` amount. + + +### External IDs + +Orders can be identified using an off-chain identifier. +These can be provided during order creation (in the `external_id` field). 
+They can also be set by the market after the order has been created using the [MarketSetOrderExternalID](03_messages.md#marketsetorderexternalid) endpoint. + +Each external id is unique inside a market. +I.e. two orders in the same market cannot have the same external id, but two orders in different markets **can** have the same external id. +An attempt (by a user) to create an order with a duplicate external id, will fail. +An attempt (by a market) to change an order's external id to one already in use, will fail. + +The external ids are optional, so it's possible that multiple orders in a market have an empty string for the external id. +Orders with external ids can be looked up using the [GetOrderByExternalID](05_queries.md#getorderbyexternalid) query (as well as the other order queries). + +External ids are limited to 100 characters. + + +## Fees + +Markets dictate the minimum required fees. It's possible to pay more than the required fees, but not less. + +A portion of the fees that a market collects are sent to the blockchain and distributed similar to gas fees. +This portion is dictated by the exchange module in its [params](06_params.md). + +There are three types of fees: + +* Order creation: Flat fees paid at the time that an order is created. +* Settlement flat fees: A fee paid during settlement that is the same for each order. +* Settlement ratio fees: A fee paid during settlement that is based off of the order's price. + +For each fee type, there is a configuration for each order type. +E.g. the ask-order creation fee is configured separately from the bid-order creation fee. + +Each fee type is only paid in a single denom, but a market can define multiple options for each. +E.g. if flat fee options for a specific fee are `5chicken,1cow`, users can provide **either** `5chicken` or `1cow` to fulfill that required fee. + +If a market does not have any options defined for a given fee type, that fee is not required. +E.g. if the `fee_create_ask_flat` field is empty, there is no fee required to create an ask order. + +All fees except the seller settlement ratio fees must be provided with order creation, and are validated at order creation. + + +### Order Creation Fees + +This is a fee provided in the `order_creation_fee` field of the order creation `Msg`s and is collected immediately. +These are paid on top of any gas or message fees required. + +Each order type has its own creation fee configuration: + +* `fee_create_ask_flat`: The available `Coin` fee options for creating an ask order. +* `fee_create_bid_flat`: The available `Coin` fee options for creating a bid order. + + +### Settlement Flat Fees + +This is a fee provided as part of an order, but is not collected until settlement. + +Each order type has its own settlement flat fee configuration: + +* `fee_seller_settlement_flat`: The available `Coin` fee options that are paid by the seller during settlement. +* `fee_buyer_settlement_flat`: The available `Coin` fee options that are paid by the buyer during settlement. + +The ask order's `seller_settlement_flat_fee` must be at least one of the available `fee_seller_settlement_flat` options. +The bid order's `buyer_settlement_fees` must be enough to cover one of the `fee_buyer_settlement_flat` options plus one of the buyer settlement ratio fee options. + + +### Settlement Ratio Fees + +A [FeeRatio](03_messages.md#feeratio) is a pair of `Coin`s defining a `price` to `fee` ratio. 
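+
+Pictured as a simple Go type (field names here are only illustrative; the linked `FeeRatio` message is the authoritative definition), a ratio such as `100chicken:1cow` reads as "1cow of fee per 100chicken of price":
+
+```go
+package exchange
+
+// Coin is a simplified stand-in for a denom/amount pair.
+type Coin struct {
+	Denom  string
+	Amount int64
+}
+
+// FeeRatio pairs a price amount with the fee charged per that amount of price,
+// e.g. Price=100chicken, Fee=1cow means 1cow of fee per 100chicken of price.
+type FeeRatio struct {
+	Price Coin
+	Fee   Coin
+}
+```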
+ +Each order type has its own settlement ratio fee configurations: + +* `fee_seller_settlement_ratios`: The available `FeeRatio` options that are applied to the `price` received. +* `fee_buyer_settlement_ratios`: The available `FeeRatio` options that are applied to the bid order's `price`. + +If a market defines both buyer and seller settlement ratios, they should define ratios in each with the same set of `price` denoms. +E.g. if there's a `fee_buyer_settlement_ratios` entry of `100chicken:1cow`, there should be an entry in `fee_seller_settlement_ratios` with a price denom of `chicken` (or `fee_seller_settlement_ratios` should be empty). + +If a market requires both, but there's a price denom in the `fee_buyer_settlement_ratios` that isn't in `fee_seller_settlement_ratios`, then orders with that denom in their `price` cannot be settled. +If a market requires both, but there's a price denom in the `fee_seller_settlement_ratios` that isn't in `fee_buyer_settlement_ratios`, then bid orders with that denom in their `price` cannot be created, so ask orders with that price denom will have nothing to settle with. + +A `FeeRatio` can have a zero `fee` amount (but not a zero `price` amount), e.g. `1chicken:0chicken` is okay, but `0chicken:1chicken` is bad. +This allows a market to not charge a ratio fee for a specific `price` denom. + +A `FeeRatio` with the same `price` and `fee` denoms must have a larger price amount than fee amount. + + +#### Seller Settlement Ratio Fee + +A market's `fee_seller_settlement_ratios` are limited to `FeeRatio`s that have the same `price` and `fee` denom. +This ensures that the seller settlement fee can always be paid by the funds the seller is receiving. + +To calculate the seller settlement ratio fee, the following formula is used: ` * / `. +If that is not a whole number, it is rounded up to the next whole number. + +E.g. A market has `1000chicken:3chicken` in `fee_seller_settlement_ratios`. + +* An order is settling for `400chicken`: `400 * 3 / 1000` = `1.2`, which is rounded up to `2chicken`. +* An order is settling for `3000chicken`: `3000 * 3 / 1000` = `9`, which doesn't need rounding, so stays at `9chicken`. + +The actual amount isn't known until settlement, but a minimum can be calculated by applying the applicable ratio to an ask order's `price`. +The seller settlement ratio fee will be at least that amount, but since it gets larger slower than the price, ` - - ` is the least amount the seller will end up with. + + +#### Buyer Settlement Ratio Fee + +A market's `fee_buyer_settlement_ratios` can have `FeeRatios` with any denom pair, i.e. the `price` and `fee` do not need to be the same denom. +It can also have multiple entries with the same `price` denom or `fee` denom, but it can only have one entry for each `price` to `fee` denom pair. +E.g. a market can have `100chicken:1cow` and also `100chicken:7chicken`, `500cow:1cow`, and `5cow:1chicken`, but it couldn't also have `105chicken:2cow`. + +To calculate the buyer settlement ratio fee, the following formula is used: ` * / `. +If that is not a whole number, the chosen ratio is not applicable to the bid order's price and cannot be used. +The user will need to either use a different ratio or change their bid price. + +The buyer settlement ratio fee should be added to the buyer settlement flat fee and provided in the `buyer_settlement_fees` in the bid order. +The ratio and flat fees can be in any denoms allowed by the market, and do not have to be the same. 
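+
+The two ratio-fee calculations above can be sketched in Go using the chicken/cow examples from this section. The buyer formula is assumed to have the same `price * fee / ratio-price` shape as the seller one, and plain integers stand in for the SDK's coin math; this is an illustration, not the module's implementation.
+
+```go
+package main
+
+import "fmt"
+
+// sellerRatioFee applies a same-denom ratio (e.g. 1000chicken:3chicken) to the
+// price amount the seller receives, rounding any fractional result up.
+func sellerRatioFee(priceAmt, ratioPrice, ratioFee int64) int64 {
+	fee := priceAmt * ratioFee / ratioPrice
+	if priceAmt*ratioFee%ratioPrice != 0 {
+		fee++ // partial fee amounts round up to the next whole number
+	}
+	return fee
+}
+
+// buyerRatioFee applies a ratio to the bid price; the result must be a whole
+// number, otherwise the chosen ratio is not applicable to that bid price.
+func buyerRatioFee(bidPriceAmt, ratioPrice, ratioFee int64) (int64, bool) {
+	if bidPriceAmt*ratioFee%ratioPrice != 0 {
+		return 0, false // ratio unusable for this bid price
+	}
+	return bidPriceAmt * ratioFee / ratioPrice, true
+}
+
+func main() {
+	// Seller examples from above, with a 1000chicken:3chicken ratio.
+	fmt.Println(sellerRatioFee(400, 1000, 3))  // 1.2 rounds up to 2
+	fmt.Println(sellerRatioFee(3000, 1000, 3)) // exactly 9
+
+	// Buyer examples with a 100chicken:1cow ratio.
+	fmt.Println(buyerRatioFee(250, 100, 1)) // 2.5 is not whole: ratio unusable
+	fmt.Println(buyerRatioFee(300, 100, 1)) // 3cow
+}
+```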
+ + +### Exchange Fees + +A portion of the fees collected by a market, are given to the exchange. +The amount is defined using basis points in the exchange module's [Params](06_params.md#params) and can be configured differently for specific denoms. + +When the market collects fees, the applicable basis points are looked up and applied to the amount being collected. +That amount is then transferred from the market's account to the chain's fee collector (similar to gas fees). + +The following formula is used for each denom in the fees being collected: ` * / 10,000`. +If that is not a whole number, it is rounded up to the next whole number. + +For example, Say the exchange has a default split of `500` (basis points), and a specific split of `100` for `rooster`. +When a market collects a fee of `1500hen,710rooster`: +There is no specific split for `hen`, so the default `500` is used for them. `1500 * 500 / 10,000` = `75hen` (a whole number, so no rounding is needed). +The specific `rooster` split of `100` is used for those: `710 * 100 / 10,000` = `7.1` which gets rounded up to `8rooster`. +So the market will first receive `1500hen,710rooster` from the buyer(s)/seller(s), then `75hen,8rooster` is transferred from the market to the fee collector. +The market is then left with `1425hen:702rooster`. + +During [MarketSettle](03_messages.md#marketsettle), the math and rounding is applied to the total fee being collected (as opposed to applying it to each order's fee first, then summing that). + +During order creation, the exchange's portion of the order creation fee is calculated and collected from the creation fee provided in the `Msg`. + +During [FillBids](03_messages.md#fillbids) or [FillAsks](03_messages.md#fillasks), the settlement fees are summed and collected separately from the order creation fee. +That means the math and rounding is done twice, once for the total settlement fees and again for the order creation fee. +This is done so that the fees are collected the same as if an order were created and later settled by the market. diff --git a/docs/sdk/exchange/02_state.md b/docs/sdk/exchange/02_state.md new file mode 100644 index 000000000..710d6a7dd --- /dev/null +++ b/docs/sdk/exchange/02_state.md @@ -0,0 +1,283 @@ +# Exchange State + +The Exchange module manages several things in state. + +Big-endian ordering is used for all conversions between numbers and byte arrays. 
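+
+For example, the market ids (`uint32`) and order ids (`uint64`) used in the keys described below convert to and from bytes with the standard library; a minimal sketch:
+
+```go
+package main
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+func main() {
+	// Market ids are uint32 values stored as 4 big-endian bytes.
+	marketID := make([]byte, 4)
+	binary.BigEndian.PutUint32(marketID, 3)
+	fmt.Printf("market id 3     -> %x\n", marketID) // 00000003
+
+	// Order ids are uint64 values stored as 8 big-endian bytes.
+	orderID := make([]byte, 8)
+	binary.BigEndian.PutUint64(orderID, 12345)
+	fmt.Printf("order id 12345  -> %x\n", orderID)  // 0000000000003039
+	fmt.Println(binary.BigEndian.Uint64(orderID))   // back to 12345
+}
+```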
+ +--- + + - [Params](#params) + - [Default Split](#default-split) + - [Specific Denom Split](#specific-denom-split) + - [Markets](#markets) + - [Market Create-Ask Flat Fee](#market-create-ask-flat-fee) + - [Market Create-Bid Flat Fee](#market-create-bid-flat-fee) + - [Market Seller Settlement Flat Fee](#market-seller-settlement-flat-fee) + - [Market Seller Settlement Ratio Fee](#market-seller-settlement-ratio-fee) + - [Market Buyer Settlement Flat Fee](#market-buyer-settlement-flat-fee) + - [Market Buyer Settlement Ratio Fee](#market-buyer-settlement-ratio-fee) + - [Market Inactive Indicator](#market-inactive-indicator) + - [Market User-Settle Indicator](#market-user-settle-indicator) + - [Market Permissions](#market-permissions) + - [Market Create-Ask Required Attributes](#market-create-ask-required-attributes) + - [Market Create-Bid Required Attributes](#market-create-bid-required-attributes) + - [Market Account](#market-account) + - [Market Details](#market-details) + - [Known Market ID](#known-market-id) + - [Last Market ID](#last-market-id) + - [Orders](#orders) + - [Ask Orders](#ask-orders) + - [Bid Orders](#bid-orders) + - [Last Order ID](#last-order-id) + - [Indexes](#indexes) + - [Market to Order](#market-to-order) + - [Owner Address to Order](#owner-address-to-order) + - [Asset Denom to Order](#asset-denom-to-order) + - [Market External ID to Order](#market-external-id-to-order) + + +## Params + +All params entries start with the type byte `0x00` followed by a string identifying the entry type. + +Each `` is stored as a `uint16` (2 bytes) in big-endian order. + +The byte `0x1E` is used in a few places as a record separator. + +See also: [Params](06_params.md#params). + + +### Default Split + +The default split defines the split amount (in basis points) the exchange receives of fees when there is not an applicable specific denom split. + +* Key:`0x00 | "split" (5 bytes)` +* Value: `` + + +### Specific Denom Split + +A specific denom split is a split amount (in basis points) the exchange receives of fees for fees paid in a specific denom. + +* Key: `0x00 | "split" (5 bytes) | ` +* Value: `` + +See also: [DenomSplit](06_params.md#denomsplit). + + +## Markets + +Each aspect of a market is stored separately for specific lookup. + +Each `` is a `uint32` (4 bytes) in big-endian order. + +Most aspects of a market have keys that start with the type byte `0x01`, followed by the `` then another type byte. + +See also: [Market](03_messages.md#market). + + +### Market Create-Ask Flat Fee + +One entry per configured denom. + +* Key: `0x01 | | 0x00 | ` +* Value: `` + + +### Market Create-Bid Flat Fee + +One entry per configured denom. + +* Key: `0x01 | | 0x01 | ` +* Value: `` + + +### Market Seller Settlement Flat Fee + +One entry per configured denom. + +* Key: `0x01 | | 0x02 | ` +* Value: `` + + +### Market Seller Settlement Ratio Fee + +One entry per configured price:fee denom pair. + +* Key: `0x01 | | 0x03 | | 0x1E | ` +* Value: ` | 0x1E | ` + +See also: [FeeRatio](03_messages.md#feeratio). + + +### Market Buyer Settlement Flat Fee + +One entry per configured denom. + +* Key: `0x01 | | 0x04 | ` +* Value: `` + + +### Market Buyer Settlement Ratio Fee + +One entry per configured price:fee denom pair. + +* Key: `0x01 | | 0x05 | | 0x1E | ` +* Value: ` | 0x1E | ` + +See also: [FeeRatio](03_messages.md#feeratio). + + +### Market Inactive Indicator + +When a market has `accepting_orders = false`, this state entry will exist. +When it has `accepting_orders = true`, this entry will not exist. 
+ +* Key: `0x01 | | 0x06` +* Value: `` + + +### Market User-Settle Indicator + +When a market has `allow_user_settlement = true`, this state entry will exist. +When it has `allow_user_settlement = false`, this entry will not exist. + +* Key: `0x01 | | 0x07` +* Value: `` + + +### Market Permissions + +When an address has a given permission in a market, the following entry will exist. + +* Key: `0x01 | | 0x08 | | | ` +* Value: `` + +The `` is a single byte as `uint8` with the same values as the enum entries, e.g. `PERMISSION_CANCEL` is `0x03`. + +See also: [AccessGrant](03_messages.md#accessgrant) and [Permission](03_messages.md#permission). + + +### Market Create-Ask Required Attributes + +* Key: `0x01 | | 0x09 | 0x00` +* Value: `` + + +### Market Create-Bid Required Attributes + +* Key: `0x01 | | 0x09 | 0x01` +* Value: `` + + +### Market Account + +Each market has an associated `MarketAccount` with an address derived from the `market_id`. +Each `MarketAccount` is stored using the `Accounts` module. + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/market.proto#L14-L26 + + +### Market Details + +The [MarketDetails](03_messages.md#marketdetails) are stored as part of the `MarketAccount` (in the `x/auth` module). + + +### Known Market ID + +These entries are used to indicate that a given market exists. + +* Key: `0x07 | ` +* Value: `` + + +### Last Market ID + +This indicates the last market-id that was auto-selected for use. + +When a `MsgGovCreateMarketRequest` is processed that has a `market_id` of `0` (zero), the next available market id is auto selected. +Starting with the number after what's in this state entry, each market id is sequentially checked until an available one is found. +The new market gets that id, then this entry is then updated to indicate what that was. + +* Key: `0x06` +* Value: `` + +When a `MsgGovCreateMarketRequest` is processed that has a non-zero `market_id`, this entry is not considered or altered. + + +## Orders + +Each `` is a `uint64` (8 bytes) in big-endian order. + +Orders are stored using the following format: + +* Key: `0x02 | ` +* Value ` | protobuf(order type)` + +The `` has these possible values: +* `0x00` => Ask Order +* `0x01` => Bid Order + + +### Ask Orders + +* Key: `0x02 | ` +* Value: `0x00 | protobuf(AskOrder)` + +See also: [AskOrder](03_messages.md#askorder). + + +### Bid Orders + +* Key: `0x02 | ` +* Value: `0x01 | protobuf(BidOrder)` + +See also: [BidOrder](03_messages.md#bidorder). + + +### Last Order ID + +Whenever an order is created, this value is looked up and incremented to get the new order's id. +Then this entry is updated to reflect the new order. + +* Key: `0x08` +* Value: `` + + +## Indexes + +Several index entries are maintained to help facilitate look-ups. + +The `` values are the same as those described in [Orders](#orders). + + +### Market to Order + +This index can be used to find orders in a given market. + +* Key: `0x03 | | ` +* Value: `` + + +### Owner Address to Order + +This index can be used to find orders with a given buyer or seller. + +* Key: `0x04 | | | ` +* Value: `` + + +### Asset Denom to Order + +This index can be used to find orders involving a given `assets` denom. + +* Key: `0x05 | | ` +* Value: `` + + +### Market External ID to Order + +This index is used to look up orders by their market and external id. 
+ +* Key: `0x09 | | ` +* Value: `` diff --git a/docs/sdk/exchange/03_messages.md b/docs/sdk/exchange/03_messages.md new file mode 100644 index 000000000..b2e18557f --- /dev/null +++ b/docs/sdk/exchange/03_messages.md @@ -0,0 +1,472 @@ +# Exchange Messages + +The exchange module has `Msg` endpoints for users, markets, and governance proposals. + +--- + + - [User Endpoints](#user-endpoints) + - [CreateAsk](#createask) + - [CreateBid](#createbid) + - [CancelOrder](#cancelorder) + - [FillBids](#fillbids) + - [FillAsks](#fillasks) + - [Market Endpoints](#market-endpoints) + - [MarketSettle](#marketsettle) + - [MarketSetOrderExternalID](#marketsetorderexternalid) + - [MarketWithdraw](#marketwithdraw) + - [MarketUpdateDetails](#marketupdatedetails) + - [MarketUpdateEnabled](#marketupdateenabled) + - [MarketUpdateUserSettle](#marketupdateusersettle) + - [MarketManagePermissions](#marketmanagepermissions) + - [MarketManageReqAttrs](#marketmanagereqattrs) + - [Governance Proposals](#governance-proposals) + - [GovCreateMarket](#govcreatemarket) + - [GovManageFees](#govmanagefees) + - [GovUpdateParams](#govupdateparams) + + +## User Endpoints + +There are several endpoints available for all users, but some markets might have restrictions on their use. + + +### CreateAsk + +An ask order indicates the desire to sell some `assets` at a minimum `price`. +They are created using the `CreateAsk` endpoint. + +Markets can define a set of attributes that an account must have in order to create ask orders in them. +So, this endpoint might not be available, depending on the `seller` and the `market_id`. +Markets can also disable order creation altogether, making this endpoint unavailable for that `market_id`. + +It is expected to fail if: +* The `market_id` does not exist. +* The market is not allowing orders to be created. +* The market requires attributes in order to create ask orders and the `seller` is missing one or more. +* The `assets` are not in the `seller`'s account. +* The `price` is in a denom not supported by the market. +* The `seller_settlement_flat_fee` is in a denom different from the `price`, and is not in the `seller`'s account. +* The `seller_settlement_flat_fee` is insufficient (as dictated by the market). +* The `external_id` value is not empty and is already in use in the market. +* The `order_creation_fee` is not in the `seller`'s account. + +#### MsgCreateAskRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L68-L76 + +#### AskOrder + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/orders.proto#L28-L53 + +#### MsgCreateAskResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L78-L82 + + +### CreateBid + +A bid order indicates the desire to buy some `assets` at a specific `price`. +They are created using the `CreateBid` endpoint. + +Markets can define a set of attributes that an account must have in order to create bid orders in them. +So, this endpoint might not be available, depending on the `buyer` and the `market_id`. +Markets can also disable order creation altogether, making this endpoint unavailable for that `market_id`. + +It is expected to fail if: +* The `market_id` does not exist. +* The market is not allowing orders to be created. +* The market requires attributes in order to create bid orders and the `buyer` is missing one or more. +* The `price` funds are not in the `buyer`'s account. 
+* The `price` is in a denom not supported by the market. +* The `buyer_settlement_fees` are not in the `buyer`'s account. +* The `buyer_settlement_fees` are insufficient (as dictated by the market). +* The `external_id` value is not empty and is already in use in the market. +* The `order_creation_fee` is not in the `buyer`'s account. + +#### MsgCreateBidRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L84-L92 + +#### BidOrder + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/orders.proto#L55-L78 + +#### MsgCreateBidResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L94-L98 + + +### CancelOrder + +Orders can be cancelled using the `CancelOrder` endpoint. +When an order is cancelled, the hold on its funds is released and the order is deleted. + +Users can cancel their own orders at any time. +Market actors with the `PERMISSION_CANCEL` permission can also cancel orders in that market at any time. + +Order creation fees are **not** refunded when an order is cancelled. + +It is expected to fail if: +* The order does not exist. +* The `signer` is not one of: + * The order's owner (e.g. `buyer` or `seller`). + * An account with `PERMISSION_CANCEL` in the order's market. + * The governance module account (`authority`). + +#### MsgCancelOrderRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L100-L110 + +#### MsgCancelOrderResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L112-L113 + + +### FillBids + +If a market allows user-settlement, users can use the `FillBids` endpoint to settle one or more bids with their own `assets`. +This is similar to an "Immediate or cancel" `AskOrder` with the sum of the provided bids' assets and prices. +Fees are paid the same as if an `AskOrder` were actually created and settled normally with the provided bids. +The `seller` must be allowed to create an `AskOrder` in the given market. + +It is expected to fail if: +* The market does not exist. +* The market is not allowing orders to be created. +* The market does not allow user-settlement. +* The market requires attributes in order to create ask orders and the `seller` is missing one or more. +* One or more `bid_order_ids` are not bid orders (or do not exist). +* One or more `bid_order_ids` are in a market other than the provided `market_id`. +* The `total_assets` are not in the `seller`'s account. +* The sum of bid order `assets` does not equal the provided `total_assets`. +* The `seller` or one of the `buyer`s are sanctioned, or are not allowed to possess the funds they are to receive. +* The `seller_settlement_flat_fee` is insufficient. +* The `seller_settlement_flat_fee` is not in the `seller`'s account (after `assets` and `price` funds have been transferred). +* The `ask_order_creation_fee` is insufficient. +* The `ask_order_creation_fee` is not in the `seller`'s account (after all other transfers have been made). 
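+
+A rough sketch of the `total_assets` consistency check from the list above: the `assets` of every listed bid order are summed per denom and must exactly equal the provided total. The types are simplified stand-ins; the real message uses SDK coin lists.
+
+```go
+package main
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// bidOrder keeps just the fields needed for this sketch.
+type bidOrder struct {
+	AssetsDenom  string
+	AssetsAmount int64
+}
+
+// sumAssets adds up the per-denom asset amounts of the listed bid orders.
+func sumAssets(bids []bidOrder) map[string]int64 {
+	total := map[string]int64{}
+	for _, b := range bids {
+		total[b.AssetsDenom] += b.AssetsAmount
+	}
+	return total
+}
+
+func main() {
+	bids := []bidOrder{{"cow", 2}, {"cow", 3}}
+
+	// The provided total_assets must exactly match the summed bid assets.
+	provided := map[string]int64{"cow": 5}
+	fmt.Println(reflect.DeepEqual(sumAssets(bids), provided)) // true: the totals line up
+
+	provided = map[string]int64{"cow": 4}
+	fmt.Println(reflect.DeepEqual(sumAssets(bids), provided)) // false: the request would fail
+}
+```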
+ +#### MsgFillBidsRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L115-L135 + +#### MsgFillBidsResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L137-L138 + + +### FillAsks + +If a market allows user-settlement, users can use the `FillAsks` endpoint to settle one or more asks with their own price funds. +This is similar to an "Immediate or cancel" `BidOrder` with the sum of the provided asks' assets and prices. +Fees are paid the same as if a `BidOrder` were actually created and settled normally with the provided asks. +The `buyer` must be allowed to create a `BidOrder` in the given market. + +It is expected to fail if: +* The market does not exist. +* The market is not allowing orders to be created. +* The market does not allow user-settlement. +* The market requires attributes in order to create bid orders and the `buyer` is missing one or more. +* One or more `ask_order_ids` are not ask orders (or do not exist). +* One or more `ask_order_ids` are in a market other than the provided `market_id`. +* The `total_price` funds are not in the `buyer`'s account. +* The sum of ask order `price`s does not equal the provided `total_price`. +* The `buyer` or one of the `seller`s are sanctioned, or are not allowed to possess the funds they are to receive. +* The `buyer_settlement_fees` are insufficient. +* The `buyer_settlement_fees` are not in the `buyer`'s account (after `assets` and `price` funds have been transferred). +* The `bid_order_creation_fee` is insufficient. +* The `bid_order_creation_fee` is not in the `buyer`'s account (after all other transfers have been made). + +#### MsgFillAsksRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L140-L161 + +#### MsgFillAsksResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L163-L164 + + +## Market Endpoints + +Several endpoints are only available to accounts designated by the market. +These are all also available for use in governance proposals using the governance module account (aka `authority`) as the `admin`. + + +### MarketSettle + +Orders are settled using the `MarketSettle` endpoint. +The `admin` must have the `PERMISSION_SETTLE` permission in the market (or be the `authority`). + +The market is responsible for identifying order matches. +Once identified, this endpoint is used to settle and clear the matched orders. + +All orders in a settlement must have the same asset denom and the same price denom. + +It is expected to fail if: +* The market does not exist. +* The `admin` does not have `PERMISSION_SETTLE` in the market, and is not the `authority`. +* One or more `ask_order_ids` are not ask orders, or do not exist, or are in a market other than the provided `market_id`. +* One or more `bid_order_ids` are not bid orders, or do not exist, or are in a market other than the provided `market_id`. +* There is more than one denom in the `assets` of all the provided orders. +* There is more than one denom in the `price` of all the provided orders. +* The market requires a seller settlement ratio fee, but there is no ratio defined for the `price` denom. +* Two or more orders are being partially filled. +* One or more orders cannot be filled at all with the `assets` or `price` funds available in the settlement. +* An order is being partially filled, but `expect_partial` is `false`. 
+* All orders are being filled in full, but `expect_partial` is `true`. +* One or more of the `buyer`s and `seller`s are sanctioned, or are not allowed to possess the funds they are to receive. + +#### MsgMarketSettleRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L166-L183 + +#### MsgMarketSettleResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L185-L186 + + +### MarketSetOrderExternalID + +Some markets might want to attach their own identifiers to orders. +This is done using the `MarketSetOrderExternalID` endpoint. +The `admin` must have the `PERMISSION_SET_IDS` permission in the market (or be the `authority`). + +Orders with external ids can be looked up using the [GetOrderByExternalID](05_queries.md#getorderbyexternalid) query. + +External ids must be unique in a market, but multiple markets can use the same external id. + +It is expected to fail if: +* The market does not exist. +* The `admin` does not have `PERMISSION_SET_IDS` in the market, and is not the `authority`. +* The order does not exist, or is in a different market than the provided `market_id`. +* The provided `external_id` equals the order's current `external_id`. +* The provided `external_id` is already associated with another order in the same market. + +#### MsgMarketSetOrderExternalIDRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L188-L202 + +#### MsgMarketSetOrderExternalIDResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L204-L205 + + +### MarketWithdraw + +When fees are collected by a market, they are given to the market's account. +Those funds can then be withdrawn/transferred using the `MarketWithdraw` endpoint. +The `admin` must have the `PERMISSION_WITHDRAW` permission in the market (or be the `authority`). + +It is expected to fail if: +* The market does not exist. +* The `admin` does not have `PERMISSION_WITHDRAW` in the market, and is not the `authority`. +* The `amount` funds are not in the market's account. +* The `to_address` is not allowed to possess the requested funds. + +#### MsgMarketWithdrawRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L207-L221 + +#### MsgMarketWithdrawResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L223-L224 + + +### MarketUpdateDetails + +A market's details can be updated using the `MarketUpdateDetails` endpoint. +The `admin` must have the `PERMISSION_UPDATE` permission in the market (or be the `authority`). + +It is expected to fail if: +* The market does not exist. +* The `admin` does not have `PERMISSION_UPDATE` in the market, and is not the `authority`. +* One or more of the [MarketDetails](#marketdetails) fields is too large. + +#### MsgMarketUpdateDetailsRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L226-L237 + +See also: [MarketDetails](#marketdetails). + +#### MsgMarketUpdateDetailsResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L239-L240 + + +### MarketUpdateEnabled + +A market can enable or disable order creation using the `MarketUpdateEnabled` endpoint. +The `admin` must have the `PERMISSION_UPDATE` permission in the market (or be the `authority`). 
+ +With `accepting_orders` = `false`, no one can create any new orders in the market, but existing orders can still be settled or cancelled. + +It is expected to fail if: +* The market does not exist. +* The `admin` does not have `PERMISSION_UPDATE` in the market, and is not the `authority`. +* The provided `accepting_orders` value equals the market's current setting. + +#### MsgMarketUpdateEnabledRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L242-L253 + +#### MsgMarketUpdateEnabledResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L255-L256 + + +### MarketUpdateUserSettle + +Using the `MarketUpdateUserSettle` endpoint, markets can control whether user-settlement is allowed. +The `admin` must have the `PERMISSION_UPDATE` permission in the market (or be the `authority`). + +The [FillBids](#fillbids) and [FillAsks](#fillasks) endpoints are only available for markets where `allow_user_settlement` = `true`. +The [MarketSettle](#marketsettle) endpoint is usable regardless of this setting. + +It is expected to fail if: +* The market does not exist. +* The `admin` does not have `PERMISSION_UPDATE` in the market, and is not the `authority`. +* The provided `allow_user_settlement` value equals the market's current setting. + +#### MsgMarketUpdateUserSettleRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L258-L271 + +#### MsgMarketUpdateUserSettleResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L273-L274 + + +### MarketManagePermissions + +Permissions in a market are managed using the `MarketManagePermissions` endpoint. +The `admin` must have the `PERMISSION_PERMISSIONS` permission in the market (or be the `authority`). + +It is expected to fail if: +* The market does not exist. +* The `admin` does not have `PERMISSION_PERMISSIONS` in the market, and is not the `authority`. +* One or more `revoke_all` addresses do not currently have any permissions in the market. +* One or more `to_revoke` entries do not currently exist in the market. +* One or more `to_grant` entries already exist in the market (after `revoke_all` and `to_revoke` are processed). + +#### MsgMarketManagePermissionsRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L276-L291 + +See also: [AccessGrant](#accessgrant) and [Permission](#permission). + +#### MsgMarketManagePermissionsResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L293-L295 + + +### MarketManageReqAttrs + +The attributes required to create orders in a market can be managed using the `MarketManageReqAttrs` endpoint. +The `admin` must have the `PERMISSION_ATTRIBUTES` permission in the market (or be the `authority`). + +See also: [Required Attributes](#required-attributes). + +It is expected to fail if: +* The market does not exist. +* The `admin` does not have `PERMISSION_ATTRIBUTES` in the market, and is not the `authority`. +* One or more attributes to add are already required by the market (for the given order type). +* One or more attributes to remove are not currently required by the market (for the given order type). 
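+
+As a rough illustration of the add/remove semantics above, the sketch below applies removals and then additions to a plain list of attribute names, rejecting removals of attributes that are not currently required and additions of attributes that already are. The helper name, the remove-then-add ordering, and the attribute values are assumptions for the example, not the module's actual implementation.
+
+```go
+package main
+
+import (
+    "fmt"
+    "sort"
+)
+
+// updateReqAttrs is a hypothetical helper that mirrors the failure cases above.
+func updateReqAttrs(current, toAdd, toRemove []string) ([]string, error) {
+    required := make(map[string]bool, len(current))
+    for _, a := range current {
+        required[a] = true
+    }
+    for _, a := range toRemove {
+        if !required[a] {
+            return nil, fmt.Errorf("attribute %q is not currently required", a)
+        }
+        delete(required, a)
+    }
+    for _, a := range toAdd {
+        if required[a] {
+            return nil, fmt.Errorf("attribute %q is already required", a)
+        }
+        required[a] = true
+    }
+    updated := make([]string, 0, len(required))
+    for a := range required {
+        updated = append(updated, a)
+    }
+    sort.Strings(updated)
+    return updated, nil
+}
+
+func main() {
+    attrs, err := updateReqAttrs([]string{"kyc.provenance.io"}, []string{"aml.provenance.io"}, nil)
+    fmt.Println(attrs, err)
+}
+```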
+ +#### MsgMarketManageReqAttrsRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L296-L313 + +#### MsgMarketManageReqAttrsResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L315-L316 + + +## Governance Proposals + +There are several governance-proposal-only endpoints. + + +### GovCreateMarket + +Market creation must be done via governance proposal with a `MsgGovCreateMarketRequest`. + +If the provided `market_id` is `0` (zero), the next available market id will be assigned to the new market. +If it is not zero, the provided `market_id` will be used (unless it's already in use by another market). +If it's already in use, the proposal will fail. + +It is recommended that the message be checked using the [ValidateCreateMarket](05_queries.md#validatecreatemarket) query first, to reduce the risk of failure or problems. + +It is expected to fail if: +* The provided `authority` is not the governance module's account. +* The provided `market_id` is not zero, and is already in use by another market. +* One or more of the [MarketDetails](#marketdetails) fields is too large. +* One or more required attributes are invalid. + +#### MsgGovCreateMarketRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L318-L329 + +#### Market + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/market.proto#L52-L103 + +#### MarketDetails + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/market.proto#L28-L40 + +* The `name` is limited to 250 characters max. +* The `description` is limited to 2000 characters max. +* The `website_url` is limited to 200 characters max. +* The `icon_uri` is limited to 2000 characters max. + +#### FeeRatio + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/market.proto#L105-L113 + +#### AccessGrant + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/market.proto#L115-L121 + +#### Permission + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/market.proto#L123-L141 + +#### MsgGovCreateMarketResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L331-L332 + + +### GovManageFees + +A market's fees can only be altered via governance proposal with a `MsgGovManageFeesRequest`. + +It is recommended that the message be checked using the [ValidateManageFees](05_queries.md#validatemanagefees) query first, to ensure the updated fees do not present any problems. + +It is expected to fail if: +* The provided `authority` is not the governance module's account. + +#### MsgGovManageFeesRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L334-L372 + +See also: [FeeRatio](#feeratio). + +#### MsgGovManageFeesResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L374-L375 + + +### GovUpdateParams + +The exchange module params are updated via governance proposal with a `MsgGovUpdateParamsRequest`. + +It is expected to fail if: +* The provided `authority` is not the governance module's account. 
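+
+These governance-only endpoints all reject any `authority` other than the governance module account, following the usual Cosmos SDK pattern sketched below; the helper and variable names are illustrative, not the module's actual implementation.
+
+```go
+package main
+
+import (
+    "fmt"
+
+    authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+    govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+)
+
+// govAuthority is the bech32 address of the governance module account.
+var govAuthority = authtypes.NewModuleAddress(govtypes.ModuleName).String()
+
+// validateAuthority rejects any authority other than the governance module account.
+func validateAuthority(authority string) error {
+    if authority != govAuthority {
+        return fmt.Errorf("expected %q as authority, got %q", govAuthority, authority)
+    }
+    return nil
+}
+
+func main() {
+    fmt.Println(validateAuthority(govAuthority)) // <nil>
+}
+```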
+ +#### MsgGovUpdateParamsRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L377-L386 + +See also: [Params](06_params.md#params). + +#### MsgGovUpdateParamsResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/tx.proto#L388-L389 diff --git a/docs/sdk/exchange/04_events.md b/docs/sdk/exchange/04_events.md new file mode 100644 index 000000000..5290a4151 --- /dev/null +++ b/docs/sdk/exchange/04_events.md @@ -0,0 +1,240 @@ +# Exchange Events + +The exchange module emits several events for various actions. + +--- + + - [EventOrderCreated](#eventordercreated) + - [EventOrderCancelled](#eventordercancelled) + - [EventOrderFilled](#eventorderfilled) + - [EventOrderPartiallyFilled](#eventorderpartiallyfilled) + - [EventOrderExternalIDUpdated](#eventorderexternalidupdated) + - [EventMarketWithdraw](#eventmarketwithdraw) + - [EventMarketDetailsUpdated](#eventmarketdetailsupdated) + - [EventMarketEnabled](#eventmarketenabled) + - [EventMarketDisabled](#eventmarketdisabled) + - [EventMarketUserSettleEnabled](#eventmarketusersettleenabled) + - [EventMarketUserSettleDisabled](#eventmarketusersettledisabled) + - [EventMarketPermissionsUpdated](#eventmarketpermissionsupdated) + - [EventMarketReqAttrUpdated](#eventmarketreqattrupdated) + - [EventMarketCreated](#eventmarketcreated) + - [EventMarketFeesUpdated](#eventmarketfeesupdated) + - [EventParamsUpdated](#eventparamsupdated) + + +## EventOrderCreated + +Any time an order is created, an `EventOrderCreated` is emitted. + +Event Type: `provenance.exchange.v1.EventOrderCreated` + +| Attribute Key | Attribute Value | +|---------------|-----------------------------------------------------------| +| order_id | The id of the order just created. | +| order_type | The type of the order just created (e.g. "ask" or "bid"). | +| market_id | The id of the market that the order was created in. | +| external_id | The external id of the order just created. | + + +## EventOrderCancelled + +When an order is cancelled (either by the owner or the market), an `EventOrderCancelled` is emitted. + +Event Type: `provenance.exchange.v1.EventOrderCancelled` + +| Attribute Key | Attribute Value | +|---------------|-------------------------------------------------------------| +| order_id | The id of the cancelled order. | +| cancelled_by | The bech32 address of the account that cancelled the order. | +| market_id | The id of the market that the cancelled order was in. | +| external_id | The external id of the order that was just cancelled. | + + +## EventOrderFilled + +When an order is filled in full, an `EventOrderFilled` is emitted. + +This event indicates that an order has been settled, cleared, and deleted. + +Event Type: `provenance.exchange.v1.EventOrderFilled` + +| Attribute Key | Attribute Value | +|---------------|------------------------------------------------------| +| order_id | The id of the settled order. | +| assets | The assets that were bought or sold (`Coin` string). | +| price | The price received (`Coin` string). | +| fees | The fees paid to settle the order (`Coins` string). | +| market_id | The id of the market that the order was in. | +| external_id | The external id of the order. | + +The `assets`, `price`, and `fees`, reflect the funds that were actually transferred. +E.g. when an ask order is settled for a higher price than set in the order, the `price` reflects what the seller actually received. 
+Similarly, the `fees` reflect the actual settlement fees paid (both flat and ratio) by the order's owner. + +If an order was previously partially filled, but now, the rest is being filled, this event is emitted. + + +## EventOrderPartiallyFilled + +When an order is partially filled, an `EventOrderPartiallyFilled` is emitted. + +This event indicates that some of an order was filled, but that the order has been reduced and still exists. + +Event Type: `provenance.exchange.v1.EventOrderPartiallyFilled` + +| Attribute Key | Attribute Value | +|---------------|--------------------------------------------------------------------------| +| order_id | The id of the partially settled order. | +| assets | The assets that were bought or sold (`Coin` string). | +| price | The price received (`Coin` string). | +| fees | The fees paid for the partial settlement of this order (`Coins` string). | +| market_id | The id of the market that the order is in. | +| external_id | The external id of the order. | + +The `assets`, `price`, and `fees`, reflect the funds that were actually transferred. + +If an order was previously partially filled, but now, the rest is being filled, an `EventOrderFilled` is emitted. + + +## EventOrderExternalIDUpdated + +When an order's external id is updated, an `EventOrderExternalIDUpdated` is emitted. + +Event Type: `provenance.exchange.v1.EventOrderExternalIDUpdated` + +| Attribute Key | Attribute Value | +|----------------|--------------------------------------------| +| order_id | The id of the updated order. | +| market_id | The id of the market that the order is in. | +| external_id | The new external id of the order. | + + +## EventMarketWithdraw + +Any time a market's funds are withdrawn, an `EventMarketWithdraw` is emitted. + +Event Type: `provenance.exchange.v1.EventMarketWithdraw` + +| Attribute Key | Attribute Value | +|---------------|--------------------------------------------------------------------------| +| market_id | The id of the market the funds were withdrawn from. | +| amount | The funds withdrawn (`Coins` string). | +| destination | The bech32 address string of the account that received the funds. | +| withdrawn_by | The bech32 address string of the admin account that made the withdrawal. | + + +## EventMarketDetailsUpdated + +When a market's details are updated, an `EventMarketDetailsUpdated` is emitted. + +Event Type: `provenance.exchange.v1.EventMarketDetailsUpdated` + +| Attribute Key | Attribute Value | +|---------------|-----------------------------------------------------------------------| +| market_id | The id of the updated market. | +| updated_by | The bech32 address string of the admin account that made the change. | + + +## EventMarketEnabled + +When a market's `accepting_orders` changes from `false` to `true`, an `EventMarketEnabled` is emitted. + +Event Type: `provenance.exchange.v1.EventMarketEnabled` + +| Attribute Key | Attribute Value | +|---------------|----------------------------------------------------------------------| +| market_id | The id of the updated market. | +| updated_by | The bech32 address string of the admin account that made the change. | + + +## EventMarketDisabled + +When a market's `accepting_orders` changes from `true` to `false`, an `EventMarketDisabled` is emitted. + +Event Type: `provenance.exchange.v1.EventMarketDisabled` + +| Attribute Key | Attribute Value | +|---------------|----------------------------------------------------------------------| +| market_id | The id of the updated market. 
| +| updated_by | The bech32 address string of the admin account that made the change. | + + +## EventMarketUserSettleEnabled + +When a market's `allow_user_settlement` changes from `false` to `true`, an `EventMarketUserSettleEnabled` is emitted. + +Event Type: `provenance.exchange.v1.EventMarketUserSettleEnabled` + +| Attribute Key | Attribute Value | +|---------------|----------------------------------------------------------------------| +| market_id | The id of the updated market. | +| updated_by | The bech32 address string of the admin account that made the change. | + + +## EventMarketUserSettleDisabled + +When a market's `allow_user_settlement` changes from `true` to `false`, an `EventMarketUserSettleDisabled` is emitted. + +Event Type: `provenance.exchange.v1.EventMarketUserSettleDisabled` + +| Attribute Key | Attribute Value | +|---------------|----------------------------------------------------------------------| +| market_id | The id of the updated market. | +| updated_by | The bech32 address string of the admin account that made the change. | + + +## EventMarketPermissionsUpdated + +Any time a market's permissions are managed, an `EventMarketPermissionsUpdated` is emitted. + +Event Type: `provenance.exchange.v1.EventMarketPermissionsUpdated` + +| Attribute Key | Attribute Value | +|---------------|----------------------------------------------------------------------| +| market_id | The id of the updated market. | +| updated_by | The bech32 address string of the admin account that made the change. | + + +## EventMarketReqAttrUpdated + +When a market's required attributes are altered, an `EventMarketReqAttrUpdated` is emitted. + +Event Type: `provenance.exchange.v1.EventMarketReqAttrUpdated` + +| Attribute Key | Attribute Value | +|---------------|----------------------------------------------------------------------| +| market_id | The id of the updated market. | +| updated_by | The bech32 address string of the admin account that made the change. | + + +## EventMarketCreated + +When a market is created, an `EventMarketCreated` is emitted. + +Event Type: `provenance.exchange.v1.EventMarketCreated` + +| Attribute Key | Attribute Value | +|---------------|---------------------------| +| market_id | The id of the new market. | + + +## EventMarketFeesUpdated + +When a market's fees are updated, an `EventMarketFeesUpdated` is emitted. + +Event Type: `provenance.exchange.v1.EventMarketFeesUpdated` + +| Attribute Key | Attribute Value | +|---------------|-------------------------------| +| market_id | The id of the updated market. | + + +## EventParamsUpdated + +An `EventParamsUpdated` is emitted when the exchange module's params are changed. + +Event Type: `provenance.exchange.v1.EventParamsUpdated` + +| Attribute Key | Attribute Value | +|---------------|-----------------| +| (none) | | diff --git a/docs/sdk/exchange/05_queries.md b/docs/sdk/exchange/05_queries.md new file mode 100644 index 000000000..1dec79ce4 --- /dev/null +++ b/docs/sdk/exchange/05_queries.md @@ -0,0 +1,256 @@ +# Exchange Queries + +There are several queries for getting information about things in the exchange module. 
+ +--- + + - [OrderFeeCalc](#orderfeecalc) + - [GetOrder](#getorder) + - [GetOrderByExternalID](#getorderbyexternalid) + - [GetMarketOrders](#getmarketorders) + - [GetOwnerOrders](#getownerorders) + - [GetAssetOrders](#getassetorders) + - [GetAllOrders](#getallorders) + - [GetMarket](#getmarket) + - [GetAllMarkets](#getallmarkets) + - [Params](#params) + - [ValidateCreateMarket](#validatecreatemarket) + - [ValidateMarket](#validatemarket) + - [ValidateManageFees](#validatemanagefees) + + +## OrderFeeCalc + +The `OrderFeeCalc` query is used to find out the various required fee options for a given order. +The idea is that you can provide your [AskOrder](03_messages.md#askorder) or [BidOrder](03_messages.md#bidorder) in this query in order to identify what fees you'll need to pay. + +Either an `ask_order` or a `bid_order` must be provided, but not both. + +Each response field is a list of options available for the requested order. +If a response field is empty, then no fee of that type is required. + +When creating the `AskOrder`, choose one entry from `creation_fee_options` to provide as the `order_creation_fee`. +Then, choose one entry from `settlement_flat_fee_options` and provide that as the `seller_settlement_flat_fee`. +For ask orders, the `settlement_ratio_fee_options` is purely informational and is the minimum that seller's settlement ratio fee that will be for the order. + +When creating the `BidOrder`, choose one entry from `creation_fee_options` to provide as the `order_creation_fee`. +Then choose one entry from each of `settlement_flat_fee_options` and `settlement_ratio_fee_options`, add them together, and provide that as the `buyer_settlement_fees`. + +### QueryOrderFeeCalcRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L96-L103 + +See also: [AskOrder](03_messages.md#askorder), and [BidOrder](03_messages.md#bidorder). + +### QueryOrderFeeCalcResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L105-L124 + + +## GetOrder + +Use the `GetOrder` query to look up an order by its id. + +### QueryGetOrderRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L126-L130 + +### QueryGetOrderResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L132-L136 + +### Order + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/orders.proto#L13-L26 + +See also: [AskOrder](03_messages.md#askorder), and [BidOrder](03_messages.md#bidorder). + + +## GetOrderByExternalID + +Orders with external ids can be looked up using the `GetOrderByExternalID` query. + +### QueryGetOrderByExternalIDRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L138-L144 + +### QueryGetOrderByExternalIDResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L146-L150 + +See also: [Order](#order). + + +## GetMarketOrders + +To get all of the orders in a given market, use the `GetMarketOrders` query. +Results can be optionally limited by order type (e.g. "ask" or "bid") and/or a minimum (exclusive) order id. + +This query is paginated. 
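+
+The sketch below shows one way a Go client might page through `GetMarketOrders` using the pagination `next_key`. The `x/exchange` import path and the request/response field names are assumptions based on the proto definitions referenced below; check the generated code before relying on them.
+
+```go
+package exchangeclient
+
+import (
+    "context"
+
+    "github.com/cosmos/cosmos-sdk/types/query"
+
+    // Assumed import path for the generated exchange types.
+    exchange "github.com/provenance-io/provenance/x/exchange"
+)
+
+// listMarketOrders pages through every order in a market.
+func listMarketOrders(ctx context.Context, client exchange.QueryClient, marketID uint32) ([]*exchange.Order, error) {
+    var orders []*exchange.Order
+    var nextKey []byte
+    for {
+        resp, err := client.GetMarketOrders(ctx, &exchange.QueryGetMarketOrdersRequest{
+            MarketId:   marketID,
+            Pagination: &query.PageRequest{Key: nextKey, Limit: 100},
+        })
+        if err != nil {
+            return nil, err
+        }
+        orders = append(orders, resp.Orders...)
+        if resp.Pagination == nil || len(resp.Pagination.NextKey) == 0 {
+            return orders, nil
+        }
+        nextKey = resp.Pagination.NextKey
+    }
+}
+```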
+ +### QueryGetMarketOrdersRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L152-L163 + +### QueryGetMarketOrdersResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L165-L172 + +See also: [Order](#order). + + +## GetOwnerOrders + +To get all of the orders with a specific owner (e.g. buyer or seller), use the `GetOwnerOrders` query. +Results can be optionally limited by order type (e.g. "ask" or "bid") and/or a minimum (exclusive) order id. + +This query is paginated. + +### QueryGetOwnerOrdersRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L174-L185 + +### QueryGetOwnerOrdersResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L187-L194 + +See also: [Order](#order). + + +## GetAssetOrders + +To get all of the orders with a specific asset denom, use the `GetAssetOrders` query. +Results can be optionally limited by order type (e.g. "ask" or "bid") and/or a minimum (exclusive) order id. + +This query is paginated. + +### QueryGetAssetOrdersRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L196-L207 + +### QueryGetAssetOrdersResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L209-L216 + +See also: [Order](#order). + + +## GetAllOrders + +To get all existing orders, use the `GetAllOrders` query. + +This query is paginated. + +### QueryGetAllOrdersRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L218-L222 + +### QueryGetAllOrdersResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L224-L231 + +See also: [Order](#order). + + +## GetMarket + +All the information and setup for a market can be looked up using the `GetMarket` query. + +### QueryGetMarketRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L233-L237 + +### QueryGetMarketResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L239-L245 + +See also: [Market](03_messages.md#market). + + +## GetAllMarkets + +Use the `GetAllMarkets` query to get brief information about all markets. + +### QueryGetAllMarketsRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L247-L251 + +### QueryGetAllMarketsResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L253-L260 + +### MarketBrief + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/market.proto#L42-L50 + + +## Params + +The exchange module params can be looked up using the `Params` query. + +### QueryParamsRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L262-L263 + +### QueryParamsResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L265-L269 + +See also: [Params](06_params.md#params). + + +## ValidateCreateMarket + +It's possible for a [MsgGovCreateMarketRequest](03_messages.md#msggovcreatemarketrequest) to result in a market setup that is problematic. 
+To verify that one is not problematic, this `ValidateCreateMarket` can be used. + +If the result has: +* `gov_prop_will_pass` = `false`, then either submitting the proposal will fail, or the `Msg` will result in an error ("failed") after the proposal is passed. The `error` field will have details. +* `gov_prop_will_pass` = `true` and a non-empty `error` field, then the `Msg` would successfully run, but would result in the problems identified in the `error` field. +* `gov_prop_will_pass` = `true` and an empty `error` field, then there are no problems with the provided `Msg`. + +### QueryValidateCreateMarketRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L271-L275 + +See also: [MsgGovCreateMarketRequest](03_messages.md#msggovcreatemarketrequest). + +### QueryValidateCreateMarketResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L277-L287 + + +## ValidateMarket + +An existing market's setup can be checked for problems using the `ValidateMarket` query. + +Any problems detected will be returned in the `error` field. + +### QueryValidateMarketRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L289-L293 + +### QueryValidateMarketResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L295-L299 + + +## ValidateManageFees + +It's possible for a [MsgGovManageFeesRequest](03_messages.md#msggovmanagefeesrequest) to result in a problematic setup for a market. +To verify that one does not result in such a state, use this `ValidateManageFees` query. + +If the result has: +* `gov_prop_will_pass` = `false`, then either submitting the proposal will fail, or the `Msg` will result in an error ("failed") after the proposal is passed. The `error` field will have details. +* `gov_prop_will_pass` = `true` and a non-empty `error` field, then the `Msg` would successfully run, but would result in the problems identified in the `error` field. +* `gov_prop_will_pass` = `true` and an empty `error` field, then there are no problems with the provided `Msg`. + +### QueryValidateManageFeesRequest + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L301-L305 + +See also: [MsgGovManageFeesRequest](03_messages.md#msggovmanagefeesrequest). + +### QueryValidateManageFeesResponse + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/query.proto#L307-L317 diff --git a/docs/sdk/exchange/06_params.md b/docs/sdk/exchange/06_params.md new file mode 100644 index 000000000..d2450a867 --- /dev/null +++ b/docs/sdk/exchange/06_params.md @@ -0,0 +1,25 @@ +# Exchange Params + +The exchange module params dictate how much of the fees (collected by markets) go to the exchange/chain. +The split values are in basis points and are limited to between `0` and `10,000` (both inclusive). +The `default_split` is used when a specific `DenomSplit` does not exist for a given denom. + +* A split of `0` is 0% and would mean that the exchange receives none of the fees (of the applicable denom), and the market keeps all of it. +* A split of `500` is 5%, and would mean that the exchange receives 5% of the fees (of the applicable denom) collected by any market, and the market keeps 95%. 
+* A split of `10,000` is 100% and would mean that the exchange receives all of the fees (of the applicable denom) and the market gets nothing. + +The default `Params` have a `default_split` of `500` and no `DenomSplit`s. + +Params are set using the [GovUpdateParams](03_messages.md#govupdateparams) governance proposal endpoint. + +The current params can be looked up using the [Params](05_queries.md#params) query. + +See also: [Exchange Fees](01_concepts.md#exchange-fees). + +## Params + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/params.proto#L11-L19 + +## DenomSplit + ++++ https://github.com/provenance-io/provenance/blob/v1.17.0/proto/provenance/exchange/v1/params.proto#L21-L28 diff --git a/docs/sdk/exchange/README.md b/docs/sdk/exchange/README.md new file mode 100644 index 000000000..a51d8b0d3 --- /dev/null +++ b/docs/sdk/exchange/README.md @@ -0,0 +1,16 @@ +# `x/exchange` + +## Overview + +The exchange module is used to facilitate the trading of on-chain assets. +Funds being traded remain (on hold) in the buyers' and sellers' accounts. +Settlement is done directly between the two parties without the funds touching a 3rd party's account. + +## Contents + +1. **[Concepts](01_concepts.md)** +2. **[State](02_state.md)** +3. **[Messages](03_messages.md)** +4. **[Events](04_events.md)** +5. **[Queries](05_queries.md)** +6. **[Params](06_params.md)** diff --git a/docs/sdk/hold/01_concepts.md b/docs/sdk/hold/01_concepts.md new file mode 100644 index 000000000..855f94197 --- /dev/null +++ b/docs/sdk/hold/01_concepts.md @@ -0,0 +1,31 @@ +# Concepts + +The `x/hold` module is designed for use by other modules that need to lock funds in place in an account. + + + - [Holds](#holds) + - [Managing Holds](#managing-holds) + - [Locked Coins](#locked-coins) + +## Holds + +"Holds" are an amount of funds in an account that must not be moved out of the account. +When a hold is placed on some funds, a record of them is created in the `x/hold` module. +When funds are released from a hold, those records are updated appropriately. + +Funds with a hold on them remain in the owners account but cannot be spent, sent, delegated or otherwise removed from the account until they are released from hold. + +A hold can only be placed on funds that would otherwise be spendable. E.g. you can place a hold on vested funds, but not unvested funds. + +## Managing Holds + +The `x/hold` module does not have any `Msg` or `Tx` endpoints for managing holds. +Putting holds on funds and releasing holds are actions that are only available via keeper functions. +It is expected that other modules will use the keeper functions (e.g.`AddHold` and `ReleaseHold`) as needed. + +## Locked Coins + +The `x/hold` module injects a `GetLockedCoinsFn` into the bank keeper in order to tell it which funds have a hold on them. +This allows the bank module and keeper functions to take holds into account when reporting bank account information. +Specifically, the bank keeper functions, `LockedCoins`, and `SpendableCoins` will reflect holds, as well as the `SpendableBalances` query. +The `AllBalances` query and similar keeper functions will still include the held funds though, since the funds actually **are** still in the account. diff --git a/docs/sdk/hold/02_state.md b/docs/sdk/hold/02_state.md new file mode 100644 index 000000000..4ed015fbd --- /dev/null +++ b/docs/sdk/hold/02_state.md @@ -0,0 +1,22 @@ +# State + +The `x/hold` module uses key/value pairs to store hold-related data in state. 
+
+## Holds
+
+Holds on funds are recorded by address and denom using the following record format:
+
+```
+0x00 | len(<addr>) | <addr> | <denom> -> <amount>
+```
+
+Where:
+
+* `0x00` is the type byte, and has a value of `0` for these records.
+* `len(<addr>)` is a single byte containing the length of the `<addr>` as an 8-bit big-endian value.
+* `<addr>` is the raw bytes of the address of the account that the funds are in.
+* `<denom>` is the denomination string of the coin being held.
+* `<amount>` is a string representation of the numerical amount being held.
+
+Records are created, increased, and decreased as needed.
+If the `<amount>` is reduced to zero, the record is deleted.
diff --git a/docs/sdk/hold/03_events.md b/docs/sdk/hold/03_events.md
new file mode 100644
index 000000000..c1938592d
--- /dev/null
+++ b/docs/sdk/hold/03_events.md
@@ -0,0 +1,59 @@
+# Events
+
+The `x/hold` module emits the following events:
+
+  - [EventHoldAdded](#eventholdadded)
+  - [EventHoldReleased](#eventholdreleased)
+
+## EventHoldAdded
+
+This event is emitted when a hold is placed on some funds.
+
+`@Type`: `provenance.hold.v1.EventHoldAdded`
+
+| Attribute Key | Attribute Value                          |
+|---------------|------------------------------------------|
+| address       | bech32 string of account with the funds  |
+| amount        | string of coins newly placed on hold     |
+| reason        | human readable string                    |
+
+All values are wrapped in double quotes.
+
+Example:
+
+```json
+{
+  "type": "provenance.hold.v1.EventHoldAdded",
+  "attributes": [
+    {"key": "address", "value": "\"pb1v9jxgun9wde476twta6xse2lv4mx2mn56s5hm4\""},
+    {"key": "amount", "value": "\"1000000000nhash,5000musdf\""},
+    {"key": "reason", "value": "\"order 66\""}
+  ]
+}
+```
+
+## EventHoldReleased
+
+This event is emitted when some held funds are released.
+
+`@Type`: `provenance.hold.v1.EventHoldReleased`
+
+| Attribute Key | Attribute Value                          |
+|---------------|------------------------------------------|
+| address       | bech32 string of account with the funds  |
+| amount        | string of the coins just released        |
+
+Both values are wrapped in double quotes.
+
+Example:
+
+```json
+{
+  "type": "provenance.hold.v1.EventHoldReleased",
+  "attributes": [
+    {"key": "address", "value": "\"pb1v9jxgun9wde476twta6xse2lv4mx2mn56s5hm4\""},
+    {"key": "amount", "value": "\"1000000000nhash,5000musdf\""}
+  ]
+}
+```
diff --git a/docs/sdk/hold/04_queries.md b/docs/sdk/hold/04_queries.md
new file mode 100644
index 000000000..a50e37ed2
--- /dev/null
+++ b/docs/sdk/hold/04_queries.md
@@ -0,0 +1,41 @@
+# Queries
+
+The `x/hold` module provides some queries for looking up hold-related data.
+
+  - [GetHolds](#getholds)
+  - [GetAllHolds](#getallholds)
+
+## GetHolds
+
+To look up the funds on hold for an account, use the `GetHolds` query.
+The query takes in an `address` and returns a coins `amount`.
+
+Request:
+
++++ https://github.com/provenance-io/provenance/blob/dwedul/1607-in-place-escrow/proto/provenance/hold/v1/query.proto#L28-L35
+
+Response:
+
++++ https://github.com/provenance-io/provenance/blob/dwedul/1607-in-place-escrow/proto/provenance/hold/v1/query.proto#L37-L45
+
+It is expected to fail if the `address` is invalid or missing.
+
+If the account doesn't exist, or no coins are on hold for the account, the amount will be empty.
+
+## GetAllHolds
+
+To get all funds on hold for all accounts, use the `GetAllHolds` query.
+The query takes in pagination parameters and returns a list of `address`/`amount` pairs.
+ +Request: + ++++ https://github.com/provenance-io/provenance/blob/dwedul/1607-in-place-escrow/proto/provenance/hold/v1/query.proto#L47-L54 + +Response: + ++++ https://github.com/provenance-io/provenance/blob/dwedul/1607-in-place-escrow/proto/provenance/hold/v1/query.proto#L56-L62 + ++++ https://github.com/provenance-io/provenance/blob/dwedul/1607-in-place-escrow/proto/provenance/hold/v1/hold.proto#L12-L19 + +It is expected to fail if the pagination parameters are invalid. diff --git a/docs/sdk/hold/README.md b/docs/sdk/hold/README.md new file mode 100644 index 000000000..4d157a23a --- /dev/null +++ b/docs/sdk/hold/README.md @@ -0,0 +1,12 @@ +# `x/hold` + +## Overview + +The Hold module keeps track of funds in an account that have a hold placed on them, and are therefore locked. + +## Contents + +1. **[Concepts](01_concepts.md)** +2. **[State](02_state.md)** +3. **[Events](03_events.md)** +4. **[Queries](04_queries.md)** diff --git a/docs/sdk/ibchooks/README.md b/docs/sdk/ibchooks/README.md new file mode 100644 index 000000000..b11976a1c --- /dev/null +++ b/docs/sdk/ibchooks/README.md @@ -0,0 +1,311 @@ +# `x/ibchooks` + +## Notice + +**This module was forked from https://github.com/osmosis-labs/osmosis/tree/main/x/ibchooks ** + +_Unfortunately the original version could not be directly used due to extensive osmosis references, an incompatible Cosmos SDK version, and lack of support for IBC v6.x._ + +## Wasm Hooks + +The wasm hook is an IBC middleware which is used to allow ICS-20 token transfers to initiate contract calls. +This allows cross-chain contract calls, that involve token movement. +This is useful for a variety of usecases. +One of primary importance is cross-chain swaps, which is an extremely powerful primitive. + +The mechanism enabling this is a `memo` field on every ICS20 transfer packet as of [IBC v3.4.0](https://medium.com/the-interchain-foundation/moving-beyond-simple-token-transfers-d42b2b1dc29b). +Wasm hooks is an IBC middleware that parses an ICS20 transfer, and if the `memo` field is of a particular form, executes a wasm contract call. We now detail the `memo` format for `wasm` contract calls, and the execution guarantees provided. + +### Cosmwasm Contract Execution Format + +Before we dive into the IBC metadata format, we show the cosmwasm execute message format, so the reader has a sense of what are the fields we need to be setting in. +The cosmwasm `MsgExecuteContract` is defined [here](https://github.com/CosmWasm/wasmd/blob/4fe2fbc8f322efdaf187e2e5c99ce32fd1df06f0/x/wasm/types/tx.pb.go#L340-L349 +) as the following type: + +```go +type MsgExecuteContract struct { + // Sender is the actor that committed the message in the sender chain + Sender string + // Contract is the address of the smart contract + Contract string + // Msg json encoded message to be passed to the contract + Msg RawContractMessage + // Funds coins that are transferred to the contract on execution + Funds sdk.Coins +} +``` + +So we detail where we want to get each of these fields from: + +* Sender: We cannot trust the sender of an IBC packet, the counterparty chain has full ability to lie about it. +We cannot risk this sender being confused for a particular user or module address on Osmosis. +So we replace the sender with an account to represent the sender prefixed by the channel and a wasm module prefix. +This is done by setting the sender to `Bech32(Hash("ibc-wasm-hook-intermediary" || channelID || sender))`, where the channelId is the channel id on the local chain. 
+* Contract: This field should be directly obtained from the ICS-20 packet metadata +* Msg: This field should be directly obtained from the ICS-20 packet metadata. +* Funds: This field is set to the amount of funds being sent over in the ICS 20 packet. One detail is that the denom in the packet is the counterparty chains representation of the denom, so we have to translate it to Osmosis' representation. + +> **_WARNING:_** Due to a [bug](https://twitter.com/SCVSecurity/status/1682329758020022272) in the packet forward middleware, we cannot trust the sender from chains that use PFM. Until that is fixed, we recommend chains to not trust the sender on contracts executed via IBC hooks. + +So our constructed cosmwasm message that we execute will look like: + +```go +msg := MsgExecuteContract{ + // Sender is the that actor that signed the messages + Sender: "osmo1-hash-of-channel-and-sender", + // Contract is the address of the smart contract + Contract: packet.data.memo["wasm"]["ContractAddress"], + // Msg json encoded message to be passed to the contract + Msg: packet.data.memo["wasm"]["Msg"], + // Funds coins that are transferred to the contract on execution + Funds: sdk.NewCoin{Denom: ibc.ConvertSenderDenomToLocalDenom(packet.data.Denom), Amount: packet.data.Amount} +``` + +### ICS20 packet structure + +So given the details above, we propogate the implied ICS20 packet data structure. +ICS20 is JSON native, so we use JSON for the memo format. + +```json +{ + //... other ibc fields that we don't care about + "data":{ + "denom": "denom on counterparty chain (e.g. uatom)", // will be transformed to the local denom (ibc/...) + "amount": "1000", + "sender": "addr on counterparty chain", // will be transformed + "receiver": "contract addr or blank", + "memo": { + "wasm": { + "contract": "osmo1contractAddr", + "msg": { + "raw_message_fields": "raw_message_data", + } + } + } + } +} +``` + +An ICS20 packet is formatted correctly for wasmhooks iff the following all hold: + +* `memo` is not blank +* `memo` is valid JSON +* `memo` has at least one key, with value `"wasm"` +* `memo["wasm"]` has exactly two entries, `"contract"` and `"msg"` +* `memo["wasm"]["msg"]` is a valid JSON object +* `receiver == "" || receiver == memo["wasm"]["contract"]` + +We consider an ICS20 packet as directed towards wasmhooks iff all of the following hold: + +* `memo` is not blank +* `memo` is valid JSON +* `memo` has at least one key, with name `"wasm"` + +If an ICS20 packet is not directed towards wasmhooks, wasmhooks doesn't do anything. +If an ICS20 packet is directed towards wasmhooks, and is formated incorrectly, then wasmhooks returns an error. + +### Execution flow + +Pre wasm hooks: + +* Ensure the incoming IBC packet is cryptogaphically valid +* Ensure the incoming IBC packet is not timed out. + +In Wasm hooks, pre packet execution: + +* Ensure the packet is correctly formatted (as defined above) +* Edit the receiver to be the hardcoded IBC module account + +In wasm hooks, post packet execution: + +* Construct wasm message as defined before +* Execute wasm message +* if wasm message has error, return ErrAck +* otherwise continue through middleware + +## Ack callbacks + +A contract that sends an IBC transfer, may need to listen for the ACK from that packet. To allow +contracts to listen on the ack of specific packets, we provide Ack callbacks. + +### Design + +The sender of an IBC transfer packet may specify a callback for when the ack of that packet is received in the memo +field of the transfer packet. 
+ +Crucially, _only_ the IBC packet sender can set the callback. + +### Use case + +The crosschain swaps implementation sends an IBC transfer. If the transfer were to fail, we want to allow the sender +to be able to retrieve their funds (which would otherwise be stuck in the contract). To do this, we allow users to +retrieve the funds after the timeout has passed, but without the ack information, we cannot guarantee that the send +hasn't failed (i.e.: returned an error ack notifying that the receiving change didn't accept it) + +### Implementation + +#### Callback information in memo + +For the callback to be processed, the transfer packet's memo should contain the following in its JSON: + +`{"ibc_callback": "osmo1contractAddr"}` + +The wasm hooks will keep the mapping from the packet's channel and sequence to the contract in storage. When an ack is +received, it will notify the specified contract via a sudo message. + +#### Interface for receiving the Acks and Timeouts + +The contract that awaits the callback should implement the following interface for a sudo message: + +```rust +#[cw_serde] +pub enum IBCLifecycleComplete { + #[serde(rename = "ibc_ack")] + IBCAck { + /// The source channel (osmosis side) of the IBC packet + channel: String, + /// The sequence number that the packet was sent with + sequence: u64, + /// String encoded version of the ack as seen by OnAcknowledgementPacket(..) + ack: String, + /// Weather an ack is a success of failure according to the transfer spec + success: bool, + }, + #[serde(rename = "ibc_timeout")] + IBCTimeout { + /// The source channel (osmosis side) of the IBC packet + channel: String, + /// The sequence number that the packet was sent with + sequence: u64, + }, +} + +/// Message type for `sudo` entry_point +#[cw_serde] +pub enum SudoMsg { + #[serde(rename = "ibc_lifecycle_complete")] + IBCLifecycleComplete(IBCLifecycleComplete), +} +``` + +### Async Acks + +IBC supports the ability to send an ack back to the sender of the packet asynchronously. This is useful for +cases where the packet is received, but the ack is not immediately known. For example, if the packet is being +forwarded to another chain, the ack may not be known until the packet is received on the other chain. + +Note this ACK does not imply full revertability. It is possible that unrevertable actions have occurred +even if there is an Ack Error. (This is distinct from the behavior of ICS-20 transfers). If you want to ensure +revertability, your contract should be implemented in a way that actions are not finalized until a success ack +is received. + +#### Use case + +Async acks are useful in cases where the contract needs to wait for a response from another chain before +returning a result to the caller. + +For example, if you want to send tokens to another chain after the contract is executed you need to +add a new ibc packet and wait for its ack. + +In the synchronous acks case, the caller will receive an ack from the contract before the second packet +has been processed. This means that the caller will have to wait (and potentially track) if the second +packet has been processed successfully or not. + +With async acks, you contract can take this responsibility and only send an ack to the caller once the +second packet has been processed + +#### Making contract Acks async + +To support this, we allow contracts to return an `IBCAsync` response from the function being executed when the +packet is received. That response specifies that the ack should be handled asynchronously. 
+ +Concretely the contract should return: + +```rust +#[cw_serde] +pub struct OnRecvPacketAsyncResponse { + pub is_async_ack: bool, +} +``` + +if `is_async_ack` is set to true, `OnRecvPacket` will return `nil` and the ack will not be written. Instead, the +contract wil be stored as the "ack actor" for the packet so that only that contract is allowed to send an ack +for it. + +It is up to the contract developers to decide which conditions will trigger the ack to be sent. + +#### Sending an async ack + +To send the async ack, the contract needs to send the MsgEmitIBCAck message to the chain. This message will +then make a sudo call to the contract requesting the ack and write the ack to state. + +That message can be specified in the contract as: + +```rust +#[derive( + Clone, + PartialEq, + Eq, + ::prost::Message, + serde::Serialize, + serde::Deserialize, + schemars::JsonSchema, + CosmwasmExt, +)] +#[proto_message(type_url = "/osmosis.ibchooks.MsgEmitIBCAck")] +pub struct MsgEmitIBCAck { + #[prost(string, tag = "1")] + pub sender: ::prost::alloc::string::String, + #[prost(uint64, tag = "2")] + pub packet_sequence: u64, + #[prost(string, tag = "3")] + pub channel: ::prost::alloc::string::String, +} +``` + +The contract is expected to implement the following sudo message handler: + +```rust +#[cw_serde] +pub enum IBCAsyncOptions { + #[serde(rename = "request_ack")] + RequestAck { + /// The source channel (osmosis side) of the IBC packet + source_channel: String, + /// The sequence number that the packet was sent with + packet_sequence: u64, + }, +} + +#[cw_serde] +pub enum SudoMsg { + #[serde(rename = "ibc_async")] + IBCAsync(IBCAsyncOptions), +} +``` + +and that sudo call should return an `IBCAckResponse`: + +```rust +#[cw_serde] +#[serde(tag = "type", content = "content")] +pub enum IBCAck { + AckResponse{ + packet: Packet, + contract_ack: ContractAck, + }, + AckError { + packet: Packet, + error_description: String, + error_response: String, + } +} +``` + +Note: the sudo call is required to potentially allow anyone to send the MsgEmitIBCAck message. For now, however, +this is artificially limited so that the message can only be send by the same contract. This could be expanded in +the future if needed. + +# Testing strategy + +See go tests.` \ No newline at end of file diff --git a/docs/sdk/ibcratelimit/README.md b/docs/sdk/ibcratelimit/README.md new file mode 100644 index 000000000..5e0f47379 --- /dev/null +++ b/docs/sdk/ibcratelimit/README.md @@ -0,0 +1,316 @@ +# ibcratelimit + +## Notice + +**This module was forked from https://github.com/osmosis-labs/osmosis/tree/main/x/ibc-rate-limit ** + +_Unfortunately the original version could not be directly used due to extensive osmosis references, an incompatible Cosmos SDK version, and lack of support for IBC v6.x._ + + +# IBC Rate Limit + +The IBC Rate Limit module is responsible for adding a governance-configurable rate limit to IBC transfers. +This is a safety control, intended to protect assets on osmosis in event of: + +* a bug/hack on osmosis +* a bug/hack on the counter-party chain +* a bug/hack in IBC itself + +This is done in exchange for a potential (one-way) bridge liveness tradeoff, in periods of high deposits or withdrawals. 
+
+The architecture of this package is a minimal Go package which implements an [IBC Middleware](https://github.com/cosmos/ibc-go/blob/f57170b1d4dd202a3c6c1c61dcf302b6a9546405/docs/ibc/middleware/develop.md) that wraps the [ICS20 transfer](https://ibc.cosmos.network/main/apps/transfer/overview.html) app, and calls into a cosmwasm contract.
+The cosmwasm contract then has all of the actual IBC rate limiting logic.
+The Cosmwasm code can be found in the [`contracts`](./contracts/) package, with bytecode findable in the [`bytecode`](./bytecode/) folder. The cosmwasm VM usage allows Osmosis chain governance to change this safety control with no hard forks, via a parameter change proposal, a great mitigation for faster threat adaptivity.
+
+The module is currently in a state suitable for some initial governance-settable rate limits for high-value bridged assets.
+It's not in its long-term / end state for all channels by any means, but it does act as a strong protection we
+can instantiate today for high-value IBC connections.
+
+## Motivation
+
+The motivation for IBC-rate-limit comes from the empirical observation that a rate limit would have massively reduced the amount of assets stolen in these blockchain bridge hacks:
+
+- [Polynetwork Bridge Hack ($611 million)](https://rekt.news/polynetwork-rekt/)
+- [BNB Bridge Hack ($586 million)](https://rekt.news/bnb-bridge-rekt/)
+- [Wormhole Bridge Hack ($326 million)](https://rekt.news/wormhole-rekt/)
+- [Nomad Bridge Hack ($190 million)](https://rekt.news/nomad-rekt/)
+- [Harmony Bridge Hack ($100 million)](https://rekt.news/harmony-rekt/) - (Would require rate limit + monitoring)
+- [Dragonberry IBC bug](https://forum.cosmos.network/t/ibc-security-advisory-dragonberry/7702) (can't yet disclose amount at risk, but was saved due to being found first by altruistic Osmosis core developers)
+
+In the presence of a software bug on Osmosis, IBC itself, or on a counterparty chain, we would like to prevent the bridge from being fully depegged.
+This stems from the idea that a 30% asset depeg is ~infinitely better than a 100% depeg.
+It's _crazy_ that today these complex bridged assets can instantly go to 0 in the event of a bug.
+The goal of a rate limit is to raise an alert that something has potentially gone wrong, allowing validators and developers to have time to analyze, react, and protect larger portions of user funds.
+
+The thesis of this is that it is worthwhile to sacrifice liveness in the case of legitimate demand to send extreme amounts of funds, in order to prevent the terrible long-tail risk of losing all funds.
+Rate limits aren't the end-all of safety controls; they're merely the simplest automated one. More should be explored and added onto IBC!
+
+## Rate limit types
+
+We express rate limits in time-based periods.
+This means we set rate limits for (say) 6-hour, daily, and weekly intervals.
+The rate limit for a given time period stores the relevant amount of assets at the start of the rate limit.
+Rate limits are then defined in percentage terms of the asset.
+The time windows for rate limits are currently _not_ rolling; they have discrete start/end times.
+
+We allow setting separate rate limits for the inflow and outflow of assets.
+We do all of our rate limits based on the _net flow_ of assets on a channel pair. This prevents DOS issues where someone repeatedly sends assets back and forth to trigger rate limits and break liveness.
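+
+As a toy illustration (not the contract's actual code) of the percentage-quota and net-flow ideas above, the sketch below tracks net outflow for a single path against a quota expressed as a percentage of the channel value captured at the start of the window:
+
+```go
+package main
+
+import "fmt"
+
+// flow tracks value moved through a single (denom, channel) path during one window.
+type flow struct {
+    channelValue int64 // asset value captured at the start of the window
+    netOutflow   int64 // outflows minus inflows so far in the window
+}
+
+// allowSend reports whether sending amount keeps the net outflow within
+// quotaPercent of the channel value, and records the send if it does.
+func (f *flow) allowSend(amount, quotaPercent int64) bool {
+    limit := f.channelValue * quotaPercent / 100
+    if f.netOutflow+amount > limit {
+        return false // rate limit exceeded
+    }
+    f.netOutflow += amount
+    return true
+}
+
+// recordRecv offsets outflow with an inflow (net-flow accounting).
+func (f *flow) recordRecv(amount int64) {
+    f.netOutflow -= amount
+}
+
+func main() {
+    f := &flow{channelValue: 1000}
+    fmt.Println(f.allowSend(250, 30)) // true: net outflow 250 <= 300 (30% of 1000)
+    fmt.Println(f.allowSend(100, 30)) // false: 250+100 would exceed 300
+    f.recordRecv(100)                 // an inflow reduces the net outflow to 150
+    fmt.Println(f.allowSend(100, 30)) // true: 150+100 <= 300
+}
+```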
+ +We currently envision creating two kinds of rate limits: + +* Per denomination rate limits + - allows safety statements like "Only 30% of Stars on Osmosis can flow out in one day" or "The amount of Atom on Osmosis can at most double per day". +* Per channel rate limits + - Limit the total inflow and outflow on a given IBC channel, based on "USDC" equivalent, using Osmosis as the price oracle. + +We currently only implement per denomination rate limits for non-native assets. We do not yet implement channel based rate limits. + +Currently these rate limits automatically "expire" at the end of the quota duration. TODO: Think of better designs here. E.g. can we have a constant number of subsequent quotas start filled? Or perhaps harmonically decreasing amounts of next few quotas pre-filled? Halted until DAO override seems not-great. + +## Instantiating rate limits + +Today all rate limit quotas must be set manually by governance. +In the future, we should design towards some conservative rate limit to add as a safety-backstop automatically for channels. +Ideas for how this could look: + +* One month after a channel has been created, automatically add in some USDC-based rate limit +* One month after governance incentivizes an asset, add on a per-denomination rate limit. + +Definitely needs far more ideation and iteration! + +## Parameterizing the rate limit + +One element is we don't want any rate limit timespan thats too short, e.g. not enough time for humans to react to. So we wouldn't want a 1 hour rate limit, unless we think that if its hit, it could be assessed within an hour. + +### Handling rate limit boundaries + +We want to be safe against the case where say we have a daily rate limit ending at a given time, and an adversary attempts to attack near the boundary window. +We would not like them to be able to "double extract funds" by timing their extraction near a window boundary. + +Admittedly, not a lot of thought has been put into how to deal with this well. +Right now we envision simply handling this by saying if you want a quota of duration D, instead include two quotas of duration D, but offset by `D/2` from each other. + +Ideally we can change windows to be more 'rolling' in the future, to avoid this overhead and more cleanly handle the problem. (Perhaps rolling ~1 hour at a time) + +### Inflow parameterization + +The "Inflow" side of a rate limit is essentially protection against unforeseen bug on a counterparty chain. +This can be quite conservative (e.g. bridged amount doubling in one week). This covers a few cases: + +* Counter-party chain B having a token theft attack + - TODO: description of how this looks +* Counter-party chain B runaway mint + - TODO: description of how this looks +* IBC theft + - TODO: description of how this looks + +It does get more complex when the counterparty chain is itself a DEX, but this is still much more protection than nothing. + +### Outflow parameterization + +The "Outflow" side of a rate limit is protection against a bug on Osmosis OR IBC. +This has potential for much more user-frustrating issues, if set too low. +E.g. if theres some event that causes many people to suddenly withdraw many STARS or many USDC. + +So this parameterization has to contend with being a tradeoff of withdrawal liveness in high volatility periods vs being a crucial safety rail, in event of on-Osmosis bug. 
+
+TODO: Better fill out
+
+### Example suggested parameterization
+
+## Code structure
+
+As mentioned at the beginning of the README, the go code is a relatively minimal ICS 20 wrapper that dispatches the relevant calls to a cosmwasm contract that implements the rate limiting functionality.
+
+### Go Middleware
+
+To achieve this, the middleware needs to implement the `porttypes.Middleware` interface and the
+`porttypes.ICS4Wrapper` interface. This allows the middleware to send and receive IBC messages by wrapping
+any IBC module, and be used as an ICS4 wrapper by a transfer module (for sending packets or writing acknowledgements).
+
+Of those interfaces, just the following methods have custom logic:
+
+* `ICS4Wrapper.SendPacket` forwards to the contract, with the intent of tracking the value sent via an ibc channel
+* `Middleware.OnRecvPacket` forwards to the contract, with the intent of tracking the value received via an ibc channel
+* `Middleware.OnAcknowledgementPacket` forwards to the contract, with the intent of undoing the tracking of a sent packet if the acknowledgment is not a success
+* `OnTimeoutPacket` forwards to the contract, with the intent of undoing the tracking of a sent packet if the packet times out (is not relayed)
+
+All other methods from those interfaces are passthroughs to the underlying implementations.
+
+#### Parameters
+
+The middleware uses the following parameters:
+
+| Key             | Type   |
+| --------------- | ------ |
+| ContractAddress | string |
+
+1. **ContractAddress** -
+   The contract address is the address of an instantiated version of the contract provided under `./contracts/`
+
+### Cosmwasm Contract Concepts
+
+Something to keep in mind with all of the code is that we have to reason separately about every item in the following matrix:
+
+| Native Token         | Non-Native Token         |
+| -------------------- | ------------------------ |
+| Send Native Token    | Send Non-Native Token    |
+| Receive Native Token | Receive Non-Native Token |
+| Timeout Native Send  | Timeout Non-native Send  |
+
+(Error ACK can reuse the same code as timeout)
+
+TODO: Spend more time on sudo messages in the following description. We need to better describe how we map the quota concepts onto the code.
+Need to describe how we get the quota beginning balance, and that it's different for sends and receives.
+Explain intricacies of tracking that a timeout and/or ErrorAck must appear from the same quota, else we ignore its update to the quotas.
+
+The tracking contract uses the following concepts:
+
+1. **RateLimit** - tracks the value flow transferred and the quota for a path.
+2. **Path** - is a (denom, channel) pair.
+3. **Flow** - tracks the value that has moved through a path during the current time window.
+4. **Quota** - is the percentage of the denom's total value that can be transferred through the path in a given period of time (duration)
+
+#### Messages
+
+The contract specifies the following messages:
+
+##### Query
+
+* GetQuotas - Returns the quotas for a path
+
+##### Exec
+
+* AddPath - Adds a list of quotas for a path
+* RemovePath - Removes a path
+* ResetPathQuota - If a rate limit has been reached, the contract's governance address can reset the quota so that transfers are allowed again
+
+##### Sudo
+
+Sudo messages can only be executed by the chain.
+
+* SendPacket - Increments the amount used out of the send quota and checks that the send is allowed.
If it isn't, it will return a RateLimitExceeded error
+* RecvPacket - Increments the amount used out of the receive quota and checks that the receive is allowed. If it isn't, it will return a RateLimitExceeded error
+* UndoSend - If a send has failed, the undo message is used to remove its cost from the send quota
+
+All of these messages receive the packet from the chain and extract the necessary information to process the packet and determine if it should be rate limited.
+
+### Necessary information
+
+To determine if a packet should be rate limited, we need:
+
+* Channel: The channel on the Osmosis side: `packet.SourceChannel` for sends, and `packet.DestinationChannel` for receives.
+* Denom: The denom of the token being transferred as known on the Osmosis side (more on that below)
+* Channel Value: The total value of the channel denominated in `Denom` (i.e.: channel-17 is worth 10k osmo).
+* Funds: the amount being transferred
+
+#### Notes on Channel
+The contract also supports quotas on a custom channel called "any" that is checked on every transfer. If either the
+transfer channel or the "any" channel have a quota that has been filled, the transaction will be rate limited.
+
+#### Notes on Denom
+We always use the denom as represented on Osmosis. For native assets that is the local denom, and for non-native
+assets it's the "ibc" prefix and the sha256 hash of the denom trace (`ibc/...`).
+
+##### Sends
+
+For native denoms, we can just use the denom in the packet. If the denom is invalid, it will fail somewhere else along the chain. Example result: `uosmo`
+
+For non-native denoms, the contract needs to hash the denom trace and append it to the `ibc/` prefix. The
+contract always receives the parsed denom (i.e.: `transfer/channel-32/uatom` instead of
+`ibc/27394FB092D2ECCD56123C74F36E4C1F926001CEADA9CA97EA622B25F41E5EB2`). This is because of the order in which
+the middleware is called. When sending a non-native denom, the packet contains `transfer/source-channel/denom` as it
+is built on the `relay.SendTransfer()` in the transfer module and then passed to the middleware. Example result: `ibc/`
+
+##### Receives
+
+This behaves slightly differently depending on whether the asset is an osmosis asset that was sent to the counterparty and is being
+returned to the chain, or the asset is being received by the chain and originates on the counterparty. In ibc this
+is called being a "source" or a "sink" respectively.
+
+If the chain is a sink for the denom, we build the local denom by prefixing the port and the channel
+(`transfer/local-channel`) and hashing that denom. Example result: `ibc/`
+
+If the chain is the source for the denom, there are two possibilities:
+
+* The token is a native token, in which case we just remove the prefix added by the counterparty. Example result: `uosmo`
+* The token is a non-native token, in which case we remove the extra prefix and hash it. Example result `ibc/`
+
+#### Notes on Channel Value
+We have iterated on different strategies for calculating the channel value. Our preferred strategy is the following:
+* For non-native tokens (`ibc/...`), the channel value should be the supply of those tokens in Osmosis
+* For native tokens, the channel value should be the total amount of tokens in escrow across all ibc channels
+
+The latter ensures the limits are lower and represent the amount of native tokens that exist outside Osmosis.
This is
+beneficial as we assume the majority of native tokens exist on the native chain and the amount of "normal" ibc transfers is
+proportional to the tokens that have left the chain.
+
+This strategy cannot be implemented at the moment because IBC does not track the amount of tokens in escrow across
+all channels ([github issue](https://github.com/cosmos/ibc-go/issues/2664)). Instead, we use the current supply on
+Osmosis for all denoms (i.e.: treat native and non-native tokens the same way). Once that ticket is fixed, we will
+update this strategy.
+
+##### Caching
+
+The channel value varies constantly. To have better predictability, and to avoid issues of the value growing if there is
+a potential infinite mint bug, we cache the channel value at the beginning of the period for every quota.
+
+This means that if we have a daily quota of 1% of the osmo supply, and the channel value is 1M osmo at the beginning of
+the quota, no more than 100k osmo can be transferred during that day. If 10M osmo were to be minted or IBC'd in during that
+period, the quota will not increase until the period expires. Then it will be 1% of the new channel value (~11M)
+
+### Integration
+
+The rate limit middleware wraps the `transferIBCModule` and is added as the entry route for IBC transfers.
+
+The module is also provided to the underlying `transferIBCModule` as its `ICS4Wrapper`; previously, this would have
+pointed to a channel, which also implements the `ICS4Wrapper` interface.
+
+This integration can be seen in [osmosis/app/keepers/keepers.go](https://github.com/osmosis-labs/osmosis/blob/main/app/keepers/keepers.go)
+
+## Testing strategy
+
+A general testing strategy is as follows:
+
+* Setup two chains.
+* Send some tokens from A->B and some from B->A (so that there are IBC tokens to play with on both sides)
+* Add the rate limiter on A with low limits (i.e. 1% of supply)
+* Test Function for chains A' and B' and denom d
+  * Send some d tokens from A' to B' and get close to the limit.
+  * Do the same transfer making sure the amount is above the quota and verify it fails with the rate limit error
+  * Wait until the reset time has passed, and send again. The transfer should now succeed
+* Repeat the above test for the following combination of chains and tokens: `(A,B,a)`, `(B,A,a)`, `(A,B,b)`, `(B,A,b)`,
+  where `a` and `b` are native tokens to chains A and B respectively.
+
+For more comprehensive tests we can also:
+* Add a third chain C and make sure everything works properly for C tokens that have been transferred to A and to B
+* Test that the contract's gov address can reset rate limits if the quota has been hit
+* Test the queries for getting information about the state of the quotas
+* Test that rate limit symmetries hold (i.e.: sending a token through a rate-limited channel and then sending it back
+  reduces the rate limits by the same amount that they were increased during the first send)
+* Ensure that the channels between the test chains have different names (A->B="channel-0", B->A="channel-1", for example)
+
+## Known Future work
+
+Items that have been highlighted above:
+
+* Making automated rate limits get added for channels, instead of manual configuration only
+* Improving parameterization strategies / data analysis
+* Adding the USDC based rate limits
+* We need better strategies for how rate limits "expire".
+ +Not yet highlighted + +* Making monitoring tooling to know when approaching rate limiting and when they're hit +* Making tooling to easily give us summaries we can use, to reason about "bug or not bug" in event of rate limit being hit +* Enabling ways to pre-declare large transfers so as to not hit rate limits. + * Perhaps you can on-chain declare intent to send these assets with a large delay, that raises monitoring but bypasses rate limits? + * Maybe contract-based tooling to split up the transfer suffices? +* Strategies to account for high volatility periods without hitting rate limits + * Can imagine "Hop network" style markets emerging + * Could imagine tieng it into looking at AMM volatility, or off-chain oracles + * but these are both things we should be wary of security bugs in. + * Maybe [constraint based programming with tracking of provenance](https://youtu.be/HB5TrK7A4pI?t=2852) as a solution +* Analyze changing denom-based rate limits, to just overall withdrawal amount for Osmosis \ No newline at end of file diff --git a/docs/sdk/marker/01_state.md b/docs/sdk/marker/01_state.md new file mode 100644 index 000000000..70059aaed --- /dev/null +++ b/docs/sdk/marker/01_state.md @@ -0,0 +1,180 @@ +# State + + + - [Marker Accounts](#marker-accounts) + - [Marker Types](#marker-types) + - [Access Grants](#access-grants) + - [Fixed Supply vs Floating](#fixed-supply-vs-floating) + - [Forced Transfers](#forced-transfers) + - [Required Attributes](#required-attributes) + - [Marker Address Cache](#marker-address-cache) + - [Marker Net Asset Value](#marker-net-asset-value) + - [Params](#params) + + + +## Marker Accounts + +Markers are represented as a type that extends the `base_account` type of the `auth` SDK module. As a valid account a +marker is able to perform normal functions such as receiving and holding coins, and having a defined address that can +be queried against for balance information from the `bank` module. + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/marker.proto#L28-L63 +```go +type MarkerAccount struct { + // cosmos base_account including address and account number + Address string + AccountNumber uint64 + + PubKey *types.Any // NOTE: not used for marker, it is not possible to sign for a marker account directly + Sequence uint64 // NOTE: always zero on marker + + // Address that owns the marker configuration. This account must sign any requests + // to change marker config (only valid for statuses prior to finalization) + Manager string + + // Access control lists. Account addresses are assigned control of the marker using these entries + AccessControl []AccessGrant + + // Indicates the current status of this marker record. (Pending, Active, Cancelled, etc) + Status MarkerStatus + + // value denomination. + Denom string + + // the total supply expected for a marker. This is the amount that is minted when a marker is created. For + // SupplyFixed markers this value will be enforced through an invariant that mints/burns from this account to + // maintain a match between this value and the supply on the chain (maintained by bank module). For all non-fixed + // supply markers this value will be set to zero when the marker is activated. + Supply Int + + // Marker type information. The type of marker controls behavior of its account. + MarkerType MarkerType + + // A fixed supply will mint additional coin automatically if the total supply decreases below a set value. 
This + // may occur if the coin is burned or an account holding the coin is slashed. (default: true) + SupplyFixed bool + + // indicates that governance based control is allowed for this marker + AllowGovernanceControl bool + + // Whether an admin can transfer restricted coins from a 3rd-party account without their signature. + AllowForcedTransfer bool + + // list of required attributes on restricted marker in order to send and receive transfers if sender does not have + // transfer authority + RequiredAttributes []string +} +``` + +### Marker Types + +There are currently two basic types of markers. + +- **Coin** - A marker with a type of coin represents a standard fungible token with zero or more coins in circulation +- **Restricted Coin** - Restricted Coins work just like a regular coin with one important difference--the bank module + "send_enabled" status for the coin is set to false. This means that a user account that holds the coin can not send + it to another account directly using the bank module. In order to facilitate exchange there must be an address set + on the marker with the "Transfer" permission grant. This address must sign calls to the marker module to move these + coins between accounts using the `transfer` method on the api. + +### Access Grants + +Control of a marker account is configured through a list of access grants assigned to the marker when it is created +or applied afterwards through the API calls to add or remove access. + +```go +const ( + // ACCESS_UNSPECIFIED defines a no-op vote option. + Access_Unknown Access = 0 + // ACCESS_MINT is the ability to increase the supply of a marker + Access_Mint Access = 1 + // ACCESS_BURN is the ability to decrease the supply of the marker using coin held by the marker. + Access_Burn Access = 2 + // ACCESS_DEPOSIT is the ability to set a marker reference to this marker in the metadata/scopes module + Access_Deposit Access = 3 + // ACCESS_WITHDRAW is the ability to remove marker references to this marker in from metadata/scopes or + // transfer coin from this marker account to another account. + Access_Withdraw Access = 4 + // ACCESS_DELETE is the ability to move a proposed, finalized or active marker into the cancelled state. This + // access also allows cancelled markers to be marked for deletion + Access_Delete Access = 5 + // ACCESS_ADMIN is the ability to add access grants for accounts to the list of marker permissions. + Access_Admin Access = 6 + // ACCESS_TRANSFER is the ability to invoke a send operation using the marker module to facilitate exchange. + // This capability is useful when the marker denomination has "send enabled = false" preventing normal bank transfer + Access_Transfer Access = 7 +) + +// A structure associating a list of access permissions for a given account identified by is address +type AccessGrant struct { + // A bech32 encoded address string of the account the permissions are assigned to + Address string + // An array of enum values as defined above + Permissions AccessList +} +``` + +### Fixed Supply vs Floating + +A marker can be configured to have a fixed supply or one that is allowed to float. A marker will always mint an amount +of coin indicated in its `supply` field when it is activated. For markers that have a fixed supply an invariant check +is enforced that ensures the supply of the marker alway matches the configured value. For a floating supply no +additional checks or adjustments are performed and the supply value is set to zero when activated. 
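+
+To make the fixed-supply invariant concrete, below is a minimal Go sketch of how a begin-block style check might reconcile a marker's configured supply with the amount in circulation; the correction behavior it models is described in detail in the next subsection and in the Begin-Block documentation. The types and helpers are simplified stand-ins, not the marker module's actual implementation, and the example denom is hypothetical.
+
+```go
+package main
+
+import (
+    "errors"
+    "fmt"
+)
+
+// fixedSupplyMarker is a simplified stand-in for a marker with SupplyFixed=true.
+type fixedSupplyMarker struct {
+    Denom         string
+    Supply        uint64 // configured target supply
+    Circulation   uint64 // total supply reported by the bank module
+    EscrowBalance uint64 // amount of its own denom held by the marker account
+}
+
+// rebalance mirrors the begin-block behavior described in this document:
+// mint the shortfall into the marker account, or burn the excess from it.
+// If the marker does not hold enough coin to burn, the invariant is violated.
+func (m *fixedSupplyMarker) rebalance() error {
+    switch {
+    case m.Circulation < m.Supply:
+        diff := m.Supply - m.Circulation
+        m.Circulation += diff
+        m.EscrowBalance += diff // minted coin is placed in the marker account
+    case m.Circulation > m.Supply:
+        diff := m.Circulation - m.Supply
+        if m.EscrowBalance < diff {
+            return errors.New("invariant violation: insufficient escrow to burn excess supply")
+        }
+        m.EscrowBalance -= diff
+        m.Circulation -= diff
+    }
+    return nil
+}
+
+func main() {
+    // e.g. a slash burned coin, leaving circulation below the fixed target.
+    m := fixedSupplyMarker{Denom: "examplecoin", Supply: 1_000_000, Circulation: 990_000, EscrowBalance: 0}
+    if err := m.rebalance(); err != nil {
+        fmt.Println("halt:", err)
+        return
+    }
+    fmt.Printf("circulation=%d escrow=%d\n", m.Circulation, m.EscrowBalance)
+}
+```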
+
+#### When a Marker has a Fixed Supply that does not match target
+
+Under certain conditions a marker may begin a block with a total supply in circulation that does not match its configured amount.
+When this occurs the marker will take action to correct the balance of coin supply.
+
+**A fixed supply marker will attempt to automatically correct a supply imbalance at the start of the next block**
+
+This means that if the supply in circulation exceeds the configured amount, the attempted fix is to burn the required
+amount from the marker's account itself. If this fails an invariant will be broken and the chain will halt.
+
+If the requested supply is greater than the amount in circulation (as occurs when a coin is burned in a slash) the
+marker module will mint the difference between expected supply and circulation and place the created coin in the marker's
+account.
+
+A supply imbalance typically occurs during the genesis of a blockchain when a fixed supply for a marker is less than
+the initial balances assigned to accounts. It may also occur if the marker is associated with the bond denom of the
+chain and a slash penalty is assessed resulting in the burning of a portion of coins.
+
+### Forced Transfers
+
+A marker with the **Restricted Coin** type can be configured to allow forced transfer of funds for that marker's denom.
+A forced transfer is one where the `admin` (with `TRANSFER` access) is different than the `from` address. In such cases,
+if the marker allows forced transfers, the transfer is allowed. If forced transfers are not allowed, an `admin` cannot
+transfer the marker's coins from another account unless granted permission to do so via `authz`.
+
+Markers with **Coin** type cannot be configured to allow forced transfers.
+
+### Required Attributes
+
+A marker with the **Restricted Coin** type can be configured to allow transfers with a normal `MsgSend` to addresses that have defined attributes.
+This can be configured by setting the `required_attributes` array on the Marker. When a `MsgSend` transaction is executed and the coin type is `restricted`, the `required_attributes` are checked. If the `ToAddress` associated with the `MsgSend` command has **all** the required attributes, the transfer will be executed.
+
+A single wildcard can only be used for the starting name of the required attribute. For example, `*.provenance.io` is a valid wildcard attribute. Invalid wildcard usages include forms such as `*kyc.provenance.io` or `kyc.*.provenance.io`. Matching will be accepted for any number of child level names, i.e. `one.two.three.provenance.io` and `one.provenance.io` will be accepted for `*.provenance.io`.
+
+## Marker Address Cache
+
+For performance purposes the marker module maintains a KVStore entry with the address of every marker account. This
+allows for cheap iterator operations over all marker accounts without having to filter through the native account
+iterator from the auth module.
+
+- `0x01 | Address -> Address`
+
+## Marker Net Asset Value
+
+A marker can support multiple distinct net asset values assigned to track settlement pricing information on-chain. The `price` attribute denotes the value assigned to the marker for a specific asset's associated `volume`. For instance, in a scenario where 10 billion `nhash` holds a value of 15¢, the corresponding `volume` should reflect the quantity of 10,000,000,000. The `update_block_height` attribute captures the block height when the update occurred.
+ ++++ https://github.com/provenance-io/provenance/blob/25070572cc898c476f5bb1a816c6c1c4d07e3d38/proto/provenance/marker/v1/marker.proto#L96-L104 + +## Params + +Params is a module-wide configuration structure that stores system parameters +and defines overall functioning of the marker module. + +- Params: `Paramsspace("marker") -> legacy_amino(params)` + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/marker.proto#L14-L25 diff --git a/docs/sdk/marker/02_state_transitions.md b/docs/sdk/marker/02_state_transitions.md new file mode 100644 index 000000000..23a8fbd3e --- /dev/null +++ b/docs/sdk/marker/02_state_transitions.md @@ -0,0 +1,95 @@ +# State Transitions + +This document describes the state transition operations pertaining markers: + + + - [Undefined](#undefined) + - [Proposed](#proposed) + - [Finalized](#finalized) + - [Active](#active) + - [Cancelled](#cancelled) + - [Destroyed](#destroyed) + + + +## Undefined + +The undefined status is not allowed and its use will be flagged as an error condition. + +## Proposed + +The proposed status is the initial state of a marker. A marker in the `proposed` status will accept +changes to supply via the `mint`/`burn` methods, updates to the access list, and state transitions when +called by the address set in the `manager` property. + +On Transition: +- Proposed is the initial state of a marker by default. It is not possible to transition to this state from any other. + +Next Status: +- **Finalized** +- **Cancelled** + +## Finalized + +The finalized state of the marker is used to verify the readiness of a marker before activating it. + +Requirements: +- Marker must exist +- Caller address must match the `manager` address on the marker +- Current status of marker must be `Proposed` +- Supply of the marker must meet or exceed the amount of any existing coin in circulation on the network of + the denom of the marker. (This will only apply ) + +On Transition: +- Marker status is set to `Finalized` +- A marker finalize typed event is dispatched + +Next Status: +- **Active** +- **Cancelled** + +## Active + +An active marker is considered ready for use. + +On Transition: +- Marker status is set to `Active` +- Requested coin supply is minted and placed in the marker account +- For markers with a `fixed_supply` the Invariant checks are performed in `begin_block` +- Permissions as assigned in the access list are enforced for any management actions performed +- The `manager` field is cleared. All management actions require explicit permission grants. +- A marker activate typed event is dispatched + +Next Status: +- **Cancelled** + +## Cancelled + +A cancelled marker will have no coin supply in circulation. Markers may remain in the Cancelled state long term to +prevent their denom reuse by another future marker. If a marker is no longer needed at all then the **Destroyed** +status maybe appropriate. + +Requirements: +- Caller must have the `delete` permission assigned to their address or +- Caller must be the manager of the marker (applies only to proposed markers that are Cancelled) +- The supply of the coin in circulation outside of the marker account must be zero. + +On Transition: +- Marker status is set to `Cancelled` +- A marker Cancelled typed event is dispatched + +Next Status: +- **Destroyed** + +## Destroyed + +A destroyed marker is denoted as available for subsequent removal from the state store by clean up processes. 
Markers +in the destroyed status will be removed in the Begin Block ABCI handler at the beginning of the next block (v1.3.0+). + +On Transition: +- All supply of the coin denom will be burned. +- Marker status is set to `Destroyed` +- Marker will ultimately be deleted from the KVStore during the next ABCI Begin Block (v1.3.0+) + +Next Status: +- **None** diff --git a/docs/sdk/marker/03_messages.md b/docs/sdk/marker/03_messages.md new file mode 100644 index 000000000..38f9438aa --- /dev/null +++ b/docs/sdk/marker/03_messages.md @@ -0,0 +1,427 @@ +# Messages + +In this section we describe the processing of the marker messages and the corresponding updates to the state. +All created/modified state objects specified by each message are defined within the +[state](./02_state_transitions.md) section. + + + - [Msg/AddMarkerRequest](#msgaddmarkerrequest) + - [Msg/AddAccessRequest](#msgaddaccessrequest) + - [Msg/DeleteAccessRequest](#msgdeleteaccessrequest) + - [Msg/FinalizeRequest](#msgfinalizerequest) + - [Msg/ActivateRequest](#msgactivaterequest) + - [Msg/CancelRequest](#msgcancelrequest) + - [Msg/DeleteRequest](#msgdeleterequest) + - [Msg/MintRequest](#msgmintrequest) + - [Msg/BurnRequest](#msgburnrequest) + - [Msg/WithdrawRequest](#msgwithdrawrequest) + - [Msg/TransferRequest](#msgtransferrequest) + - [Msg/IbcTransferRequest](#msgibctransferrequest) + - [Msg/SetDenomMetadataRequest](#msgsetdenommetadatarequest) + - [Msg/AddFinalizeActivateMarkerRequest](#msgaddfinalizeactivatemarkerrequest) + - [Msg/GrantAllowanceRequest](#msggrantallowancerequest) + - [Msg/SupplyIncreaseProposalRequest](#msgsupplyincreaseproposalrequest) + - [Msg/UpdateRequiredAttributesRequest](#msgupdaterequiredattributesrequest) + - [Msg/UpdateSendDenyListRequest](#msgupdatesenddenylistrequest) + - [Msg/UpdateForcedTransferRequest](#msgupdateforcedtransferrequest) + - [Msg/SetAccountDataRequest](#msgsetaccountdatarequest) + - [Msg/AddNetAssetValuesRequest](#msgaddnetassetvaluesrequest) + + +## Msg/AddMarkerRequest + +A marker is created using the Add Marker service message. +The created marker can not be directly added in an Active (or Cancelled/Destroyed) status. Markers +must have a valid supply and denomination value. + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L77-L93 + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L95-96 + + +This service message is expected to fail if: +- The Denom string: + - Is already in use by another marker + - Does not conform to the "Marker Denom Validation Expression" (`unrestricted_denom_regex` param) + - Does not conform to the base coin denom validation expression parameter +- The supply value: + - Is less than zero + - Is greater than the "max supply" parameter +- The Marker Status: + - Is Active (markers can not be created as active the must transition from Finalized) + - Is Cancelled + - Is Destroyed +- The manager address is invalid. (Note: an empty manager address will be set to the Msg from address) + +The service message will create a marker account object and request the auth module persist it. No coin will be minted +or disbursed as a result of adding a marker using this endpoint. + +If issued via governance proposal, and has a `from_address` of the governance module account: +- The marker status can be Active. +- The `unrestricted_denom_regex` check is not applied. 
Denoms still need to conform to the base coin denom format though. +- The marker's `allow_governance_control` flag ignores the `enable_governance` param value, and is set to the provided value. +- If the marker status is Active, and no `manager` is provided, it is left blank (instead of being populated with the `from_address`). + +## Msg/AddAccessRequest + +Add Access Request is used to add permissions to a marker that allow the specified accounts to perform the specified actions. + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L98-L103 + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L105-106 + +This service message is expected to fail if: + +- The given denom value is invalid or does not match an existing marker on the system +- The marker is pending: + - And the request is not signed with an administrator address that matches the manager address or: + - The given administrator address does not currently have the "admin" access granted on the marker +- The accesslist: + - Contains more than one entry for a given address + - Contains a grant with an invalid address + - Contains a grant with an invalid access enum value (Unspecified/0) + +The Add Access request can be called many times on a marker with some or all of the access grant values. The method may +only be used against markers in the `Pending` status when called by the current marker manager address or against `Finalized` +and `Active` markers when the caller is currently assigned the `Admin` access type. + +## Msg/DeleteAccessRequest + +DeleteAccess Request defines the Msg/DeleteAccess request type + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L108-L113 + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L114-L115 + +This service message is expected to fail if: + +- The given denom value is invalid or does not match an existing marker on the system +- The marker is not pending or: + - The request is not signed with an administrator address that matches the manager address or: + - The given administrator address does not currently have the "admin" access granted on the marker + +The Delete Access request will remove all access granted to the given address on the specified marker. The method may +only be used against markers in the `Pending` status when called by the current marker manager address or against `Finalized` +and `Active` markers when the caller is currently assigned the `Admin` access type. 
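+
+Several of the message handlers above hinge on whether the signer holds a particular access grant on the marker. As a minimal illustration, based on the `AccessGrant` and `Access` types shown in the State section rather than the module's actual keeper code (the address below is a placeholder), such a permission check might look like:
+
+```go
+package main
+
+import "fmt"
+
+// Access mirrors the grant enum from the State section (Unknown=0 ... Transfer=7).
+type Access int32
+
+const (
+    AccessUnknown Access = iota
+    AccessMint
+    AccessBurn
+    AccessDeposit
+    AccessWithdraw
+    AccessDelete
+    AccessAdmin
+    AccessTransfer
+)
+
+// AccessGrant associates a list of permissions with a bech32 address.
+type AccessGrant struct {
+    Address     string
+    Permissions []Access
+}
+
+// hasAccess reports whether addr holds the requested permission in the marker's
+// access control list. Handlers such as AddAccess/DeleteAccess would require
+// AccessAdmin on finalized/active markers; Transfer would require AccessTransfer, and so on.
+func hasAccess(grants []AccessGrant, addr string, want Access) bool {
+    for _, g := range grants {
+        if g.Address != addr {
+            continue
+        }
+        for _, p := range g.Permissions {
+            if p == want {
+                return true
+            }
+        }
+    }
+    return false
+}
+
+func main() {
+    grants := []AccessGrant{{Address: "pb1adminexample", Permissions: []Access{AccessAdmin, AccessWithdraw}}}
+    fmt.Println(hasAccess(grants, "pb1adminexample", AccessAdmin))    // true
+    fmt.Println(hasAccess(grants, "pb1adminexample", AccessTransfer)) // false
+}
+```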
+ +## Msg/FinalizeRequest + +Finalize Request defines the Msg/Finalize request type + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L117-L121 + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L122-L123 + +This service message is expected to fail if: + +- The given denom value is invalid or does not match an existing marker on the system +- The marker is not in a `proposed` status or: + - The request is not signed with an administrator address that matches the manager address or: + - The given administrator address does not currently have the "admin" access granted on the marker + +The `Finalize` marker status performs a set of checks to ensure the marker is ready to be activated. It is designed to +serve as an intermediate step prior to activation that indicates marker configuration is complete. + +## Msg/ActivateRequest + +Activate Request defines the Msg/Activate request type + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L125-L129 + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L130-L131 + +This service message is expected to fail if: + +- The given denom value is invalid or does not match an existing marker on the system +- The marker is not in a `Finalized` status or: + - The request is not signed with an administrator address that matches the manager address or: + - The given administrator address does not currently have the "admin" access granted on the marker +- The marker has a supply less than the current in circulation supply (for markers created against existing coin) + +The Activate marker request will mint any coin required to achieve a circulation target set by the total supply. In +addition the marker will no longer be managed by an indicated "manager" account but will instead require explicit +rights assigned as access grants for any modification. + +If a marker has a fixed supply the begin block/invariant supply checks are also performed. If the supply is expected to +float then the `total_supply` value will be set to zero upon activation. + +## Msg/CancelRequest + +Cancel Request defines the Msg/Cancel request type + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L133-L137 + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L138-L139 + +This service message is expected to fail if: + +- The given denom value is invalid or does not match an existing marker on the system +- The marker is not in a `Pending` or `Active` status +- If marker is in a `Pending` status and: + - The given administrator address does not currently have the "admin" access granted on the marker + - Or given administrator is not listed as the manager on the marker +- If marker is in a `Active` status and: + - The given administrator address does not currently have the "admin" access granted on the marker +- The amount in circulation is greater than zero or any remaining amount is not currently held in escrow within the + marker account. 
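+
+The Finalize, Activate, and Cancel handlers above enforce the marker status lifecycle documented in the State Transitions section (Proposed → Finalized → Active → Cancelled → Destroyed). Below is a small sketch of that transition check using simplified types, not the module's actual `MarkerStatus` implementation:
+
+```go
+package main
+
+import "fmt"
+
+type MarkerStatus string
+
+const (
+    StatusProposed  MarkerStatus = "proposed"
+    StatusFinalized MarkerStatus = "finalized"
+    StatusActive    MarkerStatus = "active"
+    StatusCancelled MarkerStatus = "cancelled"
+    StatusDestroyed MarkerStatus = "destroyed"
+)
+
+// validTransitions encodes the lifecycle from the State Transitions doc:
+// Proposed -> Finalized or Cancelled, Finalized -> Active or Cancelled,
+// Active -> Cancelled, Cancelled -> Destroyed.
+var validTransitions = map[MarkerStatus][]MarkerStatus{
+    StatusProposed:  {StatusFinalized, StatusCancelled},
+    StatusFinalized: {StatusActive, StatusCancelled},
+    StatusActive:    {StatusCancelled},
+    StatusCancelled: {StatusDestroyed},
+}
+
+func canTransition(from, to MarkerStatus) bool {
+    for _, next := range validTransitions[from] {
+        if next == to {
+            return true
+        }
+    }
+    return false
+}
+
+func main() {
+    fmt.Println(canTransition(StatusProposed, StatusActive))     // false: must be finalized first
+    fmt.Println(canTransition(StatusFinalized, StatusActive))    // true
+    fmt.Println(canTransition(StatusCancelled, StatusDestroyed)) // true
+}
+```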
+ +## Msg/DeleteRequest + +Delete Request defines the Msg/Delete request type + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L141-L145 + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L146-L147 + +This service message is expected to fail if: + +- The given denom value is invalid or does not match an existing marker on the system +- The marker is not in a `Cancelled` status +- The given administrator address does not currently have the "admin" access granted on the marker or: + - If the marker was previously in a `Proposed` status when cancelled the administrator must be the marker manager. +- The amount in circulation is greater than zero or any remaining amount is not currently held in escrow within the + marker account. +- There are any other coins remaining in escrow after supply has been fully burned. + +## Msg/MintRequest + +Mint Request defines the Msg/Mint request type + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L149-L154 + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L155-L156 + +This service message is expected to fail if: + +- The given denom value is invalid or does not match an existing marker on the system +- The marker is not in a `Active` status or: + - The request is not signed with an administrator address that matches the manager address or: +- The given administrator address does not currently have the "mint" access granted on the marker +- The requested amount of mint would increase the total supply in circulation above the configured supply limit set in + the marker module params + +## Msg/BurnRequest + +Burn Request defines the Msg/Burn request type that is used to remove supply of the marker coin from circulation. In +order to successfully burn supply the amount to burn must be held by the marker account itself (in escrow). + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L158-L163 + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L164-L165 + +This service message is expected to fail if: + +- The given denom value is invalid or does not match an existing marker on the system +- The marker is not in an `Active` status or: + - The request is not signed with an administrator address that matches the manager address or: +- The given administrator address does not currently have the "burn" access granted on the marker +- The amount of coin to burn is not currently held in escrow within the marker account. + +## Msg/WithdrawRequest + +Withdraw Request defines the Msg/Withdraw request type and is used to withdraw coin from escrow within the marker. + +NOTE: any denom coin can be held within a marker "in escrow", these values are not restricted to just the denom of the +marker itself. 
+ ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L167-L174 + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L175-L176 + +This service message is expected to fail if: + +- The given denom value is invalid or does not match an existing marker on the system +- If marker is not in a `Active` status: + - The request is not signed with an administrator address that matches the manager address + - For `Pending` status: the denom being withdrawn from the marker matches the marker denom +- If the marker is `Active`, `Cancelled` + - The given administrator address does not currently have the "withdraw" access granted on the marker +- The amount of coin requested for withdraw is not currently held by the marker account + +## Msg/TransferRequest + +Transfer Request defines the Msg/Transfer request type. A transfer request is used to transfer coin between two +accounts for `RESTRICTED_COIN` type markers. Such markers have `send_enabled=false` configured with the `x/bank` module, +and thus cannot be sent using a normal `MsgSend` operation. A transfer request requires a signature from an account +with `TRANSFER` access. If force transfer is not enabled for the marker, the source account must have granted the admin +permission (via `authz`) to do the transfer. If force transfer is allowed for the marker, the source account does not +need to approve of the transfer. + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L178-L185 + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L187-L188 + +This service message is expected to fail if: + +- The given denom value is invalid or does not match an existing marker on the system +- The marker is not in a `Active` status or: + - The given administrator address does not currently have the "transfer" access granted on the marker + - The marker types is not `RESTRICTED_COIN` + +## Msg/IbcTransferRequest + +Ibc transfer Request defines the Msg/IbcTransfer request type. The `IbcTransferRequest` is used to transfer `RESTRICTED_COIN` type markers to another chain via ibc. These coins have their `send_enabled` flag disabled by the bank module and thus cannot be sent using a normal `send_coin` operation. + +NOTE: A transfer request also requires a signature from an account with the transfer permission as well as approval from the account the funds will be withdrawn from. + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L190-L197 + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L199-L200 + +## Msg/SetDenomMetadataRequest + +SetDenomMetadata Request defines the Msg/SetDenomMetadata request type. This request is used to set the informational +denom metadata held within the bank module. Denom metadata can be used to provide a more streamlined user experience +within block explorers or similar applications. 
+ ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L202-L207 + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L209-L210 + +This service message is expected to fail if: + +- The given denom value is invalid or does not match an existing marker on the system +- The request is not signed with an administrator address that matches the manager address or: +- The given administrator address does not currently have the "admin" access granted on the marker +- Any of the provided display denoms is found to be invalid + - Does not match the proper form with an SI unit prefix matching the associated exponent + - Is missing the denom unit for the indicated base denom or display denom unit. + - If there is an existing record the update will fail if: + - The Base denom is changed. + If marker status is `Active` or `Finalized`: + - Any DenomUnit entries are removed. + - DenomUnit Denom fields are modified. + - Any aliases are removed from a DenomUnit. + +## Msg/AddFinalizeActivateMarkerRequest + +AddFinalizeActivate requested is used for adding, finalizing, and activating a marker in a single request. + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L212-L224 + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L226-L227 + +This service message is expected to fail if: + +- The given denom value is invalid or does not match an existing marker on the system +- The marker is pending: + - And the request is not signed with an administrator address that matches the manager address or: + - The given administrator address does not currently have the "admin" access granted on the marker +- The accesslist: + - Contains more than one entry for a given address + - Contains a grant with an invalid address + - Contains a grant with an invalid access enum value (Unspecified/0) + +## Msg/GrantAllowanceRequest + +GrantAllowance grants a fee allowance to the grantee on the granter's account. + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L61-L72 + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L74-L75 + +This service message is expected to fail if: + +- Any field is empty. +- The allowance is invalid +- The given denom value is invalid or does not match an existing marker on the system +- The administrator or grantee are invalid addresses +- The administrator does not have `ADMIN` access on the marker. + +## Msg/SupplyIncreaseProposalRequest + +SupplyIncreaseProposal is a governance-only message for increasing the supply of a marker. + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L229-L239 + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1//tx.proto#L241-L242 + +This service message is expected to fail if: + +- The authority is not the address of the governance module's account. 
+- The governance proposal format (title, description, etc) is invalid +- The requested supply exceeds the configuration parameter for `MaxSupply` + +See also: [Governance: Supply Increase Proposal](./10_governance.md#supply-increase-proposal) + +## Msg/UpdateRequiredAttributesRequest + +UpdateRequiredAttributes allows signers that have transfer authority or via gov proposal to add and remove required attributes from a restricted marker. + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L244-L255 + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/tx.proto#L257-L258 + +This service message is expected to fail if: + +- Remove list has an attribute that does not exist in current Required Attributes +- Add list has an attribute that already exist in current Required Attributes +- Attributes cannot be normalized +- Marker denom cannot be found or is not a restricted marker + +## Msg/UpdateSendDenyListRequest + +UpdateSendDenyList allows signers that have transfer authority or via gov proposal to add and remove addresses to the deny send list for a restricted marker. + ++++ https://github.com/provenance-io/provenance/blob/373d0ee8faeaa9e3b70d658e6069ab1781e6ce5e/proto/provenance/marker/v1/tx.proto#L295-L308 + ++++ https://github.com/provenance-io/provenance/blob/373d0ee8faeaa9e3b70d658e6069ab1781e6ce5e/proto/provenance/marker/v1/tx.proto#L310-L311 + +This service message is expected to fail if: + +- Remove list has an address that does not exist in current deny list +- Add list has an attribute that already exist in current deny list +- Both add and remove lists are empty +- Invalid address format in add/remove lists +- Marker denom cannot be found or is not a restricted marker +- Signer does not have transfer authority or is not from gov proposal + +## Msg/UpdateForcedTransferRequest + +UpdateForcedTransfer allows for the activation or deactivation of forced transfers for a marker. +This message must be submitted via governance proposal. + ++++ https://github.com/provenance-io/provenance/blob/a830a8ecf24199469de52b92ee20475d6912f2eb/proto/provenance/marker/v1/tx.proto#L260-L272 + ++++ https://github.com/provenance-io/provenance/blob/a830a8ecf24199469de52b92ee20475d6912f2eb/proto/provenance/marker/v1/tx.proto#L274-L275 + +This service message is expected to fail if: + +- The authority is not the governance module account address. +- No marker with the provided denom exists. +- The marker is not a restricted coin. +- The marker does not allow governance control. + +## Msg/SetAccountDataRequest + +SetAccountData allows the association of some data (a string) with a marker. + ++++ https://github.com/provenance-io/provenance/blob/e83f1955cba07e2ba87790c4487d22632ae9e69c/proto/provenance/marker/v1/tx.proto#L279-L291 + ++++ https://github.com/provenance-io/provenance/blob/e83f1955cba07e2ba87790c4487d22632ae9e69c/proto/provenance/marker/v1/tx.proto#L293-L294 + +This endpoint can either be used directly or via governance proposal. + +This service message is expected to fail if: + +- No marker with the provided denom exists. +- The signer is the governance module account address but the marker does not allow governance control. +- The signer is not the governance module account and does not have deposit access on the marker. +- The provided value is too long (as defined by the attribute module params). 
+ +## Msg/AddNetAssetValuesRequest + +AddNetAssetValuesRequest allows for the adding/updating of net asset values for a marker. + ++++ https://github.com/provenance-io/provenance/blob/25070572cc898c476f5bb1a816c6c1c4d07e3d38/proto/provenance/marker/v1/tx.proto#L327-L332 + ++++ https://github.com/provenance-io/provenance/blob/25070572cc898c476f5bb1a816c6c1c4d07e3d38/proto/provenance/marker/v1/tx.proto#L334-L335 + +This endpoint can either be used directly or via governance proposal. + +This service message is expected to fail if: + +- No marker with the provided denom exists. +- The signer is the governance module account address but the marker does not allow governance control. +- The signer is not the governance module account and does not have any access on the marker. +- The provided net value asset properties are invalid. diff --git a/docs/sdk/marker/04_begin_block.md b/docs/sdk/marker/04_begin_block.md new file mode 100644 index 000000000..db69ebf81 --- /dev/null +++ b/docs/sdk/marker/04_begin_block.md @@ -0,0 +1,18 @@ +# Begin-Block + + +## Supply Checks + +Each ABCI begin block call, all markers that are active and have a fixed supply +are evaluated to ensure configured supply level matches actual supply levels. + +- For markers that have a configured supply exceeding the amount in circulation the difference is minted and placed + within the marker account. +- For markers that have a configured supply less than the amount in circulation, an attempt to burn sufficient coin + to balance the circulation against the the supply will be performed. If the marker does not hold enough coin to + perform this action an invariant constraint violation is thrown and the chain will halt. + +## Destroyed Markers +In addition to supply checks the ABCI begin block call is used to purge markers that have been selected for deletion. + +- Markers in the `destroyed` status are deleted from the KVStore. diff --git a/docs/sdk/marker/05_end_block.md b/docs/sdk/marker/05_end_block.md new file mode 100644 index 000000000..4040072cc --- /dev/null +++ b/docs/sdk/marker/05_end_block.md @@ -0,0 +1,3 @@ +# End-Block + +The end block handler is not used by the marker module. \ No newline at end of file diff --git a/docs/sdk/marker/06_hooks.md b/docs/sdk/marker/06_hooks.md new file mode 100644 index 000000000..e4117c489 --- /dev/null +++ b/docs/sdk/marker/06_hooks.md @@ -0,0 +1,3 @@ +# Hooks + +The marker module does not expose any hooks for callback registration within its api. \ No newline at end of file diff --git a/docs/sdk/marker/07_events.md b/docs/sdk/marker/07_events.md new file mode 100644 index 000000000..1e51eaf20 --- /dev/null +++ b/docs/sdk/marker/07_events.md @@ -0,0 +1,218 @@ +# Events + +The marker module emits the following events: + + + - [Marker Added](#marker-added) + - [Grant Access](#grant-access) + - [Revoke Access](#revoke-access) + - [Finalize](#finalize) + - [Activate](#activate) + - [Cancel](#cancel) + - [Destroy](#destroy) + - [Mint](#mint) + - [Burn](#burn) + - [Withdraw](#withdraw) + - [Transfer](#transfer) + - [Set Denom Metadata](#set-denom-metadata) + + + +--- +## Marker Added + +Fires when a marker is added using the Add Marker Msg. 
+ +| Type | Attribute Key | Attribute Value | +| ---------------------- | --------------------- | ------------------------- | +| EventMarkerAdd | Denom | \{denom string\} | +| EventMarkerAdd | Address | \{marker address\} | +| EventMarkerAdd | Amount | \{supply amount\} | +| EventMarkerAdd | Manager | \{admin account address\} | +| EventMarkerAdd | Status | \{current marker status\} | +| EventMarkerAdd | MarkerType | \{type of marker\} | + +`provenance.marker.v1.EventMarkerAdd` + +--- +## Grant Access + +Fires when administrative access is granted for a marker + +| Type | Attribute Key | Attribute Value | +| ---------------------- | --------------------- | ------------------------- | +| EventMarkerAddAccess | Denom | \{denom string\} | +| EventMarkerAddAccess | Administrator | \{admin account address\} | +| EventMarkerAddAccess | Access | \{access grant format\} | + +`provenance.marker.v1.EventMarkerAddAccess` + +### Access Grant Format + +| Attribute Key | Attribute Value | +| --------------------- | ------------------------ | +| Address | \{bech32 address string\} | +| Permissions | \{array of role names\} | + +`provenance.marker.v1.EventMarkerAccess` + +--- +## Revoke Access + +Fires when all access grants are removed for a given address. + +| Type | Attribute Key | Attribute Value | +| ------------------------ | --------------------- | ------------------------- | +| EventMarkerDeleteAccess | Denom | \{denom string\} | +| EventMarkerDeleteAccess | Administrator | \{admin account address\} | +| EventMarkerDeleteAccess | RemoveAddress | \{address removed\} | + +`provenance.marker.v1.EventMarkerDeleteAccess` + +--- +## Finalize + +Fires when a marker is finalized. + +| Type | Attribute Key | Attribute Value | +| ---------------------- | --------------------- | ------------------------- | +| EventMarkerFinalize | Denom | \{denom string\} | +| EventMarkerFinalize | Administrator | \{admin account address\} | + +`provenance.marker.v1.EventMarkerFinalize` + +--- +## Activate + +Fires when a marker is activated. + +| Type | Attribute Key | Attribute Value | +| ------------------------- | --------------------- | ------------------------- | +| EventMarkerActivate | Denom | \{denom string\} | +| EventMarkerActivate | Administrator | \{admin account address\} | + +`provenance.marker.v1.EventMarkerActivate` + +--- +## Cancel + +Fired when a marker is cancelled successfully. + +| Type | Attribute Key | Attribute Value | +| ---------------------- | --------------------- | ------------------------- | +| EventMarkerCancel | Denom | \{denom string\} | +| EventMarkerCancel | Administrator | \{admin account address\} | + +`provenance.marker.v1.EventMarkerCancel` + +--- +## Destroy + +Fires when a marker is marked as destroyed and ready for removal. + +| Type | Attribute Key | Attribute Value | +| ---------------------- | --------------------- | ------------------------- | +| EventMarkerDelete | Denom | \{denom string\} | +| EventMarkerDelete | Administrator | \{admin account address\} | + +`provenance.marker.v1.EventMarkerDelete` + +--- +## Mint + +Fires when coins are minted for a marker. + +| Type | Attribute Key | Attribute Value | +| ------------------ | --------------------- | ------------------------- | +| EventMarkerMint | Denom | \{denom string\} | +| EventMarkerMint | Amount | \{supply amount\} | +| EventMarkerMint | Administrator | \{admin account address\} | + +`provenance.marker.v1.EventMarkerMint` + +--- +## Burn + +Fires when coins are burned from a marker account. 
+ +| Type | Attribute Key | Attribute Value | +| ------------------ | --------------------- | ------------------------- | +| EventMarkerBurn | Denom | \{denom string\} | +| EventMarkerBurn | Amount | \{supply amount\} | +| EventMarkerBurn | Administrator | \{admin account address\} | + +`provenance.marker.v1.EventMarkerBurn` + +--- + +Fires when coin is removed from a marker account and transferred to another. +## Withdraw + +| Type | Attribute Key | Attribute Value | +| -------------------- | --------------------- | --------------------------- | +| EventMarkerWithdraw | Denom | \{denom string\} | +| EventMarkerWithdraw | Amount | \{supply amount\} | +| EventMarkerWithdraw | Administrator | \{admin account address\} | +| EventMarkerWithdraw | ToAddress | \{recipient account address\} | + +`provenance.marker.v1.EventMarkerWithdraw` + +--- +## Transfer + +Fires when a facilitated transfer is performed of the marker's coin between accounts by an administrator + +| Type | Attribute Key | Attribute Value | +| ---------------------- | --------------------- | --------------------------- | +| EventMarkerTransfer | Denom | \{denom string\} | +| EventMarkerTransfer | Amount | \{supply amount\} | +| EventMarkerTransfer | Administrator | \{admin account address\} | +| EventMarkerTransfer | FromAddress | \{source account address\} | +| EventMarkerTransfer | ToAddress | \{recipient account address\} | + +`provenance.marker.v1.EventMarkerTransfer` + +## Set Denom Metadata + +Fires when the denom metadata is set for a marker + +| Type | Attribute Key | Attribute Value | +| ----------------------------- | --------------------- | --------------------------- | +| EventMarkerSetDenomMetadata | MetadataBase | \{marker's denom string\} | +| EventMarkerSetDenomMetadata | MetadataDescription | \{description string\} | +| EventMarkerSetDenomMetadata | MetadataDisplay | \{denom string\} | +| EventMarkerSetDenomMetadata | MetadataName | \{name string\} | +| EventMarkerSetDenomMetadata | MetadataSymbol | \{symbol string\} | +| EventMarkerSetDenomMetadata | MetadataDenomUnits | \{array of denom units\} | +| EventMarkerSetDenomMetadata | Administrator | \{admin account address\} | + +`provenance.marker.v1.EventMarkerSetDenomMetadata` + +### Denom Unit Format + +Denom units have a specified exponent (1-18), a specified denom, and a list of optional aliases. Example +aliases for `uhash` might be `microhash` or `µhash` + +| Attribute Key | Attribute Value | +| --------------------- | ------------------------ | +| Denom | \{denom string\} | +| Exponent | \{uint\} | +| Aliases | \{array of denom strings\} | + +`provenance.marker.v1.EventDenomUnit` + +### Set Net Asset Value + +Fires when a new NetAssetValue is add or updated for a marker. 
+ +| Type | Attribute Key | Attribute Value | +| ------------------------| --------------------- | ------------------------------------------------- | +| EventSetNetAssetValue | Denom | \{marker's denom string\} | +| EventSetNetAssetValue | Price | \{token amount the marker is valued at for volume\} | +| EventSetNetAssetValue | Volume | \{total volume/shares associated with price\} | +| EventSetNetAssetValue | Source | \{source address of caller\} | + + +`provenance.marker.v1.EventSetNetAssetValue` + +--- diff --git a/docs/sdk/marker/08_telemetry.md b/docs/sdk/marker/08_telemetry.md new file mode 100644 index 000000000..9f2eb492d --- /dev/null +++ b/docs/sdk/marker/08_telemetry.md @@ -0,0 +1,15 @@ +# Telemetry + +The marker module exposes a limited set of telemetry for monitoring is operations. + +> NOTE: The majority of the telemetry that applies to the marker module is exposed by the `bank` module and the `auth` +> module which the marker module uses to perform most of its functions. + +## Transferred Amount + +For transfers of restricted coins the amount moved and the associated denom are published. + +| Labels | Value | +| ----------------------- | -------------- | +| `tx`, `msg`, `transfer` | amount `int64` | +| `denom` | marker denom | \ No newline at end of file diff --git a/docs/sdk/marker/09_params.md b/docs/sdk/marker/09_params.md new file mode 100644 index 000000000..6027d190e --- /dev/null +++ b/docs/sdk/marker/09_params.md @@ -0,0 +1,27 @@ +# Parameters + +The marker module contains several settings that control operation of the module managed by the +Param module and available for control via Governance proposal to change parameters. + +## Params + +| Key | Type | Example | +| ---------------------- | ---------- | --------------------------------- | +| MaxTotalSupply | `uint64` | `"259200000000000"` | +| MaxSupply | `math.Int` | `"259200000000000"` | +| EnableGovernance | `bool` | `true` | +| UnrestrictedDenomRegex | `string` | `"[a-zA-Z][a-zA-Z0-9\-\.]{7,83}"` | + + +## Definitions + +- **Max Total Supply** (uint64) - A value indicating the maximum supply level allowed for any added marker. This is now deprecated and should not be used. + +- **Enable Governance** (boolean) - A flag indicating if `allow_governance_control` setting on added markers must + be set to `true`. + +- **Unrestricted Denom Regex** (string) - A regular expression that is used to check the denom value on markers added + by calling AddMarker. This is intended to further restrict what may be used for a denom when a generic marker is + created. + +- **Max Supply** (math.Int) - A value indicating the maximum supply level allowed for any added marker \ No newline at end of file diff --git a/docs/sdk/marker/10_governance.md b/docs/sdk/marker/10_governance.md new file mode 100644 index 000000000..021dc42b5 --- /dev/null +++ b/docs/sdk/marker/10_governance.md @@ -0,0 +1,129 @@ +# Governance Proposal Control + +The marker module supports an extensive amount of control over markers via governance proposal. This allows a +marker to be defined where no single account is allowed to make modifications and yet it is still possible to +issue change requests through passing a governance proposal. 
+ + + - [Add Marker Proposal](#add-marker-proposal) + - [Supply Increase Proposal](#supply-increase-proposal) + - [Supply Decrease Proposal](#supply-decrease-proposal) + - [Set Administrator Proposal](#set-administrator-proposal) + - [Remove Administrator Proposal](#remove-administrator-proposal) + - [Change Status Proposal](#change-status-proposal) + - [Withdraw Escrow Proposal](#withdraw-escrow-proposal) + - [Set Denom Metadata Proposal](#set-denom-metadata-proposal) + + + +## Add Marker Proposal + +AddMarkerProposal defines a governance proposal to create a new marker. + +In a typical add marker situation the `UnrestrictedDenomRegex` parameter would be used to enforce longer denom +values (preventing users from creating coins with well known symbols such as BTC, ETH, etc). Markers added +via governance proposal are only limited by the more generic Coin Validation Denom expression enforced by the +bank module. + +A further difference from the standard add marker flow is that governance proposals to add a marker can directly +set a marker to the `Active` status with the appropriate minting operations performed immediately. + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/proposals.proto#L15-L34 + +This request is expected to fail if: +- The governance proposal format (title, description, etc) is invalid +- The marker request contains an invalid denom value +- The marker already exists +- The amount of coin in circulation could not be set. + - There is already coin in circulation [perhaps from genesis] and the configured supply is less than this amount and + it is not possible to burn sufficient coin to make the requested supply match actual supply +- The mint operation fails for any reason (see bank module) + +## Supply Increase Proposal + +SupplyIncreaseProposal defines a governance proposal to administer a marker and increase total supply of the marker +through minting coin and placing it within the marker or assigning it directly to an account. 
+ ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/proposals.proto#L36-L47 + +This request is expected to fail if: +- The governance proposal format (title, description, etc) is invalid +- The requested supply exceeds the configuration parameter for `MaxSupply` + +## Supply Decrease Proposal + +SupplyDecreaseProposal defines a governance proposal to administer a marker and decrease the total supply through +burning coin held within the marker + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/proposals.proto#L49-L59 + +This request is expected to fail if: +- The governance proposal format (title, description, etc) is invalid +- Marker does not allow governance control (`AllowGovernanceControl`) +- The marker account itself is not holding sufficient supply to cover the amount of coin requested to burn +- The amount of resulting supply would be less than zero + +The chain will panic and halt if: +- The bank burn operation fails for any reason (see bank module) + +## Set Administrator Proposal + +SetAdministratorProposal defines a governance proposal to administer a marker and set administrators with specific +access on the marker + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/proposals.proto#L61-L71 + +This request is expected to fail if: +- The governance proposal format (title, description, etc) is invalid +- The marker does not exist +- Marker does not allow governance control (`AllowGovernanceControl`) +- Any of the access grants are invalid + +## Remove Administrator Proposal + +RemoveAdministratorProposal defines a governance proposal to administer a marker and remove all permissions for a +given address + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/proposals.proto#L73-L83 + +This request is expected to fail if: +- The governance proposal format (title, description, etc) is invalid +- The marker does not exist +- Marker does not allow governance control (`AllowGovernanceControl`) +- The address to be removed is not present + +## Change Status Proposal + +ChangeStatusProposal defines a governance proposal to administer a marker to change its status + ++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/proposals.proto#L85-L94 + +This request is expected to fail if: +- The governance proposal format (title, description, etc) is invalid +- Marker does not allow governance control (`AllowGovernanceControl`) +- The requested status is invalid +- The new status is not a valid transition from the current status +- For destroyed markers + - The supply of the marker is greater than zero and the amount held by the marker account does not equal this value + resulting in the failure to burn all remaining supply. 
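+
+The "new status is not a valid transition" failure above hinges on the marker status lifecycle. The following Go snippet is only an illustrative sketch of such a transition check: the status names mirror the marker statuses, but the type and the transition map here are simplified assumptions for this example, not the module's actual implementation.
+
+```go
+package main
+
+import "fmt"
+
+// MarkerStatus is a simplified stand-in for the marker module's status enum.
+type MarkerStatus string
+
+const (
+	StatusProposed  MarkerStatus = "proposed"
+	StatusFinalized MarkerStatus = "finalized"
+	StatusActive    MarkerStatus = "active"
+	StatusCancelled MarkerStatus = "cancelled"
+	StatusDestroyed MarkerStatus = "destroyed"
+)
+
+// validTransitions is an assumed, illustrative map of allowed status changes.
+var validTransitions = map[MarkerStatus][]MarkerStatus{
+	StatusProposed:  {StatusFinalized, StatusCancelled},
+	StatusFinalized: {StatusActive, StatusCancelled},
+	StatusActive:    {StatusCancelled},
+	StatusCancelled: {StatusDestroyed},
+}
+
+// canTransition reports whether changing from one status to another is allowed
+// under the illustrative rules above.
+func canTransition(from, to MarkerStatus) bool {
+	for _, next := range validTransitions[from] {
+		if next == to {
+			return true
+		}
+	}
+	return false
+}
+
+func main() {
+	fmt.Println(canTransition(StatusFinalized, StatusActive)) // true
+	fmt.Println(canTransition(StatusActive, StatusDestroyed)) // false: the marker must be cancelled first
+}
+```
+
+A ChangeStatusProposal that requests a transition not present in such a rule set is rejected, which is the failure case listed above.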
+
+## Withdraw Escrow Proposal
+
+WithdrawEscrowProposal defines a governance proposal to withdraw escrowed coins from a marker.
+
++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/proposals.proto#L96-L107
+
+This request is expected to fail if:
+- The governance proposal format (title, description, etc) is invalid
+- Marker does not allow governance control (`AllowGovernanceControl`)
+- The marker account is not holding sufficient assets to cover the requested withdrawal amounts.
+
+## Set Denom Metadata Proposal
+
+SetDenomMetadataProposal defines a governance proposal to set the metadata for a denom.
+
++++ https://github.com/provenance-io/provenance/blob/22740319ba4b3ba268b3720d4bee36d6c6b06b40/proto/provenance/marker/v1/proposals.proto#L109-L117
+
+This request is expected to fail if:
+- The governance proposal format (title, description, etc) is invalid
+- Marker does not allow governance control (`AllowGovernanceControl`)
diff --git a/docs/sdk/marker/11_authorization.md b/docs/sdk/marker/11_authorization.md
new file mode 100644
index 000000000..a68052d2f
--- /dev/null
+++ b/docs/sdk/marker/11_authorization.md
@@ -0,0 +1,25 @@
+# Authorization
+
+The marker module supports granting authorizations for restricted coin transfers. This is implemented using
+the `authz` module's `Authorization` interface.
+
+```
+// MarkerTransferAuthorization gives the grantee permissions to execute
+// a restricted coin transfer on behalf of the granter's account.
+message MarkerTransferAuthorization {
+  option (cosmos_proto.implements_interface) = "Authorization";
+
+  // transfer_limit is the total amount the grantee can transfer
+  repeated cosmos.base.v1beta1.Coin transfer_limit = 1
+      [(gogoproto.nullable) = false, (gogoproto.castrepeated) = "github.com/cosmos/cosmos-sdk/types.Coins"];
+
+  // allow_list specifies an optional list of addresses to whom the grantee can send tokens on behalf of the
+  // granter. If omitted, any recipient is allowed.
+  repeated string allow_list = 2;
+}
+```
+
+With a `MarkerTransferAuthorization`, a `granter` can allow a `grantee` to execute transfers on their behalf.
+A `transfer_limit` is required to be set for the `grantee`.
+The `allow_list` is optional.
+An empty list means any destination address is allowed; otherwise, the destination must be in the `allow_list`.
\ No newline at end of file
diff --git a/docs/sdk/marker/12_transfers.md b/docs/sdk/marker/12_transfers.md
new file mode 100644
index 000000000..39ed8c6cf
--- /dev/null
+++ b/docs/sdk/marker/12_transfers.md
@@ -0,0 +1,316 @@
+# Transfers
+
+There are some complex interactions involved with transfers of restricted coins.
+
+  - [General](#general)
+  - [Definitions](#definitions)
+    - [Transfer Permission](#transfer-permission)
+    - [Forced Transfers](#forced-transfers)
+    - [Required Attributes](#required-attributes)
+    - [Individuality](#individuality)
+    - [Deposits](#deposits)
+    - [Bypass Accounts](#bypass-accounts)
+  - [Send Restrictions](#send-restrictions)
+    - [Flowcharts](#flowcharts)
+    - [Quarantine Complexities](#quarantine-complexities)
+
+## General
+
+Accounting of restricted coins is handled by the bank module. Restricted funds can be moved using the bank module's `MsgSend` or `MsgMultiSend`. They can also be moved using the marker module's `MsgTransferRequest`.
+
+During such transfers, several things are checked using a `SendRestrictionFn` injected into the bank module.
This restriction is applied in almost all instances when funds are being moved between accounts. The exceptions are delegations, undelegations, minting, burning, and marker withdrawals. A `MsgTransferRequest` also bypasses the `SendRestrictionFn` in order to include the `admin` account in the logic. + + + +## Definitions + +### Transfer Permission + +One permission that can be granted to an address is `transfer`. The `transfer` permission is granted to accounts that represent a "Transfer Agent" or "Transfer Authority" for restricted marker tokens. An address with `transfer` permission can utilize `MsgTransferRequest` to move restricted funds from one account to another. If the marker allows forced transfer, the source account can be any account, otherwise, it must be the admin's own account. + +`MsgSend` and `MsgMultiSend` can also be used by an address with `transfer` permission to move funds out of their own account. + +### Forced Transfers + +A restricted coin marker can be configured to allow forced transfers. If allowed, an account with `transfer` permission can use a `MsgTransferRequest` to transfer the restricted coins out of almost any account to another. Forced transfer cannot be used to move restricted coins out of module accounts or smart contract accounts, though. + +### Required Attributes + +Required attributes allow a marker Transfer Authority to define a set of account attestations created with the name/attribute modules to certify an account as an approved holder of the token. Accounts that possess all of the required attributes are considered authorized by the Transfer Authority to receive the token from normal bank send operations without a specific Transfer Authority approval. Required attributes are only supported on restricted markers. + +For example, say account A has some restricted coins of a marker that has required attributes. Also say account B has all of those required attributes, and account C does not. Account A could use a `MsgSend` to send those restricted coins to account B. However, account B could not send them to account C (unless B also has `transfer` permission). + +If a restricted coin marker does not have any required attributes defined, the only way the funds can be moved is by someone with `transfer` permisison. + +### Individuality + +If multiple restricted coin denoms are being moved at once, each denom is considered separately. +For example, if the sender has `transfer` permission on one of them, it does not also apply to the other(s). + +### Deposits + +A deposit is when any funds are being sent to a marker's account. The funds being sent do not have to be in the denom of the destination marker. + +Whenever funds are being deposited into a marker, the sender must have `deposit` permission on the target marker. If the funds to deposit are restricted coins, the sender also needs `transfer` permission on the funds being moved; required attributes are not taken into account. + +### Bypass Accounts + +There are several hard-coded module account addresses that are given special consideration in the marker module's `SendRestrictionFn`: + +* `authtypes.FeeCollectorName` - Allows paying fees with restricted coins. +* `reward` - Allows reward programs to use restricted coins. +* `quarantine` - Allows quarantine and acceptance of quarantined coins. +* `gov` - Allows deposits to have quarantined coins. +* `distribution` - Allows collection of delegation rewards in restricted coins. +* `stakingtypes.BondedPoolName` - Allows delegation of restricted coins. 
+* `stakingtypes.NotBondedPoolName` - Allows delegation of restricted coins. + +All of these are treated equally in the application of a marker's send restrictions. + +For restricted markers without required attributes: +* If the `toAddr` is a bypass account, the `fromAddr` must have transfer authority. +* If the `fromAddr` is a bypass account, it's assumed that the funds got where they currently are because someone with transfer authority got them there, so this transfer is allowed. + +For restricted markers with required attributes: +* If the `toAddr` is a bypass account, the transfer is allowed regardless of whether the `fromAddr` has transfer authority. It's assumed that the next destination's attributes will be properly checked before allowing the funds to leave the bypass account. +* If the `fromAddr` is a bypass account, the `toAddr` must have the required attributes. + +Bypass accounts are not considered during a `MsgTransferRequest`. + +## Send Restrictions + +The marker module injects a `SendRestrictionFn` into the bank module. This function is responsible for deciding whether any given transfer is allowed from the marker module's point of view. However, it is bypassed during a `MsgTransfer`. + +### Flowcharts + +#### The SendRestrictionFn + +This `SendRestrictionFn` uses the following flow. + +```mermaid +%%{ init: { 'flowchart': { 'curve': 'monotoneY'} } }%% +flowchart TD + start[["SendRestrictionFn(Sender, Receiver, Amount)"]] + qhasbp{{"Does context have bypass?"}} + nextd["Get next Denom from Amount."] + vsd[["validateSendDenom(Sender, Receiver, Denom)"]] + isdok{{"Is Denom transfer allowed?"}} + mored{{"Does Amount have another Denom?"}} + ok(["Send allowed."]) + style ok fill:#bbffaa,stroke:#1b8500,stroke-width:3px + denied(["Send denied."]) + style denied fill:#ffaaaa,stroke:#b30000,stroke-width:3px + start --> qhasbp + qhasbp ------>|yes| ok + qhasbp -.->|no| denomloop + subgraph denomloop ["Denom Loop"] + isdok -->|yes| mored + vsd --> isdok + mored -->|yes| nextd + nextd --> vsd + end + mored -.->|no| ok + isdok -.->|no| denied + + style denomloop fill:#bbffff + linkStyle 8 stroke:#b30000,color:#b30000 + linkStyle 1,7 stroke:#1b8500,color:#1b8500 +``` + +#### validateSendDenom + +Each `Denom` is checked using `validateSendDenom`, which has this flow: + +```mermaid +%%{ init: { 'flowchart': { 'curve': 'monotoneY'} } }%% +flowchart TD + start[["validateSendDenom(Sender, Receiver, Denom)"]] + qisrdep{{"Is Receiver a restricted coin marker account?"}} + qhasdep{{"Does Sender have Deposit\non Receiver marker?"}} + qisrc{{"Is Denom a restricted coin?"}} + qisdeny{{"Is Sender on marker's deny list?"}} + qhastrans{{"Does Sender have\ntransfer for Denom?"}} + qisdep{{"Is Receiver a marker account?"}} + qmhasattr{{"Does Denom have\nrequired attributes?"}} + qissbp{{"Is Sender a\nbypass account?"}} + qisrbp{{"Is Receiver a\nbypass account?"}} + qrhasattr{{"Does Receiver have\nthe required attributes?"}} + ok(["Denom transfer allowed."]) + style ok fill:#bbffaa,stroke:#1b8500,stroke-width:3px + denied(["Send denied."]) + style denied fill:#ffaaaa,stroke:#b30000,stroke-width:3px + start --> qisrdep + qisrdep -->|yes| qhasdep + qisrdep -.->|no| qisrc + qhasdep -.->|no| denied + qhasdep -->|yes| qisrc + qisrc -->|yes| qisdeny + qisrc -.->|no| ok + qisdeny -->|yes| denied + qisdeny -.->|no| qhastrans + qhastrans -.->|no| qisdep + qhastrans -->|yes| ok + qisdep -->|yes| denied + qisdep -.->|no| qmhasattr + qmhasattr -.->|no| qissbp + qmhasattr -->|yes| qisrbp + qissbp -..->|no| denied 
+ qissbp --->|yes| ok + qisrbp -.->|no| qrhasattr + qisrbp -->|yes| ok + qrhasattr -.->|no| denied + qrhasattr -->|yes| ok + + linkStyle 3,7,11,15,19 stroke:#b30000,color:#b30000 + linkStyle 6,10,16,18,20 stroke:#1b8500,color:#1b8500 +``` + +#### MsgTransferRequest + +A `MsgTransferRequest` bypasses the `SendRestrictionFn` and applies its own logic. A `MsgTransferRequest` only allows for a single coin amount, i.e. there's only one `Denom` to consider. + +```mermaid +%%{ init: { 'flowchart': { 'curve': 'monotoneY'} } }%% +flowchart TD + start[["TransferCoin(Sender, Receiver, Admin)"]] + qisrc{{"Is Denom a restricted coin?"}} + qhast{{"Does Admin have transfer for Denom?"}} + qadminfrom{{"Does Sender == Admin?"}} + qallowft{{"Is forced transfer allowed for Denom?"}} + qauthz{{"Has Sender granted Admin\npermission with authz?"}} + qmodacc{{"Is Sender a\nmodule account?"}} + qblocked{{"Is Receiver an\naddress blocked by\nthe bank module?"}} + ok(["Transfer allowed."]) + style ok fill:#bbffaa,stroke:#1b8500,stroke-width:3px + denied(["Transfer denied."]) + style denied fill:#ffaaaa,stroke:#b30000,stroke-width:3px + start --> qisrc + qisrc -->|yes| qhast + qisrc -.->|no| denied + qhast -->|yes| qadminfrom + qhast -.->|no| denied + qadminfrom -->|yes| qblocked + qadminfrom -.->|no| qallowft + qallowft -->|yes| qmodacc + qallowft -.->|no| qauthz + qmodacc -.->|no| qblocked + qmodacc -->|yes| denied + qauthz -->|yes| qblocked + qauthz -.->|no| denied + qblocked -.->|no| ok + qblocked -->|yes| denied + + linkStyle 2,4,10,12,14 stroke:#b30000,color:#b30000 + linkStyle 13 stroke:#1b8500,color:#1b8500 +``` + +### Quarantine Complexities + +There are some noteable complexities involving restricted coins and quarantined accounts. + +#### Sending Restricted Coins to a Quarantined Account + +The marker module's `SendRestrictionFn` is applied before the quarantine module's. So, when funds are being sent to a quarantined account, the marker module runs its check using the original `Sender` and `Receiver` (i.e. the `Receiver` is not `QFH`). + +If the `Receiver` is a quarantined account, we can assume that it is neither a marker, nor a bypass account. Then, assuming the `Sender` is not on the deny list, the `validateSendDenom` flow can be simplified to this for restricted coins. + +```mermaid +%%{ init: { 'flowchart': { 'curve': 'monotoneY'} } }%% +flowchart LR + vsd[["validateSendDenom(Sender, Receiver, Denom)"]] + transq{{"Does Sender have\ntransfer for Denom?"}} + mreqattr{{"Does Denom have\nrequired attributes?"}} + treqattr{{"Does Receiver have\nthose attributes?"}} + ok(["Denom transfer allowed."]) + style ok fill:#bbffaa,stroke:#1b8500,stroke-width:3px + denied(["Send denied."]) + style denied fill:#ffaaaa,stroke:#b30000,stroke-width:3px + transq -->|yes| ok + transq -.->|no| mreqattr + mreqattr -->|yes| treqattr + mreqattr -.->|no| denied + treqattr -->|yes| ok + treqattr -.->|no| denied + + linkStyle 3,5 stroke:#b30000,color:#b30000 + linkStyle 0,4 stroke:#1b8500,color:#1b8500 +``` + +If the `Send` is allowed, and the `Receiver` is a quarantined account, the quarantine module's `SendRestrictionFn` will then change the `Send`'s destination to `QFH` (the Quarantined-funds-holder account) and make a record of the transfer. The `Send` then transfers funds from the `Sender` to `QFH`. + +The marker's `SendRestrictionFn` should never have `QFH` as a `Receiver`. The only way this would happen is if `MsgSend` is used to send funds directly to `QFH`. 
+ +If `MsgTransferRequest` is used to transfer a restricted coin to a quarantined account, the standard `MsgTransferRequest` logic is applied (bypassing the marker module's `SendRestrictionFn`). The quarantine module's `SendRestrictionFn` is not bypassed, though, so the funds still go to the `QFH`. + +#### Accepting Quarantined Restricted Coins + +Once funds have been sent to `QFH`, the `Receiver` will probably want to accept them, and have them sent to their account. They issue an `Accept` to the quarantine module which utilizes the bank module's `Send` functionality to try to transfer funds from `QFH` to the `Receiver`. + +`QFH` is a bypass account. Since `Receiver` is a quarantined account, we can assume that it is neither a marker nor bypass account. So, the `validateSendDenom` flow can be simplified to this for restricted coins. + +```mermaid +%%{ init: { 'flowchart': { 'curve': 'monotoneY'} } }%% +flowchart LR + vsd[["validateSendDenom(Sender, Receiver, Denom)"]] + mreqattr{{"Does Denom have\nrequired attributes?"}} + treqattr{{"Does Receiver have\nthose attributes?"}} + ok(["Denom transfer allowed."]) + style ok fill:#bbffaa,stroke:#1b8500,stroke-width:3px + denied(["Send denied."]) + style denied fill:#ffaaaa,stroke:#b30000,stroke-width:3px + mreqattr -->|yes| treqattr + mreqattr -.->|no| ok + treqattr -->|yes| ok + treqattr -.->|no| denied + + linkStyle 3 stroke:#b30000,color:#b30000 + linkStyle 1,2 stroke:#1b8500,color:#1b8500 + +``` + +If the `Send` is allowed, the requested funds are transferred from `QFH` to `Receiver`. + +If the `Send` is denied, the funds remain with `QFH`. + +An important subtle part of this process is the rechecking of `Receiver` attributes. It's possible for the initial send to be okay (causing funds to be quarantined), then later, during this `Accept`, the send is not okay, and the quarantined funds are effectively locked with`QFH` until the `Receiver` gets the required attributes. + +If the marker does not have required attributes though, it's assumed that they were originally sent by someone with transfer authority, so they are allowed to continue from here too. + +#### Successful Quarantine and Accept Sequence + +When restricted coin funds are sent to a quarantined account (1), the marker's `SendRestrictionFn` is called using the original `Sender` and `Receiver` (2). Then, the quarantine's `SendRestrictionFn` is called (4) which will return `QFH` for the new destination (5). Funds are then transferred from `Sender` to `QFH` (6). + +When the `Receiver` attempts to `Accept` those quarantined funds (7), the marker's `SendRestrictionFn` is called again, this time using `QFH` (as the sender) and `Receiver` (9). The quarantine's `SendRestrictionFn` is bypassed (11), so the destination is not changed (12). Funds are then transferred from `QFH` to `Receiver` (13). + +```mermaid +sequenceDiagram + autonumber + actor Sender + actor Receiver + participant Bank Module + participant Quarantine Module + participant Marker Restriction + participant Quarantine Restriction + participant QFH + Sender ->>+ Bank Module: Send(sender, receiver) + Bank Module ->>+ Marker Restriction: Is this send from Sender to Receiver allowed? + Marker Restriction -->>- Bank Module: Yes + Bank Module ->>+ Quarantine Restriction: Is Receiver quarantined? + Quarantine Restriction -->>- Bank Module: Yes. Change destination to QFH. + Sender ->> QFH: Funds transferred from Sender to QFH. 
+
+    deactivate Bank Module
+
+    Note over Sender,QFH: Some Time Later
+
+    Receiver ->>+ Quarantine Module: Accept(receiver, sender)
+    Quarantine Module ->> Bank Module: Send(QFH, receiver)
+    activate Bank Module
+    Bank Module ->>+ Marker Restriction: Is this send from QFH to Receiver allowed?
+    Marker Restriction -->>- Bank Module: Yes
+    Bank Module ->>+ Quarantine Restriction: Is Receiver quarantined?
+    Quarantine Restriction -->>- Bank Module: Restriction bypassed. No change.
+    QFH ->> Receiver: Funds transferred from QFH to Receiver.
+    deactivate Bank Module
+    deactivate Quarantine Module
+```
diff --git a/docs/sdk/marker/README.md b/docs/sdk/marker/README.md
new file mode 100644
index 000000000..d723d3971
--- /dev/null
+++ b/docs/sdk/marker/README.md
@@ -0,0 +1,44 @@
+# `x/marker`
+
+## Abstract
+
+This document specifies the marker module of the Provenance blockchain.
+
+The marker module provides the capability for creation and management of
+fungible tokens on the Provenance blockchain. Various types of tokens can
+be represented, including standard coins and restricted coins (securities).
+
+Further, the marker module allows for coins to be fixed upon creation or
+managed by an identified list of accounts, or through the governance
+proposal process.
+
+## Context
+
+Using the blockchain as a ledger requires the ability to track fungible and non-fungible resources on chain with
+fractional ownership. Each of these resources requires rules governing supply and exchange. Examples of resources
+include fractional ownership in the network itself (stake), credits for network resources (gas/fees), fractional
+ownership of an arbitrary asset (metadata/scope), and omnibus account balances (stable coins). The rules governing the
+asset must be enforced by the blockchain itself such that the entity controlling the asset must abide by these
+rules and is not able to invalidate these processes. These enforced constraints are what provide the value and
+support trust in the platform itself.
+
+## Overview
+
+The marker module provides various tools for defining fractional ownership and control. Markers can be created and
+managed by normal Msg requests or through the governance process. A marker can have many users with explicit control
+or none at all. A marker can be used to create a coin that can be freely exchanged, or one that requires facilitated
+transfer by the marker itself when invoked by a user/process with appropriate permissions.
+
+## Contents
+
+1. **[State](01_state.md)**
+1. **[State Transitions](02_state_transitions.md)**
+1. **[Messages](03_messages.md)**
+1. **[Begin Block](04_begin_block.md)**
+1. **[End Block](05_end_block.md)**
+1. **[Hooks](06_hooks.md)**
+1. **[Events](07_events.md)**
+1. **[Telemetry](08_telemetry.md)**
+1. **[Params](09_params.md)**
+1. **[Governance](10_governance.md)**
+1. **[Authorization](11_authorization.md)**
+1. **[Transfers](12_transfers.md)**
diff --git a/docs/sdk/metadata/01_concepts.md b/docs/sdk/metadata/01_concepts.md
new file mode 100644
index 000000000..0e1796db5
--- /dev/null
+++ b/docs/sdk/metadata/01_concepts.md
@@ -0,0 +1,172 @@
+# Metadata Concepts
+
+The metadata service manages things that define and reference off-chain data.
+There are three categories of things stored: entries, specifications, and object store locators.
+Each entry and specification has a unique Metadata Address that is often simply called its "id".
+Additionally, several indexes are created to help with linking and iterating over related messages.
+ + + - [Entries](#entries) + - [Specifications](#specifications) + - [Metadata Addresses](#metadata-addresses) + - [MetadataAddress Example Implementations](#metadataaddress-example-implementations) + - [MetadataAddress General Guidelines](#metadataaddress-general-guidelines) + - [Indexes](#indexes) + - [Signing Requirements](#signing-requirements) + - [Scope Value Owner Address Requirements](#scope-value-owner-address-requirements) + - [Smart Contract Requirements](#smart-contract-requirements) + - [With Party Rollup Required](#with-party-rollup-required) + - [Without Party Rollup Required](#without-party-rollup-required) + + + +## Entries + +The term "entries" refers to scopes, sessions, and records. +See [Entries](02_state.md#entries) for details. + +## Specifications + +The term "specifications" refers to scope specifications, contract specifications, and record specifications. +See [Specifications](02_state.md#specifications) for details. + +## Metadata Addresses + +Entries and Specifications must each have a unique metadata address. +These addresses are byte arrays that are commonly referered to as "ids". +As strings, they should be represented using the bech32 address format. +The addresses for the different messages have specific formats that help facilitate grouping and indexing. +All addresses start with a single byte that identifies the type, and are followed by 16 bytes commonly called a UUID. +Some address types contain other elements too. + +### MetadataAddress Example Implementations + +* Go: [address.go](https://github.com/provenance-io/provenance/blob/main/x/metadata/spec/examples/go/metadata_address.go) +* Kotlin: [MetadataAddress.kt](https://github.com/provenance-io/provenance/blob/main/x/metadata/spec/examples/kotlin/src/main/kotlin/MetadataAddress.kt) +* Javascript: [metadata-address.js](https://github.com/provenance-io/provenance/blob/main/x/metadata/spec/examples/js/lib/metadata-address.js) + +### MetadataAddress General Guidelines + +* As strings, the metadata addresses are represented using the bech32 address format. +* The `*IdInfo` messages defined in `metadata.proto` (e.g. `RecordIdInfo`) are used in response messages and contain a breakdown of a metadata address. +* Variables that hold the addresses as byte arrays should end in `_id`. +* Variables that hold the addresses as bech32 strings should end in `_addr`. +* Variables that hold UUIDs as strings should use the standard UUID format and end in `_uuid`. +* If a variable is a byte array that ends in `_id`, then it should be the full Metadata Address byte array. +* String variables that end in `_id` should only be used in input messages. + They should be flexible fields that can accept either the bech32 string version of the Metadata Address byte array, or a UUID in the standard UUID string format. +* If a variable ends in `_addr`, then it should be the bech32 string version of the Metadata Address byte array. +* If a variable ends in `_uuid`, then it should be a UUID in the standard UUID string format. + The exception to this is the byte array fields in the `*IdInfo` messages that represent the id broken into its various parts. + For example, `ScopeIdInfo.scope_id_scope_uuid` represents the UUID portion of the `scope_id`, and is left as a byte array, + but `ScopeIdInfo.scope_uuid` is the standard UUID string representation of those bytes. + +## Indexes + +Indexes are specially formatted entries in the kvstore used to find associated things. + +The keys contain all of the relevant information. 
+
+They are byte arrays with three parts:
+1. Type byte: A single byte representing the type of index.
+1. Part 1: Address of the starting thing in the association.
+1. Part 2: Address of the entry to find.
+
+The values are always a single byte: `0x01`.
+
+The general use of them is to create a prefix using the type byte and part 1.
+Then use that prefix to iterate over all keys with that same prefix.
+During iteration, remove the prefix from the current entry's key in order to get the key of the thing to find.
+
+## Signing Requirements
+
+Scopes have a `require_party_rollup` boolean field that dictates most signer requirements for a scope and all its sessions and records.
+There are also special signer considerations related to a scope's `value_owner_address` field.
+
+### Scope Value Owner Address Requirements
+
+These requirements are applied regardless of a scope's `require_party_rollup` value.
+They are applied when writing new scopes, updating existing scopes, and deleting scopes.
+
+If a scope with a value owner address is being updated, and the ONLY change is to that value owner address, then ONLY these signer requirements are applied and all other signer requirements are ignored.
+If the value owner address is not changing, these requirements do not apply.
+If the value owner address is changing as well as one or more other fields, these requirements apply as well as the other signer requirements.
+
+* When a value owner address is being set to a marker, at least one of the signers must have deposit permission on that marker.
+* When a value owner address is a marker and is being changed, at least one of the signers must have withdraw permission on that marker.
+* When a value owner address is a non-marker address, and is being changed, that existing address must be one of the signers.
+* When a value owner address is empty, and is being changed, standard scope signer requirements are also applied even if that's the only change to the scope.
+
+### Smart Contract Requirements
+
+The following are requirements related to smart contract usage of the `x/metadata` module:
+
+* A party with a smart contract address MUST have the `PROVENANCE` role.
+* A party with the `PROVENANCE` role MUST have the address of a smart contract.
+* When a smart contract signs a message, it MUST be first or have only smart-contract signers before it, and SHOULD include the invoker address(es) after.
+* When a smart contract is a signer, it must either be a party/owner, or have authorizations (via `x/authz`) from all signers after it.
+* If a smart contract is a signer, but not a party, it cannot be the only signer, and cannot be the last signer.
+
+### With Party Rollup Required
+
+When a scope has `require_party_rollup = true`, all session parties must also be listed in the scope owners.
+The use of `optional = true` parties is also allowed.
+The party types (aka roles) defined in specifications, in conjunction with the entry's parties, dictate the signers that are required (in addition to any `optional = false` parties).
+
+For example, if a scope has an `optional = false` `CONTROLLER` (address `A`) and two `optional = true` `SERVICER`s (addresses `B` and `C`),
+and a session is being written using a contract spec that requires just a `SERVICER` signature, then to write that session,
+either address `B` or `C` must be a signer (due to the contract spec), and `A` must also sign (because they're `optional = false` in the scope).
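+
+To make the example above concrete, here is a minimal Go sketch of that signer check. The `Party` type and `checkRollupSigners` helper are illustrative assumptions (not the module's actual types or logic); they only capture the two rules used in the example: every `optional = false` party must sign, and every role required by the contract specification must be covered by a signing party with that role.
+
+```go
+package main
+
+import "fmt"
+
+// Party is a simplified stand-in for a scope owner or session party.
+type Party struct {
+	Address  string
+	Role     string
+	Optional bool
+}
+
+// checkRollupSigners is an illustrative approximation of the rules described above:
+//   - every optional=false party must be a signer, and
+//   - every required role must be covered by a signer that is also a party with that role.
+func checkRollupSigners(parties []Party, requiredRoles []string, signers map[string]bool) error {
+	for _, p := range parties {
+		if !p.Optional && !signers[p.Address] {
+			return fmt.Errorf("missing signature from required party %s", p.Address)
+		}
+	}
+	for _, role := range requiredRoles {
+		covered := false
+		for _, p := range parties {
+			if p.Role == role && signers[p.Address] {
+				covered = true
+				break
+			}
+		}
+		if !covered {
+			return fmt.Errorf("no signer with required role %s", role)
+		}
+	}
+	return nil
+}
+
+func main() {
+	// Scope owners from the example: A (CONTROLLER, required), B and C (SERVICER, optional).
+	owners := []Party{
+		{Address: "A", Role: "CONTROLLER", Optional: false},
+		{Address: "B", Role: "SERVICER", Optional: true},
+		{Address: "C", Role: "SERVICER", Optional: true},
+	}
+	// The contract spec requires a SERVICER, so A plus one of B or C must sign.
+	fmt.Println(checkRollupSigners(owners, []string{"SERVICER"}, map[string]bool{"A": true, "B": true})) // <nil>
+	fmt.Println(checkRollupSigners(owners, []string{"SERVICER"}, map[string]bool{"A": true}))            // error: no SERVICER signed
+}
+```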
+ +#### Writing or Deleting a Scope With Party Rollup + +* All roles required by the scope spec must have a party in the owners. +* If not new: + * All `optional = false` existing owners must be signers. + * All roles required by the scope spec must have a signer and associated party from the existing scope. +* Scope value owner address requirements are applied. + +#### Writing a Session With Party Rollup + +* All proposed session parties must be present in this scope's owners. +* All `optional = false` scope owners must be signers. +* If new: + * All roles required by the contract spec must have a signer and associated party in the proposed session. +* If not new: + * All roles required by the contract spec must have a signer and associated party in the existing session. + * All roles required by the contract spec must have parties in the proposed session. + * All `optional = false` existing parties must also be signers. + +#### Writing a Record With Party Rollup + +* All roles required by the record spec must have a signer and associated party in the session. +* All `optional = false` scope owners and session parties must be signers. +* If the record is changing sessions, all `optional = false` previous session parties must be signers. + +#### Deleting a Record With Party Rollup + +* All roles required by the record spec must have a signer and associated party in the scope. +* All `optional = false` scope owners must be signers. + +### Without Party Rollup Required + +When a scope has `require_party_rollup = false`, then `optional = true` parties are not allowed in the scope or any of its sessions. + +#### Writing or Deleting a Scope Without Party Rollup + +* All roles required by the scope spec must have a party in the owners. +* If not new, all existing owners must sign. +* Scope value owner address requirements are applied. + +#### Writing a Session Without Party Rollup + +* All roles required by the contract spec must have a party in the session parties. +* All scope owners must sign. + +#### Writing a Record Without Party Rollup + +* All roles required by the record spec must have a party in the session parties. +* All session parties must sign. +* If the record is changing to a new session, all previous session parties must sign. + +#### Deleting a Record Without Party Rollup + +* All scope owners must sign. diff --git a/docs/sdk/metadata/02_state.md b/docs/sdk/metadata/02_state.md new file mode 100644 index 000000000..eb6755134 --- /dev/null +++ b/docs/sdk/metadata/02_state.md @@ -0,0 +1,455 @@ +# Metadata State + +The Metadata module manages the state of several types of entries related to off-chain information. + + + - [Entries](#entries) + - [Scopes](#scopes) + - [Sessions](#sessions) + - [Records](#records) + - [Specifications](#specifications) + - [Scope Specifications](#scope-specifications) + - [Contract Specifications](#contract-specifications) + - [Record Specifications](#record-specifications) + - [Object Store Locators](#object-store-locators) + + + +## Entries + +The term "entries" refers to scopes, sessions, and records. +They group and identify information. + +### Scopes + +A scope is a high-level grouping of information combined with some access control. + +* A scope must conform to a pre-determined scope specification. +* A scope is used to group together many sessions and records. + +#### Scope Keys (Metadata Addresses) + +Byte Array Length: `17` + +| Byte range | Description | +|------------|---------------------| +| 0 | `0x00` | +| 1-16 | UUID of this scope. 
| + +* Field Name: `Scope.scope_id` +* Bech32 HRP: `"scope"` +* Bech32 Example: `"scope1qzge0zaztu65tx5x5llv5xc9ztsqxlkwel"` + +#### Scope Values + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/scope.proto#L69-L96 + +```protobuf +// Scope defines a root reference for a collection of records owned by one or more parties. +message Scope { + option (gogoproto.goproto_stringer) = false; + + // Unique ID for this scope. Implements sdk.Address interface for use where addresses are required in Cosmos + bytes scope_id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = "MetadataAddress", + (gogoproto.moretags) = "yaml:\"scope_id\"" + ]; + // the scope specification that contains the specifications for data elements allowed within this scope + bytes specification_id = 2 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = "MetadataAddress", + (gogoproto.moretags) = "yaml:\"specification_id\"" + ]; + // These parties represent top level owners of the records within. These parties must sign any requests that modify + // the data within the scope. These addresses are in union with parties listed on the sessions. + repeated Party owners = 3 [(gogoproto.nullable) = false]; + // Addresses in this list are authorized to receive off-chain data associated with this scope. + repeated string data_access = 4 [(gogoproto.moretags) = "yaml:\"data_access\""]; + // An address that controls the value associated with this scope. Standard blockchain accounts and marker accounts + // are supported for this value. This attribute may only be changed by the entity indicated once it is set. + string value_owner_address = 5 [(gogoproto.moretags) = "yaml:\"value_owner_address\""]; + // Whether all parties in this scope and its sessions must be present in this scope's owners field. + // This also enables use of optional=true scope owners and session parties. + bool require_party_rollup = 6 [(gogoproto.moretags) = "yaml:\"require_party_rollup\""]; +} +``` + +#### Scope Indexes + +Scopes by owner: +* Type byte: `0x17` +* Part 1: The owner address (length byte then value bytes) +* Part 2: All bytes of the scope key + + +Scopes by Scope Specification: +* Type byte: `0x11` +* Part 1: All bytes of the scope specification key +* Part 2: All bytes of the scope key + +Scopes by value owner: +* Type byte: `0x18` +* Part 1: The value owner address (length byte then value bytes) +* Part 2: All bytes of the scope key + + + +### Sessions + +A session is a grouping of records and the parties in charge of those records. + +* A session must conform to a pre-determined contract specification. +* A session groups together a collection of records. +* A session is part of exactly one scope. + +#### Session Keys (Metadata Addresses) + +Byte Array Length: `33` + +| Byte range | Description | +|------------|------------------------------------------------| +| 0 | `0x01` | +| 1-16 | UUID of the scope that this session is part of | +| 17-32 | UUID of this session | + +* Field Name: `Session.session_id` +* Bech32 HRP: `"session"` +* Bech32 Example: `"session1qxge0zaztu65tx5x5llv5xc9zts9sqlch3sxwn44j50jzgt8rshvqyfrjcr"` + +#### Session Values + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/scope.proto#L98-L124 + +```protobuf +// Session defines an execution context against a specific specification instance. +// The context will have a specification and set of parties involved. 
+// +// NOTE: When there are no more Records within a Scope that reference a Session, the Session is removed. +message Session { + option (gogoproto.goproto_stringer) = false; + + bytes session_id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = "MetadataAddress", + (gogoproto.moretags) = "yaml:\"session_id\"" + ]; + // unique id of the contract specification that was used to create this session. + bytes specification_id = 2 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = "MetadataAddress", + (gogoproto.moretags) = "yaml:\"specification_id\"" + ]; + // parties is the set of identities that signed this contract + repeated Party parties = 3 [(gogoproto.nullable) = false]; + // name to associate with this session execution context, typically classname + string name = 4 [(gogoproto.jsontag) = "type", (gogoproto.moretags) = "yaml:\"type\""]; + // context is a field for storing client specific data associated with a session. + bytes context = 5; + // Created by, updated by, timestamps, version number, and related info. + AuditFields audit = 99 [(gogoproto.moretags) = "yaml:\"audit,omitempty\""]; +} +``` + +#### Session Indexes + +There are no extra indexes involving sessions. +Note, though, that the session key is constructed in a way that automatically indexes sessions by scope. + + + +### Records + +A record identifies the inputs and outputs of a process. +It is conceptually similar to the values involved in a method call. + +* A record must conform to a pre-determined record specification. +* A record is part of exactly one scope. +* A record is part of exactly one session. + +#### Record Keys (Metadata Addresses) + +Byte Array Length: `33` + +| Byte range | Description | +|------------|-------------------------------------------------------------| +| 0 | `0x02` | +| 1-16 | UUID of the scope that this record is part of | +| 17-32 | First 16 bytes of the SHA256 checksum of this record's name | + +* Field Name: `Record.record_id` +* Bech32 HRP: `"record"` +* Bech32 Example: `"record1q2ge0zaztu65tx5x5llv5xc9ztsw42dq2jdvmdazuwzcaddhh8gmu3mcze3"` + +#### Record Values + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/scope.proto#L126-L150 + +```protobuf +// A record (of fact) is attached to a session or each consideration output from a contract +message Record { + option (gogoproto.goproto_stringer) = false; + + // name/identifier for this record. Value must be unique within the scope. Also known as a Fact name + string name = 1 [(gogoproto.jsontag) = "id", (gogoproto.moretags) = "yaml:\"id\""]; + // id of the session context that was used to create this record (use with filtered kvprefix iterator) + bytes session_id = 2 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = "MetadataAddress", + (gogoproto.moretags) = "yaml:\"session_id\"" + ]; + // process contain information used to uniquely identify an execution on or off chain that generated this record + Process process = 3 [(gogoproto.nullable) = false]; + // inputs used with the process to achieve the output on this record + repeated RecordInput inputs = 4 [(gogoproto.nullable) = false]; + // output(s) is the results of executing the process on the given process indicated in this record + repeated RecordOutput outputs = 5 [(gogoproto.nullable) = false]; + // specification_id is the id of the record specification that was used to create this record. 
+ bytes specification_id = 6 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = "MetadataAddress", + (gogoproto.moretags) = "yaml:\"specification_id\"" + ]; +} +``` + +#### Record Indexes + +There are no extra indexes involving records. +Note, though, that the record key is constructed in a way that automatically indexes records by scope. + + + +## Specifications + +The term "specifications" refers to scope specifications, contract specifications, and record specifications. +They define validation parameters for the various entries. +Ideally, specifications will be used for multiple entries. + +### Scope Specifications + +A scope specification defines validation parameters for scopes. +They group together contract specifications and define roles that must be involved in a scope. + +#### Scope Specification Keys (Metadata Addresses) + +Byte Array Length: `17` + +| Byte range | Description | +|------------|----------------------------------| +| 0 | `0x04` | +| 1-16 | UUID of this scope specification | + +* Field Name: `ScopeSpecification.specification_id` +* Bech32 HRP: `"scopespec"` +* Bech32 Example: `"scopespec1qnwg86nsatx5pl56muw0v9ytlz3qu3jx6m"` + +#### Scope Specification Values + ++++ https://github.com/provenance-io/provenance/blob/4192fd46ea56574bb4ffcacb632d8bb54a720b28/proto/provenance/metadata/v1/specification.proto#L36-L58 + +```protobuf +// ScopeSpecification defines the required parties, resources, conditions, and consideration outputs for a contract +message ScopeSpecification { + option (gogoproto.goproto_stringer) = false; + + // unique identifier for this specification on chain + bytes specification_id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = "MetadataAddress", + (gogoproto.moretags) = "yaml:\"specification_id\"" + ]; + // General information about this scope specification. + Description description = 2; + // Addresses of the owners of this scope specification. + repeated string owner_addresses = 3 [(gogoproto.moretags) = "yaml:\"owner_addresses\""]; + // A list of parties that must be present on a scope (and their associated roles) + repeated PartyType parties_involved = 4 [(gogoproto.moretags) = "yaml:\"parties_involved\""]; + // A list of contract specification ids allowed for a scope based on this specification. + repeated bytes contract_spec_ids = 5 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = "MetadataAddress", + (gogoproto.moretags) = "yaml:\"contract_spec_ids\"" + ]; +} +``` + +#### Scope Specification Indexes + +Scope specifications by owner: +* Type byte: `0x19` +* Part 1: The owner address (length byte then value bytes) +* Part 2: All bytes of the scope specification key + + +Scope Specifications by contract specification: +* Type byte: `0x14` +* Part 1: All bytes of the contract specification key +* Part 2: All bytes of the scope specification key + + +Scopes by Scope Specification: +* Type byte: `0x11` +* Part 1: All bytes of the scope specification key +* Part 2: All bytes of the scope key + + + +### Contract Specifications + +A contract specification defines validation parameters for sessions. +They contain source information and roles that must be involved in a session. +They also group together record specifications. + +A contract specification can be part of multiple scope specifications. 
+ +#### Contract Specification Keys (Metadata Addresses) + +Byte Array Length: `17` + +| Byte range | Description | +|------------|-------------------------------------| +| 0 | `0x03` | +| 1-16 | UUID of this contract specification | + +* Field Name: `ContractSpecification.specification_id` +* Bech32 HRP: `"contractspec"` +* Bech32 Example: `"contractspec1q000d0q2e8w5say53afqdesxp2zqzkr4fn"` + +#### Contract Specification Values + ++++ https://github.com/provenance-io/provenance/blob/4192fd46ea56574bb4ffcacb632d8bb54a720b28/proto/provenance/metadata/v1/specification.proto#L60-L86 + +```protobuf +// ContractSpecification defines the required parties, resources, conditions, and consideration outputs for a contract +message ContractSpecification { + option (gogoproto.goproto_stringer) = false; + + // unique identifier for this specification on chain + bytes specification_id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = "MetadataAddress", + (gogoproto.moretags) = "yaml:\"specification_id\"" + ]; + // Description information for this contract specification + Description description = 2; + // Address of the account that owns this specificaiton + repeated string owner_addresses = 3 [(gogoproto.moretags) = "yaml:\"owner_addresses\""]; + // a list of party roles that must be fullfilled when signing a transaction for this contract specification + repeated PartyType parties_involved = 4 [(gogoproto.moretags) = "yaml:\"parties_involved\""]; + // Reference to a metadata record with a hash and type information for the instance of code that will process this + // contract + oneof source { + // the address of a record on chain that represents this contract + bytes resource_id = 5 [(gogoproto.customtype) = "MetadataAddress", (gogoproto.moretags) = "yaml:\"resource_id\""]; + // the hash of contract binary (off-chain instance) + string hash = 6; + } + // name of the class/type of this contract executable + string class_name = 7 [(gogoproto.moretags) = "yaml:\"class_name\""]; +} +``` + +#### Contract Specification Indexes + +Contract specifications by owner: +* Type byte: `0x20` +* Part 1: The owner address (length byte then value bytes) +* Part 2: All bytes of the contract specification key + + +Scope Specifications by contract specification: +* Type byte: `0x14` +* Part 1: All bytes of the contract specification key +* Part 2: All bytes of the scope specification key + + + +### Record Specifications + +A record specification defines validation parameters for records. +They contain expected inputs and outputs and parties that must be involved in a record. + +A record specification is part of exactly one contract specification. 
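+
+To make these key layouts concrete, here is a minimal Go sketch that assembles a contract specification key (described above) and a record specification key (detailed in the next subsection) and bech32-encodes them. It assumes `github.com/google/uuid` and the Cosmos SDK `bech32` helper as dependencies, and it is only an approximation of the module's real `MetadataAddress` implementation (see the example implementations linked earlier).
+
+```go
+package main
+
+import (
+	"crypto/sha256"
+	"fmt"
+
+	"github.com/cosmos/cosmos-sdk/types/bech32"
+	"github.com/google/uuid"
+)
+
+// contractSpecKey builds the 17-byte contract specification key: 0x03 + spec UUID.
+func contractSpecKey(specUUID uuid.UUID) []byte {
+	return append([]byte{0x03}, specUUID[:]...)
+}
+
+// recordSpecKey builds the 33-byte record specification key:
+// 0x05 + contract spec UUID + first 16 bytes of the SHA-256 checksum of the record name.
+// Note: the real implementation may normalize the name (e.g. lowercase it) before hashing.
+func recordSpecKey(contractSpecUUID uuid.UUID, name string) []byte {
+	sum := sha256.Sum256([]byte(name))
+	key := append([]byte{0x05}, contractSpecUUID[:]...)
+	return append(key, sum[:16]...)
+}
+
+func main() {
+	contractUUID := uuid.New()
+
+	csAddr, _ := bech32.ConvertAndEncode("contractspec", contractSpecKey(contractUUID))
+	rsAddr, _ := bech32.ConvertAndEncode("recspec", recordSpecKey(contractUUID, "loan"))
+
+	fmt.Println(csAddr) // contractspec1...
+	fmt.Println(rsAddr) // recspec1...
+}
+```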
+ +#### Record Specification Keys (Metadata Addresses) + +Byte Array Length: `33` + +| Byte range | Description | +|------------|------------------------------------------------------------------------------| +| 0 | `0x05` | +| 1-16 | UUID of the contract specification that this record specification is part of | +| 17-32 | First 16 bytes of the SHA256 checksum of this record specification's name | + +* Field Name: `RecordSpecification.specification_id` +* Bech32 HRP: `"recspec"` +* Bech32 Example: `"recspec1qh00d0q2e8w5say53afqdesxp2zw42dq2jdvmdazuwzcaddhh8gmuqhez44"` + +#### Record Specification Values + ++++ https://github.com/provenance-io/provenance/blob/4192fd46ea56574bb4ffcacb632d8bb54a720b28/proto/provenance/metadata/v1/specification.proto#L88-L108 + +```protobuf +// RecordSpecification defines the specification for a Record including allowed/required inputs/outputs +message RecordSpecification { + option (gogoproto.goproto_stringer) = false; + + // unique identifier for this specification on chain + bytes specification_id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = "MetadataAddress", + (gogoproto.moretags) = "yaml:\"specification_id\"" + ]; + // Name of Record that will be created when this specification is used + string name = 2; + // A set of inputs that must be satisified to apply this RecordSpecification and create a Record + repeated InputSpecification inputs = 3; + // A type name for data associated with this record (typically a class or proto name) + string type_name = 4 [(gogoproto.moretags) = "yaml:\"type_name\""]; + // Type of result for this record specification (must be RECORD or RECORD_LIST) + DefinitionType result_type = 5 [(gogoproto.moretags) = "yaml:\"result_type\""]; + // Type of party responsible for this record + repeated PartyType responsible_parties = 6 [(gogoproto.moretags) = "yaml:\"responsible_parties\""]; +} +``` + +#### Record Specification Indexes + +There are no extra indexes involving record specifications. +Note, though, that the record key is constructed in a way that automatically indexes record specifications by contract specification. + + + +## Object Store Locators + +An object store locator indicates the location of off-chain data. + +#### Object Store Locator Keys + +Byte Array Length: `21` + +| Byte range | Description | +|--------------|---------------------------------------------------------| +| 0 | `0x21` | +| 1 | Owner address length, either `0x14` (20) or `0x20` (32) | +| 2-(21 or 33) | The bytes of the owner address. | + +#### Object Store Locator Values + ++++ https://github.com/provenance-io/provenance/blob/main/proto/provenance/metadata/v1/objectstore.proto#L9-L16 + +```protobuf +// Defines an Locator object stored on chain, which represents a owner( blockchain address) associated with a endpoint +// uri for it's associated object store. +message ObjectStoreLocator { + // account address the endpoint is owned by + string owner = 1; + // locator endpoint uri + string locator_uri = 2; +} +``` + +#### Object Store Locator Indexes + +There are no extra indexes involving object store locators. diff --git a/docs/sdk/metadata/03_messages.md b/docs/sdk/metadata/03_messages.md new file mode 100644 index 000000000..bd08e10bb --- /dev/null +++ b/docs/sdk/metadata/03_messages.md @@ -0,0 +1,662 @@ +# Metadata Messages + +In this section we describe the processing of the metadata messages and the corresponding updates to the state. 
+All created/modified state objects specified by each message are defined within the [state](02_state.md) section. + +These endpoints, requests, and responses are defined in [tx.proto](https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto). + + + - [Entries](#entries) + - [Msg/WriteScope](#msgwritescope) + - [Msg/DeleteScope](#msgdeletescope) + - [Msg/AddScopeDataAccess](#msgaddscopedataaccess) + - [Msg/DeleteScopeDataAccess](#msgdeletescopedataaccess) + - [Msg/AddScopeOwner](#msgaddscopeowner) + - [Msg/DeleteScopeOwner](#msgdeletescopeowner) + - [Msg/UpdateValueOwners](#msgupdatevalueowners) + - [Msg/MigrateValueOwner](#msgmigratevalueowner) + - [Msg/WriteSession](#msgwritesession) + - [Msg/WriteRecord](#msgwriterecord) + - [Msg/DeleteRecord](#msgdeleterecord) + - [Specifications](#specifications) + - [Msg/WriteScopeSpecification](#msgwritescopespecification) + - [Msg/DeleteScopeSpecification](#msgdeletescopespecification) + - [Msg/WriteContractSpecification](#msgwritecontractspecification) + - [Msg/DeleteContractSpecification](#msgdeletecontractspecification) + - [Msg/AddContractSpecToScopeSpec](#msgaddcontractspectoscopespec) + - [Msg/DeleteContractSpecFromScopeSpec](#msgdeletecontractspecfromscopespec) + - [Msg/WriteRecordSpecification](#msgwriterecordspecification) + - [Msg/DeleteRecordSpecification](#msgdeleterecordspecification) + - [Object Store Locators](#object-store-locators) + - [Msg/BindOSLocator](#msgbindoslocator) + - [Msg/DeleteOSLocator](#msgdeleteoslocator) + - [Msg/ModifyOSLocator](#msgmodifyoslocator) + - [Account Data](#account-data) + - [Msg/SetAccountData](#msgsetaccountdata) + - [Authz Grants](#authz-grants) + + + +--- +## Entries + +### Msg/WriteScope + +A scope is created or updated using the `WriteScope` service method. + +Scopes are identified using their `scope_id`. + +#### Request + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L76-L99 + +The `scope_uuid` field is optional. +It should be a uuid formated as a string using the standard UUID format. +If supplied, it will be used to generate the appropriate scope id for use in the `scope.scope_id` field. + +The `spec_uuid` field is optional. +It should be a uuid formated as a string using the standard UUID format. +If supplied, it will be used to generate the appropriate scope specification id for use in the `scope.specification_id` field. + +#### Response + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L101-L105 + +#### Expected failures + +This service message is expected to fail if: +* The `scope_id` is missing or invalid. +* The `specification_id` is missing or invalid. +* The `owners` list is empty. +* Any of the owner `address` values aren't bech32 address strings. +* Any of the `data_access` values aren't bech32 address strings. +* A `value_owner_address` is provided that isn't a bech32 address string. +* The `signers` do not have permission to write the scope. + +--- +### Msg/DeleteScope + +A scope is deleted using the `DeleteScope` service method. 
+ +#### Request + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L107-L120 + +#### Response + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L122-L123 + +#### Expected failures + +This service message is expected to fail if: +* No scope exists with the given `scope_id`. +* The `signers` do not have permission to delete the scope. + +--- +### Msg/AddScopeDataAccess + +Addresses can be added to a scope's data access list using the `AddScopeDataAccess` service method. + +#### Request + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L125-L142 + +#### Response + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L144-L145 + +#### Expected failures + +This service message is expected to fail if: +* Any provided address is invalid. +* Any provided address is already in the scope's data access list. +* The `signers` do not have permission to update the scope. + +--- +### Msg/DeleteScopeDataAccess + +Addresses can be deleted from a scope's data access list using the `DeleteScopeDataAccess` service method. + +#### Request + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L147-L164 + +#### Response + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L166-L167 + +#### Expected failures + +This service message is expected to fail if: +* Any provided address is not already in the scope's data access list. +* The `signers` do not have permission to update the scope. + +--- +### Msg/AddScopeOwner + +Scope owners can be added to a scope using the `AddScopeOwner` service method. + +#### Request + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L169-L186 + +#### Response + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L188-L189 + +#### Expected failures + +This service message is expected to fail if: +* Any new party is invalid. +* An `optional = true` party is being added to a `require_party_rollup = false` scope. +* The `signers` do not have permission to update the scope. + +--- +### Msg/DeleteScopeOwner + +Scope owners can be deleted from a scope using the `DeleteScopeOwner` service method. +All owner parties with any of the provided addresses will be removed from the scope. + +#### Request + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L191-L208 + +#### Response + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L210-L211 + +#### Expected failures + +This service message is expected to fail if: +* Any provided `owners` (addresses) are not an address in a party in the scope. +* The resulting scope owners do not meet scope specification requirements. +* The `signers` do not have permission to update the scope. + +--- +### Msg/UpdateValueOwners + +The value owner address of one or more scopes can be updated using the `UpdateValueOwners` service method. 
+ +#### Request + ++++ https://github.com/provenance-io/provenance/blob/37cdb0c84db7b2f91aef057a606c5ba6aece06a1/proto/provenance/metadata/v1/tx.proto#L219-L235 + +#### Response + ++++ https://github.com/provenance-io/provenance/blob/37cdb0c84db7b2f91aef057a606c5ba6aece06a1/proto/provenance/metadata/v1/tx.proto#L237-L238 + +#### Expected failures + +This service message is expected to fail if: +* The new value owner address is invalid. +* Any of the provided scope ids are not metadata scope identifiers or do not exist. +* The signers are not allowed to update the value owner address of a provided scope. + +--- +### Msg/MigrateValueOwner + +All scopes with a given existing value owner address can be updated to have a new proposed value owner address using the `MigrateValueOwner` endpoint. + +#### Request + ++++ https://github.com/provenance-io/provenance/blob/37cdb0c84db7b2f91aef057a606c5ba6aece06a1/proto/provenance/metadata/v1/tx.proto#L240-L252 + +#### Response + ++++ https://github.com/provenance-io/provenance/blob/37cdb0c84db7b2f91aef057a606c5ba6aece06a1/proto/provenance/metadata/v1/tx.proto#L254-L255 + +#### Expected failures + +This service message is expected to fail if: +* Either the existing or proposed values are not valid bech32 addresses. +* The existing address is not a value owner on any scopes. +* The signers are not allowed to update the value owner address of a scope being updated. + +--- +### Msg/WriteSession + +A session is created or updated using the `WriteSession` service method. + +Sessions are identified using their `session_id`. + +#### Request + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L213-L238 + +The `session_id_components` field is optional. +If supplied, it will be used to generate the appropriate session id for use in the `session.session_id` field. + +The `spec_uuid` field is optional. +It should be a uuid formated as a string using the standard UUID format. +If supplied, it will be used to generate the appropriate contract specification id for use in the `session.specification_id` field. + +#### Response + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L253-L257 + +#### Expected failures + +This service message is expected to fail if: +* The `session_id` is missing or invalid. +* The `specification_id` is missing or invalid. +* The `parties` list is empty. +* Any of the `parties` have an `address` that isn't a bech32 address string. +* Any of the `parties` have a `role` of `unspecified`. +* The `audit.message` string is longer than 200 characters. +* The `specification_id` is being changed. +* The session is being updated, but no `name` is provided. +* The session's scope cannot be found. +* The session's contract specification does not exist. +* The `signers` do not have permission to write the session. +* The `audit` fields are changed. + +--- +### Msg/WriteRecord + +A record is created or updated using the `WriteRecord` service method. + +Records are identified using their `name` and `session_id`. + +#### Request + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L260-L289 + +The `session_id_components` field is optional. +If supplied, it will be used to generate the appropriate session id for use in the `record.session_id` field. + +The `contract_spec_uuid` field is optional. 
+It should be a uuid formated as a string using the standard UUID format. +If supplied, it will be used with `record.name` to generate the appropriate record specification id for use in the `record.specification_id` field. + +#### Response + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L291-L295 + +#### Expected failures + +This service message is expected to fail if: +* The `session_id` is missing or invalid. +* The `specification_id` is provided but invalid. +* An entry in `inputs` does not have a `name`. +* An entry in `inputs` does not have a `source`. +* An entry in `inputs` has a `source` type that doesn't match the input's `status`. +* An entry in `inputs` has a `record_id` `source` but the `record_id` is missing or invalid. +* An entry in `inputs` does not have a `type_name`. +* An entry in `outputs` has a `status` of `unspecified`. +* An entry in `outputs` has a `status` of `pass` or `fail`, and doesn't have a `hash`. +* The `name` is missing. +* The `process.method` is missing. +* The `process.name` is missing. +* The `process.process_id` is missing. +* A record is being updated and the `name` values are different. +* A record is being updated and the `session` values are different. +* A record is being updated and the `specification_id` values are different. +* The record's scope cannot be found. +* The record's session cannot be found. +* The record's contract specification cannot be found. +* The record's record specification cannot be found. +* There are duplicate `inputs` by `name`. +* An entry in `inputs` exists that is not part of the record specification. +* The `inputs` list does not contain one or more inputs defined in the record specification. +* An entry in `inputs` has a `type_name` different from its input specification. +* An entry in `inputs` has a `source` type that doesn't match the input specification. +* An entry in `inputs` has a `source` value that doesn't match the intput specification. +* The record specification has a result type of `record` but there isn't exactly one entry in `outputs`. +* The record specification has a result type of `record_list` but the `outputs` list is empty. +* The `signers` do not have permission to write the record. + +--- +### Msg/DeleteRecord + +A record is deleted using the `DeleteRecord` service method. + +#### Request + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L297-L310 + +#### Response + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L312-L313 + +#### Expected failures + +This service message is expected to fail if: +* No record exists with the given `record_id`. +* The `signers` do not have permission to delete the record. + + + +--- +## Specifications + +### Msg/WriteScopeSpecification + +A scope specification is created or updated using the `WriteScopeSpecification` service method. + +Scope specifications are identified using their `specification_id`. + +#### Request + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L315-L333 + +The `spec_uuid` field is optional. +It should be a uuid formated as a string using the standard UUID format. +If supplied, it will be used to generate the appropriate scope specification id for use in the `specification.specification_id` field. 
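+
+As an illustration of that derivation, here is a minimal sketch that uses the example helper included alongside
+these docs (`docs/sdk/metadata/examples/go/metadata_address.go`; the import path below is illustrative, not a
+published module):
+
+```golang
+package main
+
+import (
+    "fmt"
+
+    "github.com/google/uuid"
+
+    // Illustrative import path for docs/sdk/metadata/examples/go/metadata_address.go.
+    provenance "example.com/docs/sdk/metadata/examples/go"
+)
+
+func main() {
+    specUUID := uuid.MustParse("dc83ea70-eacd-40fe-9adf-1cf6148bf8a2")
+    specID := provenance.MetadataAddressForScopeSpecification(specUUID)
+    // Prints the bech32 form, e.g. scopespec1qnwg86nsatx5pl56muw0v9ytlz3qu3jx6m.
+    fmt.Println(specID.String())
+}
+```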
+ +#### Response + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L335-L339 + +#### Expected failures + +This service message is expected to fail if: +* The `specificatio_id` is missing or invalid. +* The `description` has an empty `name` or the `name` is longer than 200 characters. +* The `description` has a `description` longer than 5000 characters. +* The `description` has a `website_url` or `icon_url` that is empty or longer than 2048 characters. +* The `description` has a `website_url` or `icon_url` that has a protocol other than `http`, `https`, or `data`. +* The `owners` list is empty. +* One of the entries in `owners` is not a valid bech32 address. +* The `parties_involved` list is empty. +* One of the entries in `contract_spec_ids` is invalid. +* One of the entries in `contract_spec_ids` does not exist. +* One or more `owners` of the existing scope specification are not `signers`. + +--- +### Msg/DeleteScopeSpecification + +A scope specification is deleted using the `DeleteScopeSpecification` service method. + +#### Request + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L341-L354 + +#### Response + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L356-L357 + +#### Expected failures + +This service message is expected to fail if: +* No scope specification exists with the given `specification_id` +* One or more `owners` are not `signers`. + +--- +### Msg/WriteContractSpecification + +A contract specification is created or updated using the `WriteContractSpecification` service method. + +Contract specifications are identified using their `specification_id`. + +#### Request + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L359-L377 + +The `spec_uuid` field is optional. +It should be a uuid formated as a string using the standard UUID format. +If supplied, it will be used to generate the appropriate contract specification id for use in the `specification.specification_id` field. + +#### Response + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L379-L384 + +#### Expected failures + +This service message is expected to fail if: +* The `specification_id` is missing or invalid. +* The `description` has an empty `name` or the `name` is longer than 200 characters. +* The `description` has a `description` longer than 5000 characters. +* The `description` has a `website_url` or `icon_url` that is empty or longer than 2048 characters. +* The `description` has a `website_url` or `icon_url` that has a protocol other than `http`, `https`, or `data`. +* The `owners` list is empty. +* One of the entries in `owners` is not a valid bech32 address. +* The `parties_involved` list is empty. +* The `source` is empty. +* The `source` is a resource id, that is invalid. +* The `source` is a hash that is empty. +* The `class_name` is empty or longer than 1000 characters. +* One or more `owners` of the existing contract specification are not `signers`. + +--- +### Msg/DeleteContractSpecification + +A contract specification is deleted using the `DeleteContractSpecification` service method. + +This will also delete all record specifications associated with this contract specification. 
+
+#### Request
+
++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L437-L450
+
+#### Response
+
++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L452-L453
+
+#### Expected failures
+
+This service message is expected to fail if:
+* No contract specification exists with the given `specification_id`.
+* One or more `owners` are not `signers`.
+* One of the record specifications associated with this contract specification cannot be deleted.
+
+---
+### Msg/AddContractSpecToScopeSpec
+
+A contract specification can be added to a scope specification using the `AddContractSpecToScopeSpec` service method.
+
+#### Request
+
++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L386-L406
+
+#### Response
+
++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L408-L409
+
+#### Expected failures
+
+This service message is expected to fail if:
+* The `contract_specification_id` is missing or invalid.
+* The `scope_specification_id` is missing or invalid.
+* The contract specification does not exist.
+* The scope specification does not exist.
+* The contract specification is already allowed in the provided scope specification.
+* One or more of the scope specification `owners` are not `signers`.
+
+---
+### Msg/DeleteContractSpecFromScopeSpec
+
+A contract specification can be removed from a scope specification using the `DeleteContractSpecFromScopeSpec` service method.
+
+#### Request
+
++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L411-L431
+
+#### Response
+
++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L433-L435
+
+#### Expected failures
+
+This service message is expected to fail if:
+* The `contract_specification_id` is missing or invalid.
+* The `scope_specification_id` is missing or invalid.
+* The scope specification does not exist.
+* The contract specification is not already allowed in the provided scope specification.
+* One or more of the scope specification `owners` are not `signers`.
+
+---
+### Msg/WriteRecordSpecification
+
+A record specification is created or updated using the `WriteRecordSpecification` service method.
+
+Record specifications are identified using their `specification_id`.
+
+#### Request
+
++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L455-L473
+
+The `contract_spec_uuid` field is optional.
+It should be a uuid formatted as a string using the standard UUID format.
+If supplied, it will be used with the `specification.name` to generate the appropriate record specification id for use in the `specification.specification_id` field.
+
+#### Response
+
++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L475-L480
+
+#### Expected failures
+
+This service message is expected to fail if:
+* The `specification_id` is missing or invalid.
+* No contract specification exists with the given contract specification id portion of the `specification_id`.
+* One or more contract specification `owners` are not `signers`.
+* The `name` is longer than 200 characters.
+* One of the `input_specifications` is missing a `name` or its `name` is longer than 200 characters.
+* One of the `input_specifications` is missing a `type_name` or its `type_name` is longer than 1000 characters.
+* One of the `input_specifications` is missing a `source`.
+* One of the `input_specifications` has a `source` that is a record id that is missing or invalid.
+* One of the `input_specifications` has a `source` that is a hash that is missing.
+* The `type_name` is longer than 1000 characters.
+* The `responsible_parties` list is empty.
+* The `result_type` is unspecified.
+* A record specification is being updated and the `name` values are different.
+* A record specification is being updated and the `specification_id` values are different.
+
+---
+### Msg/DeleteRecordSpecification
+
+A record specification is deleted using the `DeleteRecordSpecification` service method.
+
+#### Request
+
++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L482-L495
+
+#### Response
+
++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L497-L498
+
+#### Expected failures
+
+This service message is expected to fail if:
+* No record specification exists with the given `specification_id`.
+* No contract specification exists with the given contract specification id portion of the `specification_id`.
+* One or more `owners` of the contract specification are not `signers`.
+
+---
+## Object Store Locators
+
+### Msg/BindOSLocator
+
+An Object Store Locator entry is created using the `BindOSLocator` service method.
+
+#### Request
+
++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L500-L506
+
+#### Response
+
++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L508-L511
+
+#### Expected failures
+
+This service message is expected to fail if:
+* The `owner` is missing.
+* The `owner` is not a valid bech32 address.
+* The `uri` is empty.
+* The `uri` is not a valid URI.
+* The `owner` does not match an existing account.
+* An object store locator already exists for the given `owner`.
+
+---
+### Msg/DeleteOSLocator
+
+An Object Store Locator entry is deleted using the `DeleteOSLocator` service method.
+
+#### Request
+
++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L513-L520
+
+#### Response
+
++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L522-L525
+
+#### Expected failures
+
+This service message is expected to fail if:
+* The `owner` is missing.
+* The `owner` is not a valid bech32 address.
+* The `uri` is empty.
+* The `uri` is not a valid URI.
+* The `owner` does not match an existing account.
+* An object store locator does not exist for the given `owner`.
+
+---
+### Msg/ModifyOSLocator
+
+An Object Store Locator entry is updated using the `ModifyOSLocator` service method.
+
+Object Store Locators are identified by their `owner`.
+ +#### Request + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L527-L533 + +#### Response + ++++ https://github.com/provenance-io/provenance/blob/812cb97c77036b8df59e10845fa8a04f4ba84c43/proto/provenance/metadata/v1/tx.proto#L535-L538 + +#### Expected failures + +This service message is expected to fail if: +* The `owner` is missing. +* The `owner` is not a valid bech32 address. +* The `uri` is empty. +* The `uri` is not a valid URI. +* The `owner` does not match an existing account. +* An object store locator does not exist for the given `owner`. + +--- +## Account Data + +### Msg/SetAccountData + +Simple data (a string) can be associated with scopes using the `SetAccountData` service method. + ++++ https://github.com/provenance-io/provenance/blob/e83f1955cba07e2ba87790c4487d22632ae9e69c/proto/provenance/metadata/v1/tx.proto#L589-L606 + ++++ https://github.com/provenance-io/provenance/blob/e83f1955cba07e2ba87790c4487d22632ae9e69c/proto/provenance/metadata/v1/tx.proto#L608-L609 + +This service message is expected to fail if: +* The provided address is not a scope id. +* The provided scope id does not exist. +* The signers do not have authority to update the entry. +* The provided value is too long (as defined by the attribute module params). + +--- +## Authz Grants + +Authz requires the use of fully qualified message type URLs when applying grants to an address. See [04_authz.md](04_authz.md) for more details. + +Fully qualified `metadata` message type URLs: +- `/provenance.metadata.v1.MsgWriteScopeRequest` +- `/provenance.metadata.v1.MsgDeleteScopeRequest` +- `/provenance.metadata.v1.MsgAddScopeDataAccessRequest` +- `/provenance.metadata.v1.MsgDeleteScopeDataAccessRequest` +- `/provenance.metadata.v1.MsgAddScopeOwnerRequest` +- `/provenance.metadata.v1.MsgDeleteScopeOwnerRequest` +- `/provenance.metadata.v1.MsgUpdateValueOwnersRequest` +- `/provenance.metadata.v1.MsgMigrateValueOwnerRequest` +- `/provenance.metadata.v1.MsgWriteSessionRequest` +- `/provenance.metadata.v1.MsgWriteRecordRequest` +- `/provenance.metadata.v1.MsgDeleteRecordRequest` +- `/provenance.metadata.v1.MsgWriteScopeSpecificationRequest` +- `/provenance.metadata.v1.MsgDeleteScopeSpecificationRequest` +- `/provenance.metadata.v1.MsgWriteContractSpecificationRequest` +- `/provenance.metadata.v1.MsgDeleteContractSpecificationRequest` +- `/provenance.metadata.v1.MsgAddContractSpecToScopeSpecRequest` +- `/provenance.metadata.v1.MsgDeleteContractSpecFromScopeSpecRequest` +- `/provenance.metadata.v1.MsgWriteRecordSpecificationRequest` +- `/provenance.metadata.v1.MsgDeleteRecordSpecificationRequest` +- `/provenance.metadata.v1.MsgBindOSLocatorRequest` +- `/provenance.metadata.v1.MsgDeleteOSLocatorRequest` +- `/provenance.metadata.v1.MsgModifyOSLocatorRequest` +- `/provenance.metadata.v1.MsgSetAccountDataRequest` diff --git a/docs/sdk/metadata/04_authz.md b/docs/sdk/metadata/04_authz.md new file mode 100644 index 000000000..21eb983ff --- /dev/null +++ b/docs/sdk/metadata/04_authz.md @@ -0,0 +1,79 @@ +# Metadata Authz + +The `authz` implementation in the `metadata` module checks for granted permission in cases when there are missing signatures. + +A `GenericAuthorization` should be used using the message type URLs now documented in [03_messages.md](03_messages.md). + + + - [Code](#code) + - [CLI](#cli) + - [Special allowances](#special-allowances) + +--- + +## Code + +Grant: +```golang +granter := ... // Bech32 AccAddress +grantee := ... 
// Bech32 AccAddress
+a := authz.NewGenericAuthorization(types.TypeURLMsgWriteScopeRequest)
+err := s.app.AuthzKeeper.SaveGrant(s.ctx, grantee, granter, a, now.Add(time.Hour))
+```
+
+Delete:
+```golang
+err := s.app.AuthzKeeper.DeleteGrant(s.ctx, grantee, granter, types.TypeURLMsgWriteScopeRequest)
+```
+
+Revoke:
+```golang
+granter := ... // Bech32 AccAddress
+grantee := ... // Bech32 AccAddress
+msgRevoke := authz.NewMsgRevoke(granter, grantee, types.TypeURLMsgWriteScopeRequest)
+res, err := s.app.AuthzKeeper.Revoke(s.ctx, msgRevoke)
+```
+
+## CLI
+
+Grant:
+```console
+$ provenanced tx authz grant <grantee> generic --msg-type <msg-type-url> --from <granter>
+```
+
+Revoke:
+```console
+$ provenanced tx authz revoke <grantee> <msg-type-url> --from <granter>
+```
+
+
+See the [GenericAuthorization](https://docs.cosmos.network/v0.47/build/modules/authz#genericauthorization) specification for more details.
+
+## Special allowances
+
+Some messages in the `metadata` module have hierarchies. A grant on a parent message type will also work for any of
+its message subtypes, but not the other way around. Therefore, authorizations on these messages are `one way`.
+
+- An authorization on `MsgWriteScopeRequest` works for any of the listed message subtypes:
+  - `MsgAddScopeDataAccessRequest`
+  - `MsgDeleteScopeDataAccessRequest`
+  - `MsgAddScopeOwnerRequest`
+  - `MsgDeleteScopeOwnerRequest`
+
+- An authorization on `MsgWriteSessionRequest` works for any of the listed message subtypes:
+  - `MsgWriteRecordRequest`
+
+- An authorization on `MsgWriteScopeSpecificationRequest` works for any of the listed message subtypes:
+  - `MsgAddContractSpecToScopeSpecRequest`
+  - `MsgDeleteContractSpecFromScopeSpecRequest`
+
+- An authorization on `MsgWriteContractSpecificationRequest` works for any of the listed message subtypes:
+  - `MsgWriteRecordSpecificationRequest`
+
+- An authorization on `MsgDeleteContractSpecificationRequest` works for any of the listed message subtypes:
+  - `MsgDeleteRecordSpecificationRequest`
+
+
+Notes:
+
+An authorization on a `Write` endpoint for an entry/spec will NOT work for its `Delete` endpoint.
diff --git a/docs/sdk/metadata/05_queries.md b/docs/sdk/metadata/05_queries.md
new file mode 100644
index 000000000..d0183cd14
--- /dev/null
+++ b/docs/sdk/metadata/05_queries.md
@@ -0,0 +1,456 @@
+# Metadata Queries
+
+In this section we describe the queries available for looking up metadata information.
+All state objects specified by each message are defined within the [state](02_state.md) section.
+
+Each entry or specification state object is wrapped with an `*_id_info` message containing information about that state object's address/id.
+By default, the `*_id_info` fields are populated with information about the metadata address(es) involved, but each applicable request has an `exclude_id_info` flag to cause those fields to not be populated in the result.
+If a requested entry or specification isn't found, an empty wrapper containing only id info is returned.
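+
+All of these queries are also exposed over gRPC. A minimal lookup sketch, assuming the generated Go query client
+in `github.com/provenance-io/provenance/x/metadata/types` (request field names here follow the proto fields
+described in the sections below):
+
+```golang
+package main
+
+import (
+    "context"
+    "fmt"
+
+    "google.golang.org/grpc"
+    "google.golang.org/grpc/credentials/insecure"
+
+    metadatatypes "github.com/provenance-io/provenance/x/metadata/types"
+)
+
+func main() {
+    // 9090 is the default Cosmos SDK gRPC port.
+    conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
+    if err != nil {
+        panic(err)
+    }
+    defer conn.Close()
+
+    client := metadatatypes.NewQueryClient(conn)
+    resp, err := client.Scope(context.Background(), &metadatatypes.ScopeRequest{
+        ScopeId:         "scope1qzge0zaztu65tx5x5llv5xc9ztsqxlkwel",
+        IncludeSessions: true,
+        IncludeRecords:  true,
+    })
+    if err != nil {
+        panic(err)
+    }
+    fmt.Println(resp)
+}
+```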
+ + + - [Params](#params) + - [Scope](#scope) + - [ScopesAll](#scopesall) + - [Sessions](#sessions) + - [SessionsAll](#sessionsall) + - [Records](#records) + - [RecordsAll](#recordsall) + - [Ownership](#ownership) + - [ValueOwnership](#valueownership) + - [ScopeSpecification](#scopespecification) + - [ScopeSpecificationsAll](#scopespecificationsall) + - [ContractSpecification](#contractspecification) + - [ContractSpecificationsAll](#contractspecificationsall) + - [RecordSpecificationsForContractSpecification](#recordspecificationsforcontractspecification) + - [RecordSpecification](#recordspecification) + - [RecordSpecificationsAll](#recordspecificationsall) + - [GetByAddr](#getbyaddr) + - [OSLocatorParams](#oslocatorparams) + - [OSLocator](#oslocator) + - [OSLocatorsByURI](#oslocatorsbyuri) + - [OSLocatorsByScope](#oslocatorsbyscope) + - [OSAllLocators](#osalllocators) + - [AccountData](#accountdata) + + +--- +## Params + +The `Params` query gets the parameters of the metadata module. + +### Request ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L247-L251 + +There are no inputs for this query. + +### Response ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L253-L260 + + +--- +## Scope + +The `Scope` query gets a scope. + +### Request ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L262-L282 + +The `scope_id`, if provided, must either be scope uuid, e.g. `91978ba2-5f35-459a-86a7-feca1b0512e0` or a scope address, +e.g. `scope1qzge0zaztu65tx5x5llv5xc9ztsqxlkwel`. The session addr, if provided, must be a bech32 session address, +e.g. `session1qxge0zaztu65tx5x5llv5xc9zts9sqlch3sxwn44j50jzgt8rshvqyfrjcr`. The record_addr, if provided, must be a +bech32 record address, e.g. `record1q2ge0zaztu65tx5x5llv5xc9ztsw42dq2jdvmdazuwzcaddhh8gmu3mcze3`. + +* If only a `scope_id` is provided, that scope is returned. +* If only a `session_addr` is provided, the scope containing that session is returned. +* If only a `record_addr` is provided, the scope containing that record is returned. +* If more than one of `scope_id`, `session_addr`, and `record_addr` are provided, and they don't refer to the same scope, +a bad request is returned. + +Providing a `session_addr` or `record_addr` does not limit the sessions and records returned (if requested). +Those parameters are only used to find the scope. + +By default, sessions and records are not included. +Set `include_sessions` and/or `include_records` to true to include sessions and/or records. + +### Response ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L284-L295 + + +--- +## ScopesAll + +The `ScopesAll` query gets all scopes. + +This query is paginated. + +### Request ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L307-L316 + +The only input to this query is pagination information. + +### Response ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L318-L327 + + +--- +## Sessions + +The `Sessions` query gets sessions. 
+ +### Request ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L330-L352 + +The `scope_id` can either be scope uuid, e.g. `91978ba2-5f35-459a-86a7-feca1b0512e0` or a scope address, e.g. +`scope1qzge0zaztu65tx5x5llv5xc9ztsqxlkwel`. Similarly, the `session_id` can either be a uuid or session address, e.g. +`session1qxge0zaztu65tx5x5llv5xc9zts9sqlch3sxwn44j50jzgt8rshvqyfrjcr`. The `record_addr`, if provided, must be a +bech32 record address, e.g. `record1q2ge0zaztu65tx5x5llv5xc9ztsw42dq2jdvmdazuwzcaddhh8gmu3mcze3`. + +* If only a `scope_id` is provided, all sessions in that scope are returned. +* If only a `session_id` is provided, it must be an address, and that single session is returned. +* If the `session_id` is a uuid, then either a `scope_id` or `record_addr` must also be provided, and that single session +is returned. +* If only a `record_addr` is provided, the session containing that record will be returned. +* If a `record_name` is provided then either a `scope_id`, `session_id` as an address, or `record_addr` must also be +provided, and the session containing that record will be returned. + +A bad request is returned if: +* The `session_id` is a uuid and is provided without a `scope_id` or `record_addr`. +* A `record_name` is provided without any way to identify the scope (e.g. a `scope_id`, a `session_id` as an address, or +a `record_addr`). +* Two or more of `scope_id`, `session_id` as an address, and `record_addr` are provided and don't all refer to the same +scope. +* A `record_addr` (or `scope_id` and `record_name`) is provided with a `session_id` and that session does not contain such +a record. +* A `record_addr` and `record_name` are both provided, but reference different records. + +By default, the scope and records are not included. +Set `include_scope` and/or `include_records` to true to include the scope and/or records. + +### Response ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L354-L365 + + +--- +## SessionsAll + +The `SessionsAll` query gets all sessions. + +This query is paginated. + +### Request ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L377-L386 + +The only input to this query is pagination information. + +### Response ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L388-L397 + + +--- +## Records + +The `Records` query gets records. + +### Request ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L399-L422 + +The `record_addr`, if provided, must be a bech32 record address, e.g. +`record1q2ge0zaztu65tx5x5llv5xc9ztsw42dq2jdvmdazuwzcaddhh8gmu3mcze3`. The `scope_id` can either be scope uuid, e.g. +`91978ba2-5f35-459a-86a7-feca1b0512e0 `or a scope address, e.g. `scope1qzge0zaztu65tx5x5llv5xc9ztsqxlkwel`. Similarly, +the `session_id` can either be a uuid or session address, e.g. +`session1qxge0zaztu65tx5x5llv5xc9zts9sqlch3sxwn44j50jzgt8rshvqyfrjcr`. The name is the name of the record you're +interested in. + +* If only a `record_addr` is provided, that single record will be returned. +* If only a `scope_id` is provided, all records in that scope will be returned. 
+* If only a `session_id` (or a `scope_id` and `session_id`) is provided, all records in that session will be returned.
+* If a `name` is provided with a `scope_id` and/or `session_id`, that single record will be returned.
+
+A bad request is returned if:
+* The `session_id` is a uuid and no `scope_id` is provided.
+* There are two or more of `record_addr`, `session_id`, and `scope_id`, and they don't all refer to the same scope.
+* A `name` is provided, but not a `scope_id` and/or a `session_id`.
+* A `name` and `record_addr` are provided and the name doesn't match the record_addr.
+
+By default, the scope and sessions are not included.
+Set `include_scope` and/or `include_sessions` to true to include the scope and/or sessions.
+
+### Response
++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L424-L435
+
+
+---
+## RecordsAll
+
+The `RecordsAll` query gets all records.
+
+This query is paginated.
+
+### Request
++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L447-L456
+
+The only input to this query is pagination information.
+
+### Response
++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L458-L467
+
+
+---
+## Ownership
+
+The `Ownership` query gets the ids of scopes owned by an address.
+
+A scope is owned by an address if the address is listed as either an owner, or the value owner.
+
+This query is paginated.
+
+### Request
++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L469-L477
+
+The `address` should be a bech32 address string.
+
+### Response
++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L479-L488
+
+
+---
+## ValueOwnership
+
+The `ValueOwnership` query gets the ids of scopes that list an address as the value owner.
+
+This query is paginated.
+
+### Request
++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L490-L498
+
+The `address` should be a bech32 address string.
+
+### Response
++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L500-L509
+
+
+---
+## ScopeSpecification
+
+The `ScopeSpecification` query gets a scope specification.
+
+### Request
++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L511-L528
+
+The `specification_id` can either be a uuid, e.g. `dc83ea70-eacd-40fe-9adf-1cf6148bf8a2` or a bech32 scope
+specification address, e.g. `scopespec1qnwg86nsatx5pl56muw0v9ytlz3qu3jx6m`.
+
+### Response
++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L530-L541
+
+
+---
+## ScopeSpecificationsAll
+
+The `ScopeSpecificationsAll` query gets all scope specifications.
+
+This query is paginated.
+
+### Request
++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L551-L560
+
+The only input to this query is pagination information.
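+
+For the paginated `*All` queries, iteration is driven by the standard Cosmos SDK `PageRequest`/`PageResponse`
+types. A rough sketch, reusing the `client` from the gRPC sketch near the top of this document and a `ctx`
+`context.Context` (the `ScopeSpecificationsAllRequest` name and its `Pagination` field are assumed from the
+usual generated-code conventions):
+
+```golang
+// pageReq starts with just a limit; each pass follows the NextKey returned by the previous page.
+pageReq := &query.PageRequest{Limit: 100} // query is "github.com/cosmos/cosmos-sdk/types/query"
+for {
+    resp, err := client.ScopeSpecificationsAll(ctx, &metadatatypes.ScopeSpecificationsAllRequest{
+        Pagination: pageReq,
+    })
+    if err != nil {
+        panic(err)
+    }
+    // ... process resp ...
+    if resp.Pagination == nil || len(resp.Pagination.NextKey) == 0 {
+        break
+    }
+    pageReq.Key = resp.Pagination.NextKey
+}
+```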
+ +### Response ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L562-L571 + + +--- +## ContractSpecification + +The `ContractSpecification` query gets a contract specification. + +### Request ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L573-L589 + +The `specification_id` can either be a uuid, e.g. `def6bc0a-c9dd-4874-948f-5206e6060a84`, a bech32 contract +specification address, e.g. `contractspec1q000d0q2e8w5say53afqdesxp2zqzkr4fn`, or a bech32 record specification +address, e.g. `recspec1qh00d0q2e8w5say53afqdesxp2zw42dq2jdvmdazuwzcaddhh8gmuqhez44`. If it is a record specification +address, then the contract specification that contains that record specification is looked up. + +By default, the record specifications for this contract specification are not included. +Set `include_record_specs` to true to include them in the result. + + +### Response ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L591-L602 + + +--- +## ContractSpecificationsAll + +The `ContractSpecificationsAll` query gets all contract specifications. + +This query is paginated. + +### Request ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L612-L621 + +The only input to this query is pagination information. + +### Response ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L623-L633 + + +--- +## RecordSpecificationsForContractSpecification + +The `RecordSpecificationsForContractSpecification` query gets all record specifications for a contract specification. + +The only difference between this query and `ContractSpecification` with `include_record_specs = true` is that +this query does not return the contract specification. + +### Request ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L635-L649 + +The `specification_id` can either be a uuid, e.g. `def6bc0a-c9dd-4874-948f-5206e6060a84`, a bech32 contract +specification address, e.g. `contractspec1q000d0q2e8w5say53afqdesxp2zqzkr4fn`, or a bech32 record specification +address, e.g. `recspec1qh00d0q2e8w5say53afqdesxp2zw42dq2jdvmdazuwzcaddhh8gmuqhez44`. If it is a record specification +address, then the contract specification that contains that record specification is used. + +### Response ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L651-L664 + + +--- +## RecordSpecification + +The `RecordSpecification` query gets a record specification. + +### Request ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L666-L683 + +The `specification_id` can either be a uuid, e.g. `def6bc0a-c9dd-4874-948f-5206e6060a84` or a bech32 contract specification +address, e.g. `contractspec1q000d0q2e8w5say53afqdesxp2zqzkr4fn`. +It can also be a record specification address, e.g. +`recspec1qh00d0q2e8w5say53afqdesxp2zw42dq2jdvmdazuwzcaddhh8gmuqhez44`. + +The `name` is the name of the record to look up. +It is required if the `specification_id` is a uuid or contract specification address. 
+It is ignored if the `specification_id` is a record specification address. + +### Response ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L685-L692 + + +--- +## RecordSpecificationsAll + +The `RecordSpecificationsAll` query gets all record specifications. + +This query is paginated. + +### Request ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L702-L711 + +The only input to this query is pagination information. + +### Response ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L713-L723 + + +--- +## GetByAddr + +The `GetByAddr` query looks up metadata entries and/or specifications for a given list of addresses. +The results of this query are not wrapped with id information like the other queries, and only returns the exact entries requested. + +### Request ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L725-L729 + +The `addrs` can contain any valid metadata address bech32 strings. + +### Response ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L731-L747 + +Any invalid or nonexistent `addrs` will be in the `not_found` list. + +--- +## OSLocatorParams + +The `OSLocatorParams` query gets the parameters of the Object Store Locator sub-module. + +### Request ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L749-L753 + +There are no inputs for this query. + +### Response ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L755-L762 + + +--- +## OSLocator + +The `OSLocator` query gets an Object Store Locator for an address. + +### Request ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L764-L770 + +The `owner` should be a bech32 address string. + +### Response ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L772-L778 + + +--- +## OSLocatorsByURI + +The `OSLocatorsByURI` query gets the object store locators by URI. + +### Request ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L780-L788 + +The `uri` is string the URI to find object store locators for. + +### Response ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L790-L798 + + +--- +## OSLocatorsByScope + +The `OSLocatorsByScope` query gets the object store locators for the owners and value owner of a scope. + +### Request ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L800-L806 + +The `scope_id`, must either be scope uuid, e.g. `91978ba2-5f35-459a-86a7-feca1b0512e0` or a scope address, +e.g. 
`scope1qzge0zaztu65tx5x5llv5xc9ztsqxlkwel` + +### Response ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L808-L814 + + +--- +## OSAllLocators + +The `OSAllLocators` query gets all object store locators. + +This query is paginated. + +### Request ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L816-L822 + +The only input to this query is pagination information. + +### Response ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L824-L832 + +--- +## AccountData + +The `AccountData` query gets the account data associated with a scope. + +### Request ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L834-L843 + +The `metadata_addr` must be a scope id, e.g. `scope1qzge0zaztu65tx5x5llv5xc9ztsqxlkwel`. + +### Response ++++ https://github.com/provenance-io/provenance/blob/3b77d267d4336deba89fc2196243e80952de51a1/proto/provenance/metadata/v1/query.proto#L845-L849 diff --git a/docs/sdk/metadata/06_events.md b/docs/sdk/metadata/06_events.md new file mode 100644 index 000000000..37b36e556 --- /dev/null +++ b/docs/sdk/metadata/06_events.md @@ -0,0 +1,249 @@ +# Metadata Events + +The metadata module emits the following events and telemetry information. + + + - [Generic](#generic) + - [EventTxCompleted](#eventtxcompleted) + - [Scope](#scope) + - [EventScopeCreated](#eventscopecreated) + - [EventScopeUpdated](#eventscopeupdated) + - [EventScopeDeleted](#eventscopedeleted) + - [Session](#session) + - [EventSessionCreated](#eventsessioncreated) + - [EventSessionUpdated](#eventsessionupdated) + - [EventSessionDeleted](#eventsessiondeleted) + - [Record](#record) + - [EventRecordCreated](#eventrecordcreated) + - [EventRecordUpdated](#eventrecordupdated) + - [EventRecordDeleted](#eventrecorddeleted) + - [Scope Specification](#scope-specification) + - [EventScopeSpecificationCreated](#eventscopespecificationcreated) + - [EventScopeSpecificationUpdated](#eventscopespecificationupdated) + - [EventScopeSpecificationDeleted](#eventscopespecificationdeleted) + - [Contract Specification](#contract-specification) + - [EventContractSpecificationCreated](#eventcontractspecificationcreated) + - [EventContractSpecificationUpdated](#eventcontractspecificationupdated) + - [EventContractSpecificationDeleted](#eventcontractspecificationdeleted) + - [Record Specification](#record-specification) + - [EventRecordSpecificationCreated](#eventrecordspecificationcreated) + - [EventRecordSpecificationUpdated](#eventrecordspecificationupdated) + - [EventRecordSpecificationDeleted](#eventrecordspecificationdeleted) + - [Object Store Locator](#object-store-locator) + - [EventOSLocatorCreated](#eventoslocatorcreated) + - [EventOSLocatorUpdated](#eventoslocatorupdated) + - [EventOSLocatorDeleted](#eventoslocatordeleted) + +--- +## Generic + +### EventTxCompleted + +This event is emitted whenever a TX has completed without issues. +It will usually be accompanied by one or more of the other events. + +| Attribute Key | Attribute Value | +| --------------------- | ------------------------------------------------- | +| Module | "metadata" | +| Endpoint | The name of the rpc called, e.g. 
"WriteScope" | +| Signers | List of bech32 address strings of the msg signers | + +--- +## Scope + +### EventScopeCreated + +This event is emitted whenever a new scope is written. + +| Attribute Key | Attribute Value | +| --------------------- | ------------------------------------------------- | +| ScopeAddr | The bech32 address string of the ScopeId | + +### EventScopeUpdated + +This event is emitted whenever an existing scope is updated. + +| Attribute Key | Attribute Value | +| --------------------- | ------------------------------------------------- | +| ScopeAddr | The bech32 address string of the ScopeId | + +### EventScopeDeleted + +This event is emitted whenever an existing scope is deleted. + +| Attribute Key | Attribute Value | +| --------------------- | ------------------------------------------------- | +| ScopeAddr | The bech32 address string of the ScopeId | + +--- +## Session + +### EventSessionCreated + +This event is emitted whenever a new session is written. + +| Attribute Key | Attribute Value | +| --------------------- | -------------------------------------------------- | +| SessionAddr | The bech32 address string of the SessionId | +| ScopeAddr | The bech32 address string of the session's ScopeId | + +### EventSessionUpdated + +This event is emitted whenever an existing session is updated. + +| Attribute Key | Attribute Value | +| --------------------- | -------------------------------------------------- | +| SessionAddr | The bech32 address string of the SessionId | +| ScopeAddr | The bech32 address string of the session's ScopeId | + +### EventSessionDeleted + +This event is emitted whenever an existing session is deleted. + +| Attribute Key | Attribute Value | +| --------------------- | -------------------------------------------------- | +| SessionAddr | The bech32 address string of the SessionId | +| ScopeAddr | The bech32 address string of the session's ScopeId | + +--- +## Record + +### EventRecordCreated + +This event is emitted whenever a new record is written. + +| Attribute Key | Attribute Value | +| --------------------- | --------------------------------------------------- | +| RecordAddr | The bech32 address string of the RecordId | +| SessionAddr | The bech32 address string of the record's SessionId | +| ScopeAddr | The bech32 address string of the record's ScopeId | + +### EventRecordUpdated + +This event is emitted whenever an existing record is updated. + +| Attribute Key | Attribute Value | +| --------------------- | --------------------------------------------------- | +| RecordAddr | The bech32 address string of the RecordId | +| SessionAddr | The bech32 address string of the record's SessionId | +| ScopeAddr | The bech32 address string of the record's ScopeId | + +### EventRecordDeleted + +This event is emitted whenever an existing record is deleted. + +| Attribute Key | Attribute Value | +| --------------------- | ------------------------------------------------- | +| RecordAddr | The bech32 address string of the RecordId | +| ScopeAddr | The bech32 address string of the record's ScopeId | + +--- +## Scope Specification + +### EventScopeSpecificationCreated + +This event is emitted whenever a new scope specification is written. + +| Attribute Key | Attribute Value | +| ---------------------- | ------------------------------------------------- | +| ScopeSpecificationAddr | The bech32 address string of the SpecificationId | + +### EventScopeSpecificationUpdated + +This event is emitted whenever an existing scope specification is updated. 
+ +| Attribute Key | Attribute Value | +| ---------------------- | ------------------------------------------------- | +| ScopeSpecificationAddr | The bech32 address string of the SpecificationId | + +### EventScopeSpecificationDeleted + +This event is emitted whenever an existing scope specification is deleted. + +| Attribute Key | Attribute Value | +| ---------------------- | ------------------------------------------------- | +| ScopeSpecificationAddr | The bech32 address string of the SpecificationId | + +--- +## Contract Specification + +### EventContractSpecificationCreated + +This event is emitted whenever a new contract specification is written. + +| Attribute Key | Attribute Value | +| ------------------------- | ------------------------------------------------- | +| ContractSpecificationAddr | The bech32 address string of the SpecificationId | + +### EventContractSpecificationUpdated + +This event is emitted whenever an existing contract specification is updated. + +| Attribute Key | Attribute Value | +| ------------------------- | ------------------------------------------------- | +| ContractSpecificationAddr | The bech32 address string of the SpecificationId | + +### EventContractSpecificationDeleted + +This event is emitted whenever an existing contract specification is deleted. + +| Attribute Key | Attribute Value | +| ------------------------- | ------------------------------------------------- | +| ContractSpecificationAddr | The bech32 address string of the SpecificationId | + +--- +## Record Specification + +### EventRecordSpecificationCreated + +This event is emitted whenever a new record specification is written. + +| Attribute Key | Attribute Value | +| ------------------------- | ---------------------------------------------------------- | +| RecordSpecificationAddr | The bech32 address string of the SpecificationId | +| ContractSpecificationAddr | The bech32 address string of the Contract SpecificationId | + +### EventRecordSpecificationUpdated + +This event is emitted whenever an existing record specification is updated. + +| Attribute Key | Attribute Value | +| ------------------------- | ---------------------------------------------------------- | +| RecordSpecificationAddr | The bech32 address string of the SpecificationId | +| ContractSpecificationAddr | The bech32 address string of the Contract SpecificationId | + +### EventRecordSpecificationDeleted + +This event is emitted whenever an existing record specification is deleted. + +| Attribute Key | Attribute Value | +| ------------------------- | ---------------------------------------------------------- | +| RecordSpecificationAddr | The bech32 address string of the SpecificationId | +| ContractSpecificationAddr | The bech32 address string of the Contract SpecificationId | + +--- +## Object Store Locator + +### EventOSLocatorCreated + +This event is emitted whenever a new object store locator is written. + +| Attribute Key | Attribute Value | +| ---------------- | -------------------------------------- | +| Owner | The bech32 address string of the Owner | + +### EventOSLocatorUpdated + +This event is emitted whenever an existing object store locator is updated. + +| Attribute Key | Attribute Value | +| ---------------- | -------------------------------------- | +| Owner | The bech32 address string of the Owner | + +### EventOSLocatorDeleted + +This event is emitted whenever an existing object store locator is deleted. 
+ +| Attribute Key | Attribute Value | +| ---------------- | -------------------------------------- | +| Owner | The bech32 address string of the Owner | diff --git a/docs/sdk/metadata/07_telemetry.md b/docs/sdk/metadata/07_telemetry.md new file mode 100644 index 000000000..76fd18101 --- /dev/null +++ b/docs/sdk/metadata/07_telemetry.md @@ -0,0 +1,122 @@ +# Metadata Events + +The metadata module emits the following events and telemetry information. + + + - [Counters](#counters) + - [Stored Objects](#stored-objects) + - [Stored Object: Keys](#stored-object-keys) + - [Stored Object: Labels](#stored-object-labels) + - [Stored Object: Label: Category](#stored-object-label-category) + - [Stored Object: Label: Object Type](#stored-object-label-object-type) + - [Object Actions](#object-actions) + - [Object Action: Keys](#object-action-keys) + - [Object Action: Labels](#object-action-labels) + - [Object Action: Label: Category](#object-action-label-category) + - [Object Action: Label: Object Type](#object-action-label-object-type) + - [Object Action: Label: Action](#object-action-label-action) + - [Timers](#timers) + - [TX Keys](#tx-keys) + - [Query Keys](#query-keys) + + + +--- +## Counters + +### Stored Objects + +This counter is used to get counts of things stored on the chain. + +When this module writes a new object to the chain, this counter is incremented by 1. +When this module deletes an object from the chain, this counter is decremented by 1. +When this module updates an object on the chain, this counter is not updated. + +#### Stored Object: Keys + +`"metadata"`, `"stored-object"` + +#### Stored Object: Labels + +`"category"`, `"object-type"` + +##### Stored Object: Label: Category + +This label groups the objects into a general type. + +The string for this label is `"category"`. + +Possible values: +- `"entry"` +- `"specification"` +- `"object-store-locator"` + +##### Stored Object: Label: Object Type + +This label specifically identifies objects. +Each value belongs to exactly one "category" label. + +The string for this label is `"object-type"`. + +Possible values: +- `"scope"` (is an `"entry"`) +- `"session"` (is an `"entry"`) +- `"record"` (is an `"entry"`) +- `"scope-specification"` (is a `"specification"`) +- `"contract-specification"` (is a `"specification"`) +- `"record-specification"` (is a `"specification"`) +- `"object-store-locator"` (is an `"object-store-locator"`) + + + +### Object Actions + +This counter is used to get counts of actions taken on the chain. + +Every time this module writes to or deletes from the chain, this counter is incremented. + +#### Object Action: Keys + +`"metadata"`, `"object-action"` + +#### Object Action: Labels + +`"category"`, `"object-type"`, `"action"` + +##### Object Action: Label: Category + +This is the same label used by the stored object counter: [Stored Object: Label: Category](#stored-object-label-category) + +##### Object Action: Label: Object Type + +This is the same label used by the stored object counter: [Stored Object: Label: Object Type](#stored-object-label-object-type) + +##### Object Action: Label: Action + +This label defines the actions taken with respects to the various objects. + +The string for this label is `"action"`. + +Possible values: +- `"created"` +- `"updated"` +- `"deleted"` + + + +--- +## Timers + +All TX and Query endpoints have related timing metrics. + +### TX Keys + +`"metadata"`, `"tx"`, `{endpoint}` + +Example `{endpoint}` values: `"WriteScope"`, `"DeleteContractSpecification"`, `"ModifyOSLocator"`. 
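+
+As a rough illustration (not the module's actual wiring), counters and timers with these keys and labels
+would typically be emitted through the Cosmos SDK `telemetry` package as sketched below; note the go-metrics
+import path varies by SDK version (older versions use `github.com/armon/go-metrics`):
+
+```golang
+package main
+
+import (
+    "time"
+
+    "github.com/cosmos/cosmos-sdk/telemetry"
+    metrics "github.com/hashicorp/go-metrics"
+)
+
+func recordWriteScopeMetrics(start time.Time) {
+    // Counter: one new "scope" entry was stored.
+    telemetry.IncrCounterWithLabels(
+        []string{"metadata", "stored-object"},
+        1,
+        []metrics.Label{
+            telemetry.NewLabel("category", "entry"),
+            telemetry.NewLabel("object-type", "scope"),
+        },
+    )
+    // Timer keyed as "metadata", "tx", "WriteScope".
+    telemetry.MeasureSince(start, "metadata", "tx", "WriteScope")
+}
+
+func main() {
+    start := time.Now()
+    // ... handle the WriteScope tx ...
+    recordWriteScopeMetrics(start)
+}
+```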
+ +### Query Keys + +`"metadata"`, `"query"`, `{endpoint}` + +Example `{endpoint}` values: `"Scope"`, `"ContractSpecificationsAll"`, `"OSLocatorsByScope"`. diff --git a/docs/sdk/metadata/08_params.md b/docs/sdk/metadata/08_params.md new file mode 100644 index 000000000..addf5a873 --- /dev/null +++ b/docs/sdk/metadata/08_params.md @@ -0,0 +1,13 @@ +# Metadata Parameters + +## Base Module Parameters + +The base metadata module itself does not have any parameters. + +## Object Store Locator Parameters + +The object store locator sub-module contains the following parameters: + +| Key | Type | Example | +|------------------------|--------|---------| +| MaxUriLength | uint32 | 2048 | diff --git a/docs/sdk/metadata/README.md b/docs/sdk/metadata/README.md new file mode 100644 index 000000000..4b13b9ea9 --- /dev/null +++ b/docs/sdk/metadata/README.md @@ -0,0 +1,27 @@ +# `x/metadata` + +## Overview + +The metadata service provides a system for referencing off-chain information. +It can be used to record and validate information as well as the processes that create the information. + +## Contents + +1. **[Concepts](01_concepts.md)** + - [Metadata Addresses](01_concepts.md#metadata-addresses) +1. **[State](02_state.md)** + - [Scopes](02_state.md#scopes) + - [Sessions](02_state.md#sessions) + - [Records](02_state.md#records) + - [Scope Specifications](02_state.md#scope-specifications) + - [Contract Specifications](02_state.md#contract-specifications) + - [Record Specifications](02_state.md#record-specifications) + - [Object Store Locators](02_state.md#object-store-locators) +1. **[Messages](03_messages.md)** +1. **[Authz](04_authz.md)** +1. **[Queries](05_queries.md)** +1. **[Events](06_events.md)** +1. **[Telemetry](07_telemetry.md)** +1. **[Params](08_params.md)** + + diff --git a/docs/sdk/metadata/examples/go/metadata_address.go b/docs/sdk/metadata/examples/go/metadata_address.go new file mode 100644 index 000000000..164ee6ae8 --- /dev/null +++ b/docs/sdk/metadata/examples/go/metadata_address.go @@ -0,0 +1,240 @@ +package provenance + +import ( + "bytes" + "crypto/sha256" + "fmt" + "strings" + + "github.com/cosmos/cosmos-sdk/types/bech32" + "github.com/google/uuid" +) + +const ( + PrefixScope = "scope" + PrefixSession = "session" + PrefixRecord = "record" + PrefixScopeSpecification = "scopespec" + PrefixContractSpecification = "contractspec" + PrefixRecordSpecification = "recspec" + + KeyScope = byte(0x00) + KeySession = byte(0x01) + KeyRecord = byte(0x02) + KeyScopeSpecification = byte(0x04) // Note that this is not in numerical order. + KeyContractSpecification = byte(0x03) + KeyRecordSpecification = byte(0x05) +) + +// MetadataAddress is a type that helps create ids for the various types objects stored by the metadata module. 
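+// The underlying bytes are: a single type key byte, then the 16 bytes of the primary uuid, and, for
+// session addresses, the 16 bytes of the session uuid, or, for record and record specification
+// addresses, 16 bytes taken from a sha256 hash of the lower-cased, trimmed name.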
+type MetadataAddress []byte + +// MetadataAddressForScope creates a MetadataAddress instance for the given scope by its uuid +func MetadataAddressForScope(scopeUUID uuid.UUID) MetadataAddress { + return buildBytes(KeyScope, uuidMustMarshalBinary(scopeUUID)) +} + +// MetadataAddressForSession creates a MetadataAddress instance for a session within a scope by uuids +func MetadataAddressForSession(scopeUUID uuid.UUID, sessionUUID uuid.UUID) MetadataAddress { + return buildBytes(KeySession, uuidMustMarshalBinary(scopeUUID), uuidMustMarshalBinary(sessionUUID)) +} + +// MetadataAddressForRecord creates a MetadataAddress instance for a record within a scope by scope uuid/record name +func MetadataAddressForRecord(scopeUUID uuid.UUID, recordName string) MetadataAddress { + if stringIsBlank(recordName) { + panic("invalid recordName: cannot be empty or blank") + } + return buildBytes(KeyRecord, uuidMustMarshalBinary(scopeUUID), stringAsHashedBytes(recordName)) +} + +// MetadataAddressForScopeSpecification creates a MetadataAddress instance for a scope specification +func MetadataAddressForScopeSpecification(scopeSpecUUID uuid.UUID) MetadataAddress { + return buildBytes(KeyScopeSpecification, uuidMustMarshalBinary(scopeSpecUUID)) +} + +// MetadataAddressForContractSpecification creates a MetadataAddress instance for a contract specification +func MetadataAddressForContractSpecification(contractSpecUUID uuid.UUID) MetadataAddress { + return buildBytes(KeyContractSpecification, uuidMustMarshalBinary(contractSpecUUID)) +} + +// MetadataAddressForRecordSpecification creates a MetadataAddress instance for a record specification +func MetadataAddressForRecordSpecification(contractSpecUUID uuid.UUID, recordSpecName string) MetadataAddress { + if stringIsBlank(recordSpecName) { + panic("invalid recordSpecName: cannot be empty or blank") + } + return buildBytes(KeyRecordSpecification, uuidMustMarshalBinary(contractSpecUUID), stringAsHashedBytes(recordSpecName)) +} + +// MetadataAddressFromBech32 creates a MetadataAddress from a Bech32 string. The encoded data is checked against the +// provided bech32 hrp along with an overall verification of the byte format. +func MetadataAddressFromBech32(address string) (MetadataAddress, error) { + hrp, bz, err := bech32.DecodeAndConvert(address) + if err != nil { + return nil, err + } + err = validateBytes(bz) + if err != nil { + return nil, err + } + expectedHrp := getPrefixFromKey(bz[0]) + if hrp != expectedHrp { + return nil, fmt.Errorf("incorrect hrp: expected %s, actual %s", expectedHrp, hrp) + } + return bz, nil +} + +func MetadataAddressFromBytes(bz []byte) (MetadataAddress, error) { + err := validateBytes(bz) + if err != nil { + return nil, err + } + return bz, nil +} + +// GetKey gets the key byte for this MetadataAddress. +func (m MetadataAddress) GetKey() byte { + return m[0] +} + +// GetPrefix gets the prefix string for this MetadataAddress, e.g. "scope". +func (m MetadataAddress) GetPrefix() string { + return getPrefixFromKey(m[0]) +} + +// GetPrimaryUUID gets the set of bytes for the primary uuid part of this MetadataAddress as a UUID. +func (m MetadataAddress) GetPrimaryUUID() uuid.UUID { + retval, err := uuid.FromBytes(m[1:17]) + if err != nil { + panic(err) + } + return retval +} + +// GetSecondaryBytes gets a copy of the bytes that make up the secondary part of this MetadataAddress. 
+func (m MetadataAddress) GetSecondaryBytes() []byte { + if len(m) <= 17 { + return []byte{} + } + retval := make([]byte, len(m)-17) + copy(retval, m[17:]) + return retval +} + +// Bytes gets all the bytes of this MetadataAddress. +func (m MetadataAddress) Bytes() []byte { + return m +} + +// String implements the stringer interface and encodes as a bech32. +func (m MetadataAddress) String() string { + if len(m) == 0 { + return "" + } + bech32Addr, err := bech32.ConvertAndEncode(getPrefixFromKey(m[0]), m) + if err != nil { + panic(err) + } + return bech32Addr +} + +// Equals implementation for comparing MetadataAddress values. +func (m MetadataAddress) Equals(m2 MetadataAddress) bool { + return (m == nil && m2 == nil) || bytes.Equal(m, m2) +} + +// Format implements fmt.Format interface +// %s formats as bech32 address string (same as m.String()). +// %p formats as the address of 0th element in base 16 notation, with leading 0x. +// all others format as base 16, upper-case, two characters per byte. +func (m MetadataAddress) Format(s fmt.State, verb rune) { + switch verb { + case 's': + s.Write([]byte(m.String())) + case 'p': + s.Write([]byte(fmt.Sprintf("%p", m.Bytes()))) + default: + s.Write([]byte(fmt.Sprintf("%X", m.Bytes()))) + } +} + +// uuidMustMarshalBinary gets the bytes of a UUID or panics. +func uuidMustMarshalBinary(id uuid.UUID) []byte { + bz, err := id.MarshalBinary() + if err != nil { + panic(err) + } + return bz +} + +// stringIsBlank returns true if the string is empty or all whitespace. +func stringIsBlank(str string) bool { + return len(strings.TrimSpace(str)) == 0 +} + +// stringAsHashedBytes hashes a string and gets the bytes desired for a MetadataAddress. +func stringAsHashedBytes(str string) []byte { + bz := sha256.Sum256([]byte(strings.ToLower(strings.TrimSpace(str)))) + return bz[0:16] +} + +// buildBytes creates a new slice with the provided bytes. +func buildBytes(key byte, parts ...[]byte) []byte { + l := 1 + for _, p := range parts { + l += len(p) + } + retval := make([]byte, 0, l) + retval = append(retval, key) + for _, p := range parts { + retval = append(retval, p...) + } + return retval +} + +// getPrefixFromKey gets the prefix that corresponds to the provided key byte. +func getPrefixFromKey(key byte) string { + switch key { + case KeyScope: + return PrefixScope + case KeySession: + return PrefixSession + case KeyRecord: + return PrefixRecord + case KeyScopeSpecification: + return PrefixScopeSpecification + case KeyContractSpecification: + return PrefixContractSpecification + case KeyRecordSpecification: + return PrefixRecordSpecification + default: + panic(fmt.Errorf("invalid key: %d", key)) + } +} + +// validateBytes makes sure the provided bytes have a correct key and length. 
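+// Valid forms are 17 bytes (key + primary UUID) for scopes, scope specifications, and contract specifications,
+// or 33 bytes (key + primary UUID + 16 secondary bytes) for sessions, records, and record specifications.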
+func validateBytes(bz []byte) error { + if len(bz) == 0 { + return fmt.Errorf("no bytes found in metadata address") + } + expectedLength := 0 + switch bz[0] { + case KeyScope: + expectedLength = 17 + case KeySession: + expectedLength = 33 + case KeyRecord: + expectedLength = 33 + case KeyScopeSpecification: + expectedLength = 17 + case KeyContractSpecification: + expectedLength = 17 + case KeyRecordSpecification: + expectedLength = 33 + default: + return fmt.Errorf("invalid key: %d", bz[0]) + } + if expectedLength != len(bz) { + return fmt.Errorf("incorrect data length for %s address: expected %d, actual %d", getPrefixFromKey(bz[0]), expectedLength, len(bz)) + } + return nil +} diff --git a/docs/sdk/metadata/examples/go/metadata_address_test.go b/docs/sdk/metadata/examples/go/metadata_address_test.go new file mode 100644 index 000000000..73d295dbb --- /dev/null +++ b/docs/sdk/metadata/examples/go/metadata_address_test.go @@ -0,0 +1,239 @@ +package provenance + +import ( + "testing" + + "github.com/google/uuid" + "github.com/stretchr/testify/suite" +) + +type MetadataAddressTestSuite struct { + suite.Suite + + // Pre-selected UUID strings that go with ID strings generated from the Go code. + scopeUUIDStr string + sessionUUIDStr string + scopeSpecUUIDStr string + contractSpecUUIDStr string + recordName string + recordNameHashedBytes []byte + + // Pre-generated ID strings created using Go code and providing the above strings. + scopeIDStr string + sessionIDStr string + recordIDStr string + scopeSpecIDStr string + contractSpecIDStr string + recordSpecIDStr string + + // UUID versions of the UUID strings. + scopeUUID uuid.UUID + sessionUUID uuid.UUID + scopeSpecUUID uuid.UUID + contractSpecUUID uuid.UUID +} + +func (s *MetadataAddressTestSuite) SetupTest() { + // These strings come from the output of x/metadata/types/address_test.go TestGenerateExamples(). 
+ + s.scopeUUIDStr = "91978ba2-5f35-459a-86a7-feca1b0512e0" + s.sessionUUIDStr = "5803f8bc-6067-4eb5-951f-2121671c2ec0" + s.scopeSpecUUIDStr = "dc83ea70-eacd-40fe-9adf-1cf6148bf8a2" + s.contractSpecUUIDStr = "def6bc0a-c9dd-4874-948f-5206e6060a84" + s.recordName = "recordname" + s.recordNameHashedBytes = []byte{234, 169, 160, 84, 154, 205, 183, 162, 227, 133, 142, 181, 183, 185, 209, 190} + + s.scopeIDStr = "scope1qzge0zaztu65tx5x5llv5xc9ztsqxlkwel" + s.sessionIDStr = "session1qxge0zaztu65tx5x5llv5xc9zts9sqlch3sxwn44j50jzgt8rshvqyfrjcr" + s.recordIDStr = "record1q2ge0zaztu65tx5x5llv5xc9ztsw42dq2jdvmdazuwzcaddhh8gmu3mcze3" + s.scopeSpecIDStr = "scopespec1qnwg86nsatx5pl56muw0v9ytlz3qu3jx6m" + s.contractSpecIDStr = "contractspec1q000d0q2e8w5say53afqdesxp2zqzkr4fn" + s.recordSpecIDStr = "recspec1qh00d0q2e8w5say53afqdesxp2zw42dq2jdvmdazuwzcaddhh8gmuqhez44" + + s.scopeUUID = uuid.MustParse(s.scopeUUIDStr) + s.sessionUUID = uuid.MustParse(s.sessionUUIDStr) + s.scopeSpecUUID = uuid.MustParse(s.scopeSpecUUIDStr) + s.contractSpecUUID = uuid.MustParse(s.contractSpecUUIDStr) +} + +func TestMetadataAddressTestSuite(t *testing.T) { + suite.Run(t, new(MetadataAddressTestSuite)) +} + +func mustGetMetadataAddressFromBech32(str string) MetadataAddress { + retval, err := MetadataAddressFromBech32(str) + if err != nil { + panic(err) + } + return retval +} + +func mustGetMetadataAddressFromBytes(bz []byte) MetadataAddress { + retval, err := MetadataAddressFromBytes(bz) + if err != nil { + panic(err) + } + return retval +} + +func (s MetadataAddressTestSuite) TestScopeID() { + expectedAddr := mustGetMetadataAddressFromBech32(s.scopeIDStr) + expectedID := s.scopeIDStr + expectedKey := KeyScope + expectedPrefix := PrefixScope + expectedPrimaryUUID := s.scopeUUID + expectedSecondaryBytes := []byte{} + + actualAddr := MetadataAddressForScope(s.scopeUUID) + actualId := actualAddr.String() + actualKey := actualAddr.GetKey() + actualPrefix := actualAddr.GetPrefix() + actualPrimaryUuid := actualAddr.GetPrimaryUUID() + actualSecondaryBytes := actualAddr.GetSecondaryBytes() + + addrFromBytes := mustGetMetadataAddressFromBytes(actualAddr.Bytes()) + + s.Assert().Equal(expectedKey, actualKey, "key") + s.Assert().Equal(expectedPrefix, actualPrefix, "prefix") + s.Assert().Equal(expectedPrimaryUUID, actualPrimaryUuid, "primary uuid") + s.Assert().Equal(expectedSecondaryBytes, actualSecondaryBytes, "secondary bytes") + s.Assert().Equal(expectedID, actualId, "as bech32 strings") + s.Assert().Equal(expectedAddr, actualAddr, "whole metadata address") + s.Assert().Equal(expectedAddr, addrFromBytes, "address from bytes") + s.Assert().True(expectedAddr.Equals(actualAddr), "%s.Equals(%s)", expectedAddr, actualAddr) +} + +func (s MetadataAddressTestSuite) TestSessionID() { + expectedAddr := mustGetMetadataAddressFromBech32(s.sessionIDStr) + expectedID := s.sessionIDStr + expectedKey := KeySession + expectedPrefix := PrefixSession + expectedPrimaryUUID := s.scopeUUID + expectedSecondaryBytes, _ := s.sessionUUID.MarshalBinary() + + actualAddr := MetadataAddressForSession(s.scopeUUID, s.sessionUUID) + actualId := actualAddr.String() + actualKey := actualAddr.GetKey() + actualPrefix := actualAddr.GetPrefix() + actualPrimaryUuid := actualAddr.GetPrimaryUUID() + actualSecondaryBytes := actualAddr.GetSecondaryBytes() + + addrFromBytes := mustGetMetadataAddressFromBytes(actualAddr.Bytes()) + + s.Assert().Equal(expectedKey, actualKey, "key") + s.Assert().Equal(expectedPrefix, actualPrefix, "prefix") + s.Assert().Equal(expectedPrimaryUUID, 
actualPrimaryUuid, "primary uuid") + s.Assert().Equal(expectedSecondaryBytes, actualSecondaryBytes, "secondary bytes") + s.Assert().Equal(expectedID, actualId, "as bech32 strings") + s.Assert().Equal(expectedAddr, actualAddr, "whole metadata address") + s.Assert().Equal(expectedAddr, addrFromBytes, "address from bytes") + s.Assert().True(expectedAddr.Equals(actualAddr), "%s.Equals(%s)", expectedAddr, actualAddr) +} + +func (s MetadataAddressTestSuite) TestRecordID() { + expectedAddr := mustGetMetadataAddressFromBech32(s.recordIDStr) + expectedID := s.recordIDStr + expectedKey := KeyRecord + expectedPrefix := PrefixRecord + expectedPrimaryUUID := s.scopeUUID + expectedSecondaryBytes := s.recordNameHashedBytes + + actualAddr := MetadataAddressForRecord(s.scopeUUID, s.recordName) + actualId := actualAddr.String() + actualKey := actualAddr.GetKey() + actualPrefix := actualAddr.GetPrefix() + actualPrimaryUuid := actualAddr.GetPrimaryUUID() + actualSecondaryBytes := actualAddr.GetSecondaryBytes() + + addrFromBytes := mustGetMetadataAddressFromBytes(actualAddr.Bytes()) + + s.Assert().Equal(expectedKey, actualKey, "key") + s.Assert().Equal(expectedPrefix, actualPrefix, "prefix") + s.Assert().Equal(expectedPrimaryUUID, actualPrimaryUuid, "primary uuid") + s.Assert().Equal(expectedSecondaryBytes, actualSecondaryBytes, "secondary bytes") + s.Assert().Equal(expectedID, actualId, "as bech32 strings") + s.Assert().Equal(expectedAddr, actualAddr, "whole metadata address") + s.Assert().Equal(expectedAddr, addrFromBytes, "address from bytes") + s.Assert().True(expectedAddr.Equals(actualAddr), "%s.Equals(%s)", expectedAddr, actualAddr) +} + +func (s MetadataAddressTestSuite) TestScopeSpecID() { + expectedAddr := mustGetMetadataAddressFromBech32(s.scopeSpecIDStr) + expectedID := s.scopeSpecIDStr + expectedKey := KeyScopeSpecification + expectedPrefix := PrefixScopeSpecification + expectedPrimaryUUID := s.scopeSpecUUID + expectedSecondaryBytes := []byte{} + + actualAddr := MetadataAddressForScopeSpecification(s.scopeSpecUUID) + actualId := actualAddr.String() + actualKey := actualAddr.GetKey() + actualPrefix := actualAddr.GetPrefix() + actualPrimaryUuid := actualAddr.GetPrimaryUUID() + actualSecondaryBytes := actualAddr.GetSecondaryBytes() + + addrFromBytes := mustGetMetadataAddressFromBytes(actualAddr.Bytes()) + + s.Assert().Equal(expectedKey, actualKey, "key") + s.Assert().Equal(expectedPrefix, actualPrefix, "prefix") + s.Assert().Equal(expectedPrimaryUUID, actualPrimaryUuid, "primary uuid") + s.Assert().Equal(expectedSecondaryBytes, actualSecondaryBytes, "secondary bytes") + s.Assert().Equal(expectedID, actualId, "as bech32 strings") + s.Assert().Equal(expectedAddr, actualAddr, "whole metadata address") + s.Assert().Equal(expectedAddr, addrFromBytes, "address from bytes") + s.Assert().True(expectedAddr.Equals(actualAddr), "%s.Equals(%s)", expectedAddr, actualAddr) +} + +func (s MetadataAddressTestSuite) TestContractSpecID() { + expectedAddr := mustGetMetadataAddressFromBech32(s.contractSpecIDStr) + expectedID := s.contractSpecIDStr + expectedKey := KeyContractSpecification + expectedPrefix := PrefixContractSpecification + expectedPrimaryUUID := s.contractSpecUUID + expectedSecondaryBytes := []byte{} + + actualAddr := MetadataAddressForContractSpecification(s.contractSpecUUID) + actualId := actualAddr.String() + actualKey := actualAddr.GetKey() + actualPrefix := actualAddr.GetPrefix() + actualPrimaryUuid := actualAddr.GetPrimaryUUID() + actualSecondaryBytes := actualAddr.GetSecondaryBytes() + + 
addrFromBytes := mustGetMetadataAddressFromBytes(actualAddr.Bytes()) + + s.Assert().Equal(expectedKey, actualKey, "key") + s.Assert().Equal(expectedPrefix, actualPrefix, "prefix") + s.Assert().Equal(expectedPrimaryUUID, actualPrimaryUuid, "primary uuid") + s.Assert().Equal(expectedSecondaryBytes, actualSecondaryBytes, "secondary bytes") + s.Assert().Equal(expectedID, actualId, "as bech32 strings") + s.Assert().Equal(expectedAddr, actualAddr, "whole metadata address") + s.Assert().Equal(expectedAddr, addrFromBytes, "address from bytes") + s.Assert().True(expectedAddr.Equals(actualAddr), "%s.Equals(%s)", expectedAddr, actualAddr) +} + +func (s MetadataAddressTestSuite) TestRecordSpecID() { + expectedAddr := mustGetMetadataAddressFromBech32(s.recordSpecIDStr) + expectedID := s.recordSpecIDStr + expectedKey := KeyRecordSpecification + expectedPrefix := PrefixRecordSpecification + expectedPrimaryUUID := s.contractSpecUUID + expectedSecondaryBytes := s.recordNameHashedBytes + + actualAddr := MetadataAddressForRecordSpecification(s.contractSpecUUID, s.recordName) + actualId := actualAddr.String() + actualKey := actualAddr.GetKey() + actualPrefix := actualAddr.GetPrefix() + actualPrimaryUuid := actualAddr.GetPrimaryUUID() + actualSecondaryBytes := actualAddr.GetSecondaryBytes() + + addrFromBytes := mustGetMetadataAddressFromBytes(actualAddr.Bytes()) + + s.Assert().Equal(expectedKey, actualKey, "key") + s.Assert().Equal(expectedPrefix, actualPrefix, "prefix") + s.Assert().Equal(expectedPrimaryUUID, actualPrimaryUuid, "primary uuid") + s.Assert().Equal(expectedSecondaryBytes, actualSecondaryBytes, "secondary bytes") + s.Assert().Equal(expectedID, actualId, "as bech32 strings") + s.Assert().Equal(expectedAddr, actualAddr, "whole metadata address") + s.Assert().Equal(expectedAddr, addrFromBytes, "address from bytes") + s.Assert().True(expectedAddr.Equals(actualAddr), "%s.Equals(%s)", expectedAddr, actualAddr) +} diff --git a/docs/sdk/metadata/examples/js/.gitignore b/docs/sdk/metadata/examples/js/.gitignore new file mode 100644 index 000000000..735d5f286 --- /dev/null +++ b/docs/sdk/metadata/examples/js/.gitignore @@ -0,0 +1,118 @@ +# Standard .gitignore for node copied from https://github.com/github/gitignore/blob/master/Node.gitignore + +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +lerna-debug.log* + +# Diagnostic reports (https://nodejs.org/api/report.html) +report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json + +# Runtime data +pids +*.pid +*.seed +*.pid.lock + +# Directory for instrumented libs generated by jscoverage/JSCover +lib-cov + +# Coverage directory used by tools like istanbul +coverage +*.lcov + +# nyc test coverage +.nyc_output + +# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) +.grunt + +# Bower dependency directory (https://bower.io/) +bower_components + +# node-waf configuration +.lock-wscript + +# Compiled binary addons (https://nodejs.org/api/addons.html) +build/Release + +# Dependency directories +node_modules/ +jspm_packages/ + +# Snowpack dependency directory (https://snowpack.dev/) +web_modules/ + +# TypeScript cache +*.tsbuildinfo + +# Optional npm cache directory +.npm + +# Optional eslint cache +.eslintcache + +# Microbundle cache +.rpt2_cache/ +.rts2_cache_cjs/ +.rts2_cache_es/ +.rts2_cache_umd/ + +# Optional REPL history +.node_repl_history + +# Output of 'npm pack' +*.tgz + +# Yarn Integrity file +.yarn-integrity + +# dotenv environment variables file +.env +.env.test + +# parcel-bundler cache (https://parceljs.org/) 
+.cache +.parcel-cache + +# Next.js build output +.next +out + +# Nuxt.js build / generate output +.nuxt +dist + +# Gatsby files +.cache/ +# Comment in the public line in if your project uses Gatsby and not Next.js +# https://nextjs.org/blog/next-9-1#public-directory-support +# public + +# vuepress build output +.vuepress/dist + +# Serverless directories +.serverless/ + +# FuseBox cache +.fusebox/ + +# DynamoDB Local files +.dynamodb/ + +# TernJS port file +.tern-port + +# Stores VSCode versions used for testing VSCode extensions +.vscode-test + +# yarn v2 +.yarn/cache +.yarn/unplugged +.yarn/build-state.yml +.yarn/install-state.gz +.pnp.* \ No newline at end of file diff --git a/docs/sdk/metadata/examples/js/lib/metadata-address.js b/docs/sdk/metadata/examples/js/lib/metadata-address.js new file mode 100644 index 000000000..4573cbc2f --- /dev/null +++ b/docs/sdk/metadata/examples/js/lib/metadata-address.js @@ -0,0 +1,346 @@ +"use strict"; +const bech32 = require('bech32').bech32; +const sha256 = require('crypto-js/sha256') + +// A looser UUID regex (than spec) since all we care about are having 16 bytes. +const uuidRegex = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i; + +/** + * Checks if the provided thing is a array or typed array + * @param thing the thing to check. + * @returns true if either an array or typed array, false otherwise. + */ +function isAnArray(thing) { + if (thing == null) { + return false; + } + // Object.prototype.toString.call(thing) will return something like "[object Uint8Array]". + // If the last thing ends in "Array" then close enough. Gotta include the ']' too though. + return Array.isArray(thing) || /Array]$/.test(Object.prototype.toString.call(thing)); + +} + +// Taken and tweaked from https://github.com/uuidjs/uuid/blob/master/src/parse.js +/** + * Parse a UUID string into an array of bytes. + * @param uuidStr the UUID string to parse, e.g. "2CD73ED5-54BF-4C5B-A2B1-30860B8FD21E" + * @returns A Uint8Array with 16 elements. + */ +function parseUuid(uuidStr) { + if (typeof uuidStr !== 'string' || !uuidRegex.test(uuidStr)) { + throw 'Invalid uuidStr.'; + } + + const retval = new Uint8Array(16); + let v; + + // Parse ########-....-....-....-............ + retval[0] = (v = parseInt(uuidStr.slice(0, 8), 16)) >>> 24; + retval[1] = v >>> 16 & 0xff; + retval[2] = v >>> 8 & 0xff; + retval[3] = v & 0xff; + + // Parse ........-####-....-....-............ + retval[4] = (v = parseInt(uuidStr.slice(9, 13), 16)) >>> 8; + retval[5] = v & 0xff; + + // Parse ........-....-####-....-............ + retval[6] = (v = parseInt(uuidStr.slice(14, 18), 16)) >>> 8; + retval[7] = v & 0xff; + + // Parse ........-....-....-####-............ + retval[8] = (v = parseInt(uuidStr.slice(19, 23), 16)) >>> 8; + retval[9] = v & 0xff; + + // Parse ........-....-....-....-############ + // (Use "/" to avoid 32-bit truncation when bit-shifting high-order bytes) + retval[10] = (v = parseInt(uuidStr.slice(24, 36), 16)) / 0x10000000000 & 0xff; + retval[11] = v / 0x100000000 & 0xff; + retval[12] = v >>> 24 & 0xff; + retval[13] = v >>> 16 & 0xff; + retval[14] = v >>> 8 & 0xff; + retval[15] = v & 0xff; + return retval; +} + +/** + * Convert an array of bytes into a UUID string (lowercase). + * @param bytes the array containing the bytes to convert. + * @returns A lowercase string in the format "2cd73ed5-54bf-4c5b-a2b1-30860b8fd21e". + */ +function uuidString(bytes) { + // We just want 16 bytes, so if bytes is longer than that, just get the first 16. 
+ // If it's shorter, leave the rest of them 0. + // This isn't really a standard thing, but I'm favoring this over extra validation and errors. + const uuidBytes = new Uint8Array(16); + if (bytes != null) { + if (isAnArray(bytes)) { + for (let i = 0; i < bytes.length && i < 16; i++) { + // converts bytes[i] to a unsigned 8-bit integer. + // Overflows are wrapped, e.g. -1 becomes 255, and 256 becomes 0, decimals are truncated. + // Strings are converted to numbers as expected, then the same overflow stuff can happen. + uuidBytes[i] = bytes[i]; + } + } else { + console.log('ignoring bytes argument provided to uuidString because it is not an array or typed array.'); + } + } + + let retval = ""; + for (let i = 0; i < 16; i++) { + retval = retval + (uuidBytes[i] + 0x100).toString(16).substr(1); + if (i === 3 || i === 5 || i === 7 || i === 9) { + retval = retval + "-"; + } + } + return retval.toLowerCase(); +} + +/** + * Hashes the provided string and gets the bytes we care about for a MetadataAddress. + * @param string the string to hash. + * @return A Uint8Array with 16 elements. + */ +function getHashedBytes(string) { + let sha256Sum = sha256(string.trim().toLowerCase()); + // A sha256 sum is 32 bytes. + // That sha256 function returns 8 words that are 4 bytes each. + // We want the info in 1 byte chunks, though. + // For MetadataAddress purposes, we also only care about the first 16 bytes. + // 16 bytes / 4 bytes/word = 4 words and each word has 4 bytes. + let bytes = []; + for (let i = 0; i < 4; i++) { + bytes.push( + sha256Sum.words[i] >>> 24, + sha256Sum.words[i] >>> 16 & 0xff, + sha256Sum.words[i] >>> 8 & 0xff, + sha256Sum.words[i] & 0xff, + ) + } + return Uint8Array.from(bytes); +} + +/** + * Get everything that this MetadataAddress library should export. + * @return an object meant for module.exports. + */ +function getMetadataAddressLibrary() { + // The name value of a MetadataAddress object. + const METADATA_ADDRESS_NAME = "MetadataAddress"; + + // Prefix strings for the various types of Metadata Addresses. + const PREFIX_SCOPE = "scope"; + const PREFIX_SESSION = "session"; + const PREFIX_RECORD = "record"; + const PREFIX_SCOPE_SPECIFICATION = "scopespec"; + const PREFIX_CONTRACT_SPECIFICATION = "contractspec"; + const PREFIX_RECORD_SPECIFICATION = "recspec"; + + // Key bytes for the various types of Metadata Addresses. + const KEY_SCOPE = 0; + const KEY_SESSION = 1; + const KEY_RECORD = 2; + const KEY_SCOPE_SPECIFICATION = 4; // Note that this is not in numerical order. + const KEY_CONTRACT_SPECIFICATION = 3; + const KEY_RECORD_SPECIFICATION = 5; + + /** + * Get the prefix for a key byte. + * @param key the byte in question. + * @returns a string prefix, e.g. "scope". + */ + function getPrefixFromKey(key) { + let prefix = key === KEY_SCOPE ? PREFIX_SCOPE + : key === KEY_SESSION ? PREFIX_SESSION + : key === KEY_RECORD ? PREFIX_RECORD + : key === KEY_SCOPE_SPECIFICATION ? PREFIX_SCOPE_SPECIFICATION + : key === KEY_CONTRACT_SPECIFICATION ? PREFIX_CONTRACT_SPECIFICATION + : key === KEY_RECORD_SPECIFICATION ? PREFIX_RECORD_SPECIFICATION + : undefined; + if (prefix === undefined) { + throw 'Invalid key: [' + key + ']'; + } + return prefix; + } + + /** + * Makes sure the bytes have a valid key and correct length. + * @param bytes the array of bytes to validate. + * @returns nothing, but might throw an exception. 
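+     * Valid lengths are 17 bytes (scope, scopespec, contractspec) or 33 bytes (session, record, recspec).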
+ */ + function validateBytes(bytes) { + if (bytes == null || bytes.length === 0) { + throw 'Invalid bytes: undefined, null, or empty.'; + } + let expectedLength = bytes[0] === KEY_SCOPE ? 17 + : bytes[0] === KEY_SESSION ? 33 + : bytes[0] === KEY_RECORD ? 33 + : bytes[0] === KEY_SCOPE_SPECIFICATION ? 17 + : bytes[0] === KEY_CONTRACT_SPECIFICATION ? 17 + : bytes[0] === KEY_RECORD_SPECIFICATION ? 33 + : undefined; + if (expectedLength === undefined) { + throw 'Invalid key: [' + key + ']'; + } + if (expectedLength !== bytes.length) { + throw 'Incorrect data length for type [' + getPrefixFromKey(bytes[0]) + ']: expected [' + expectedLength + '], actual [' + bytes.length + ']'; + } + } + + /** + * Private constructor for a MetadataAddress. + * @param key the key byte for this MetadataAddress. + * @param primaryUuid either a UUID string or an array with the 16 bytes of the primary UUID. + * @param secondary either a string to be hashed or an array of bytes. + */ + function newMetadataAddress(key, primaryUuid, secondary) { + if (!Number.isInteger(key) || key < 0 || key > 5) { + throw 'Invalid key: expected integer between 0 and 5 (inclusive), actual: [' + key + ']'; + } + if (primaryUuid == null) { + throw 'Invalid primaryUuid: null or undefined.'; + } + let primaryUuidBytes = (typeof primaryUuid === "string") ? parseUuid(primaryUuid) + : Uint8Array.from(primaryUuid); + if (primaryUuidBytes.length !== 16) { + throw 'Invalid primaryUuid: expected byte length [16], actual [' + primaryUuid.length + '].'; + } + let secondaryBytes = secondary == null ? new Uint8Array(0) + : (typeof secondary === "string") ? getHashedBytes(secondary) + : Uint8Array.from(secondary); + + // Create the private array of bytes representing this address. + const bytes = new Uint8Array(17 + secondaryBytes.length); + bytes[0] = key; + for (let i = 0; i < 16; i++) { + bytes[i+1] = primaryUuidBytes[i]; + } + for (let i = 0; i < secondaryBytes.length; i++) { + bytes[i+17] = secondaryBytes[i]; + } + + // Pre-compute the bech32 to flush out any final issues (and prevent extra work later). + let bytesAsBech32 = bech32.encode(getPrefixFromKey(bytes[0]), bech32.toWords(bytes)); + + let retval = { + /** The name of this object: "MetadataAddress". */ + name: METADATA_ADDRESS_NAME, + /** The key byte (integer) for this MetadataAddress. */ + key: bytes[0], + /** The prefix string for this MetadataAddress, e.g. "scope". */ + prefix: getPrefixFromKey(bytes[0]), + /** The lowercase UUID string of the bytes of the primary UUID in this MetadataAddress. */ + primaryUuid: uuidString(bytes.slice(1,17)), + /** The secondary bytes of this MetadataAddress (may be empty). */ + secondaryBytes: bytes.slice(17), + /** The bech32 address string of this MetadataAddress. */ + bech32: bytesAsBech32, + /** Returns the bech32 address string for this MetadataAddress. */ + toString: function() { + return bytesAsBech32; + }, + equals: function(other) { + return other != null && other.name === METADATA_ADDRESS_NAME && bytesAsBech32 === other.toString(); + } + }; + + // Make some of the retval properties read-only and show up during object enumeration. + ['name', 'key', 'prefix', 'primaryUuid', 'secondaryBytes', 'bech32'].forEach(function(field) { + Object.defineProperty(retval, field, { + value: retval[field], + writable: false, + enumerable: true + }); + }) + + // Create a getter property for the bytes that always returns a copy of the bytes array. + // This helps prevent this MetadataAddress from being altered while still providing its information. 
+ Object.defineProperty(retval, 'bytes', { + get: function() { + return bytes.slice(0); + }, + enumerable: true + }); + + return retval; + } + + /** Creates a MetadataAddress for a scope. */ + function forScope(scopeUuid) { + return newMetadataAddress(KEY_SCOPE, scopeUuid); + } + + /** Creates a MetadataAddress for a session. */ + function forSession(scopeUuid, sessionUuid) { + if (typeof sessionUuid === 'string') { + sessionUuid = parseUuid(sessionUuid); + } + return newMetadataAddress(KEY_SESSION, scopeUuid, sessionUuid); + } + + /** Creates a MetadataAddress for a record. */ + function forRecord(scopeUuid, recordName) { + return newMetadataAddress(KEY_RECORD, scopeUuid, recordName); + } + + /** Creates a MetadataAddress for a scope specification. */ + function forScopeSpecification(scopeSpecUuid) { + return newMetadataAddress(KEY_SCOPE_SPECIFICATION, scopeSpecUuid); + } + + /** Creates a MetadataAddress for a contract specification. */ + function forContractSpecification(contractSpecUuid) { + return newMetadataAddress(KEY_CONTRACT_SPECIFICATION, contractSpecUuid); + } + + /** Creates a MetadataAddress for a record specification. */ + function forRecordSpecification(contractSpecUuid, recordSpecName) { + return newMetadataAddress(KEY_RECORD_SPECIFICATION, contractSpecUuid, recordSpecName); + } + + /** Creates a MetadataAddress from a bech32 string. */ + function fromBech32(bech32Str) { + let b32 = bech32.decode(bech32Str); + let hrp = b32.prefix; + let bytes = bech32.fromWords(b32.words); + validateBytes(bytes); + let prefix = getPrefixFromKey(bytes[0]); + if (prefix !== hrp) { + throw 'Incorrect HRP: expected [' + prefix + '], actual [' + hrp + '].'; + } + return newMetadataAddress(bytes[0], bytes.slice(1,17), bytes.slice(17)); + } + + /** Creates a MetadataAddress from an array of bytes. */ + function fromBytes(bytes) { + validateBytes(bytes); + return newMetadataAddress(bytes[0], bytes.slice(1,17), bytes.slice(17)); + } + + return { + forScope, + forSession, + forRecord, + forScopeSpecification, + forContractSpecification, + forRecordSpecification, + fromBech32, + fromBytes, + + PREFIX_SCOPE, + PREFIX_SESSION, + PREFIX_RECORD, + PREFIX_SCOPE_SPECIFICATION, + PREFIX_CONTRACT_SPECIFICATION, + PREFIX_RECORD_SPECIFICATION, + + KEY_SCOPE, + KEY_SESSION, + KEY_RECORD, + KEY_SCOPE_SPECIFICATION, + KEY_CONTRACT_SPECIFICATION, + KEY_RECORD_SPECIFICATION, + }; +} + +module.exports = getMetadataAddressLibrary(); \ No newline at end of file diff --git a/docs/sdk/metadata/examples/js/test/metadata_address_test.js b/docs/sdk/metadata/examples/js/test/metadata_address_test.js new file mode 100644 index 000000000..ade32cc8d --- /dev/null +++ b/docs/sdk/metadata/examples/js/test/metadata_address_test.js @@ -0,0 +1,220 @@ +"use strict"; +const test = require('ava'); +const MetadataAddress = require('../lib/metadata-address'); + +// IntelliJ can't yet handle ava tests, and will tell you to run them on the command line. +// Additionally, if run from the root of the repo, undesired js files might get attention. +// It's best to run ava from the root of this js example. +// From the root of this repo: +// $ cd x/metadata/spec/examples/js +// $ node_modules/.bin/ava +// If that doesn't exist, you might need to: +// $ npm install + +// These strings come from the output of x/metadata/types/address_test.go TestGenerateExamples(). + +// Pre-selected UUID strings that go with ID strings generated from the Go code. 
+const SCOPE_UUID = "91978ba2-5f35-459a-86a7-feca1b0512e0"; +const SESSION_UUID = "5803f8bc-6067-4eb5-951f-2121671c2ec0"; +const SCOPE_SPEC_UUID = "dc83ea70-eacd-40fe-9adf-1cf6148bf8a2"; +const CONTRACT_SPEC_UUID = "def6bc0a-c9dd-4874-948f-5206e6060a84"; +const RECORD_NAME = "recordname"; +const RECORD_NAME_HASHED_BYTES = Uint8Array.of(234, 169, 160, 84, 154, 205, 183, 162, 227, 133, 142, 181, 183, 185, 209, 190); + +// Pre-generated ID strings created using Go code and providing the above strings. +const SCOPE_ID = "scope1qzge0zaztu65tx5x5llv5xc9ztsqxlkwel"; +const SESSION_ID = "session1qxge0zaztu65tx5x5llv5xc9zts9sqlch3sxwn44j50jzgt8rshvqyfrjcr"; +const RECORD_ID = "record1q2ge0zaztu65tx5x5llv5xc9ztsw42dq2jdvmdazuwzcaddhh8gmu3mcze3"; +const SCOPE_SPEC_ID = "scopespec1qnwg86nsatx5pl56muw0v9ytlz3qu3jx6m"; +const CONTRACT_SPEC_ID = "contractspec1q000d0q2e8w5say53afqdesxp2zqzkr4fn"; +const RECORD_SPEC_ID = "recspec1qh00d0q2e8w5say53afqdesxp2zw42dq2jdvmdazuwzcaddhh8gmuqhez44"; + +// Copied from metadata-address.js +/** + * Convert an array of bytes into a UUID string (lowercase). + * @param bytes the array containing the bytes to convert. + * @returns A lowercase string in the format "2cd73ed5-54bf-4c5b-a2b1-30860b8fd21e". + */ +function uuidString(bytes) { + // We just want 16 bytes, so if bytes is longer than that, just get the first 16. + // If it's shorter, leave the rest of them 0. + // This isn't really a standard thing, but I'm favoring this over extra validation and errors. + const uuidBytes = new Uint8Array(16); + if (bytes != null) { + if (Number.isInteger(bytes.length)) { + for (let i = 0; i < bytes.length && i < 16; i++) { + // converts bytes[i] to a unsigned 8-bit integer. + // Overflows are wrapped, e.g. -1 becomes 255, and 256 becomes 0, decimals are truncated. + // Strings are converted to numbers as expected, then the same overflow stuff can happen. 
+ uuidBytes[i] = bytes[i]; + } + } else { + console.log('ignoring bytes argument provided to uuidString because it is not an array or typed array.'); + } + } + + let retval = ""; + for (let i = 0; i < 16; i++) { + retval = retval + (uuidBytes[i] + 0x100).toString(16).substr(1); + if (i === 3 || i === 5 || i === 7 || i === 9) { + retval = retval + "-"; + } + } + return retval.toLocaleLowerCase(); +} + +test('scopeId', t => { + let expectedAddr = MetadataAddress.fromBech32(SCOPE_ID); + let expectedId = SCOPE_ID; + let expectedKey = MetadataAddress.KEY_SCOPE; + let expectedPrefix = MetadataAddress.PREFIX_SCOPE; + let expectedPrimaryUuid = SCOPE_UUID; + let expectedSecondaryBytes = new Uint8Array(0); + + let actualAddr = MetadataAddress.forScope(SCOPE_UUID); + let actualId = actualAddr.toString(); + let actualKey = actualAddr.key; + let actualPrefix = actualAddr.prefix; + let actualPrimaryUuid = actualAddr.primaryUuid; + let actualSecondaryBytes = actualAddr.secondaryBytes; + + let addrFromBytes = MetadataAddress.fromBytes(actualAddr.bytes); + + t.deepEqual(expectedKey, actualKey, "key") + t.deepEqual(expectedPrefix, actualPrefix, "prefix") + t.deepEqual(expectedPrimaryUuid, actualPrimaryUuid, "primary UUID") + t.deepEqual(expectedSecondaryBytes, actualSecondaryBytes, "secondary bytes") + t.deepEqual(expectedId, actualId, "as bech32 string") + t.assert(expectedAddr.equals(actualAddr), "whole metadata address") + t.assert(expectedAddr.equals(addrFromBytes), "address from bytes") +}); + +test('sessionId', t => { + let expectedAddr = MetadataAddress.fromBech32(SESSION_ID); + let expectedId = SESSION_ID; + let expectedKey = MetadataAddress.KEY_SESSION; + let expectedPrefix = MetadataAddress.PREFIX_SESSION; + let expectedPrimaryUuid = SCOPE_UUID; + let expectedSecondaryBytes = SESSION_UUID; + + let actualAddr = MetadataAddress.forSession(SCOPE_UUID, SESSION_UUID); + let actualId = actualAddr.toString(); + let actualKey = actualAddr.key; + let actualPrefix = actualAddr.prefix; + let actualPrimaryUuid = actualAddr.primaryUuid; + let actualSecondaryBytes = uuidString(actualAddr.secondaryBytes); + + let addrFromBytes = MetadataAddress.fromBytes(actualAddr.bytes); + + t.deepEqual(expectedKey, actualKey, "key") + t.deepEqual(expectedPrefix, actualPrefix, "prefix") + t.deepEqual(expectedPrimaryUuid, actualPrimaryUuid, "primary UUID") + t.deepEqual(expectedSecondaryBytes, actualSecondaryBytes, "secondary bytes") + t.deepEqual(expectedId, actualId, "as bech32 string") + t.assert(expectedAddr.equals(actualAddr), "whole metadata address") + t.assert(expectedAddr.equals(addrFromBytes), "address from bytes") +}); + +test('recordId', t => { + let expectedAddr = MetadataAddress.fromBech32(RECORD_ID); + let expectedId = RECORD_ID; + let expectedKey = MetadataAddress.KEY_RECORD; + let expectedPrefix = MetadataAddress.PREFIX_RECORD; + let expectedPrimaryUuid = SCOPE_UUID; + let expectedSecondaryBytes = RECORD_NAME_HASHED_BYTES; + + let actualAddr = MetadataAddress.forRecord(SCOPE_UUID, RECORD_NAME); + let actualId = actualAddr.toString(); + let actualKey = actualAddr.key; + let actualPrefix = actualAddr.prefix; + let actualPrimaryUuid = actualAddr.primaryUuid; + let actualSecondaryBytes = actualAddr.secondaryBytes; + + let addrFromBytes = MetadataAddress.fromBytes(actualAddr.bytes); + + t.deepEqual(expectedKey, actualKey, "key") + t.deepEqual(expectedPrefix, actualPrefix, "prefix") + t.deepEqual(expectedPrimaryUuid, actualPrimaryUuid, "primary UUID") + t.deepEqual(expectedSecondaryBytes, actualSecondaryBytes, 
"secondary bytes") + t.deepEqual(expectedId, actualId, "as bech32 string") + t.assert(expectedAddr.equals(actualAddr), "whole metadata address") + t.assert(expectedAddr.equals(addrFromBytes), "address from bytes") +}); + +test('scopeSpecId', t => { + let expectedAddr = MetadataAddress.fromBech32(SCOPE_SPEC_ID); + let expectedId = SCOPE_SPEC_ID; + let expectedKey = MetadataAddress.KEY_SCOPE_SPECIFICATION; + let expectedPrefix = MetadataAddress.PREFIX_SCOPE_SPECIFICATION; + let expectedPrimaryUuid = SCOPE_SPEC_UUID; + let expectedSecondaryBytes = new Uint8Array(0); + + let actualAddr = MetadataAddress.forScopeSpecification(SCOPE_SPEC_UUID); + let actualId = actualAddr.toString(); + let actualKey = actualAddr.key; + let actualPrefix = actualAddr.prefix; + let actualPrimaryUuid = actualAddr.primaryUuid; + let actualSecondaryBytes = actualAddr.secondaryBytes; + + let addrFromBytes = MetadataAddress.fromBytes(actualAddr.bytes); + + t.deepEqual(expectedKey, actualKey, "key") + t.deepEqual(expectedPrefix, actualPrefix, "prefix") + t.deepEqual(expectedPrimaryUuid, actualPrimaryUuid, "primary UUID") + t.deepEqual(expectedSecondaryBytes, actualSecondaryBytes, "secondary bytes") + t.deepEqual(expectedId, actualId, "as bech32 string") + t.assert(expectedAddr.equals(actualAddr), "whole metadata address") + t.assert(expectedAddr.equals(addrFromBytes), "address from bytes") +}); + +test('contractSpecId', t => { + let expectedAddr = MetadataAddress.fromBech32(CONTRACT_SPEC_ID); + let expectedId = CONTRACT_SPEC_ID; + let expectedKey = MetadataAddress.KEY_CONTRACT_SPECIFICATION; + let expectedPrefix = MetadataAddress.PREFIX_CONTRACT_SPECIFICATION; + let expectedPrimaryUuid = CONTRACT_SPEC_UUID; + let expectedSecondaryBytes = new Uint8Array(0); + + let actualAddr = MetadataAddress.forContractSpecification(CONTRACT_SPEC_UUID); + let actualId = actualAddr.toString(); + let actualKey = actualAddr.key; + let actualPrefix = actualAddr.prefix; + let actualPrimaryUuid = actualAddr.primaryUuid; + let actualSecondaryBytes = actualAddr.secondaryBytes; + + let addrFromBytes = MetadataAddress.fromBytes(actualAddr.bytes); + + t.deepEqual(expectedKey, actualKey, "key") + t.deepEqual(expectedPrefix, actualPrefix, "prefix") + t.deepEqual(expectedPrimaryUuid, actualPrimaryUuid, "primary UUID") + t.deepEqual(expectedSecondaryBytes, actualSecondaryBytes, "secondary bytes") + t.deepEqual(expectedId, actualId, "as bech32 string") + t.assert(expectedAddr.equals(actualAddr), "whole metadata address") + t.assert(expectedAddr.equals(addrFromBytes), "address from bytes") +}); + +test('recordSpecId', t => { + let expectedAddr = MetadataAddress.fromBech32(RECORD_SPEC_ID); + let expectedId = RECORD_SPEC_ID; + let expectedKey = MetadataAddress.KEY_RECORD_SPECIFICATION; + let expectedPrefix = MetadataAddress.PREFIX_RECORD_SPECIFICATION; + let expectedPrimaryUuid = CONTRACT_SPEC_UUID; + let expectedSecondaryBytes = RECORD_NAME_HASHED_BYTES; + + let actualAddr = MetadataAddress.forRecordSpecification(CONTRACT_SPEC_UUID, RECORD_NAME); + let actualId = actualAddr.toString(); + let actualKey = actualAddr.key; + let actualPrefix = actualAddr.prefix; + let actualPrimaryUuid = actualAddr.primaryUuid; + let actualSecondaryBytes = actualAddr.secondaryBytes; + + let addrFromBytes = MetadataAddress.fromBytes(actualAddr.bytes); + + t.deepEqual(expectedKey, actualKey, "key") + t.deepEqual(expectedPrefix, actualPrefix, "prefix") + t.deepEqual(expectedPrimaryUuid, actualPrimaryUuid, "primary UUID") + t.deepEqual(expectedSecondaryBytes, 
actualSecondaryBytes, "secondary bytes") + t.deepEqual(expectedId, actualId, "as bech32 string") + t.assert(expectedAddr.equals(actualAddr), "whole metadata address") + t.assert(expectedAddr.equals(addrFromBytes), "address from bytes") +}); diff --git a/docs/sdk/metadata/examples/kotlin/README.md b/docs/sdk/metadata/examples/kotlin/README.md new file mode 100644 index 000000000..454093d23 --- /dev/null +++ b/docs/sdk/metadata/examples/kotlin/README.md @@ -0,0 +1,19 @@ +# Metadata Kotlin Examples + +This README is only here for developer troubleshooting information. +Individual code examples should be discussed in the main spec files. + +## IntelliJ Troubleshooting + +Problem: + +Either the .kt files have a lot of red or IntelliJ doesn't give an option to run the unit tests: + +Solution: + +1. Open the `x/metadata/spec/examples/kotlin/build.gradle.kts` file. +1. There should be a banner at the top indicating an issue with code insight. +1. Click the "Link Gradle Project" link on the right end of that banner. +1. Navigate to and select that same `x/metadata/spec/examples/kotlin/build.gradle.kts` file. +1. Wait for the loading/indexing to finish. + diff --git a/docs/sdk/metadata/examples/kotlin/build.gradle.kts b/docs/sdk/metadata/examples/kotlin/build.gradle.kts new file mode 100644 index 000000000..d0bb972c3 --- /dev/null +++ b/docs/sdk/metadata/examples/kotlin/build.gradle.kts @@ -0,0 +1,36 @@ +import org.jetbrains.kotlin.gradle.tasks.KotlinCompile + +plugins { + kotlin("jvm") version "1.4.32" +} + +group = "io.provenance" +version = "1.0-SNAPSHOT" + +repositories { + mavenCentral() +} + +dependencies { + testImplementation(kotlin("test-junit5")) + testImplementation("org.junit.jupiter:junit-jupiter-api:5.6.0") + testRuntimeOnly("org.junit.jupiter:junit-jupiter-engine:5.6.0") + implementation(kotlin("stdlib-jdk8")) +} + +tasks.test { + useJUnitPlatform() +} + +tasks.withType() { + kotlinOptions.jvmTarget = "11" +} + +val compileKotlin: KotlinCompile by tasks +compileKotlin.kotlinOptions { + jvmTarget = "1.8" +} +val compileTestKotlin: KotlinCompile by tasks +compileTestKotlin.kotlinOptions { + jvmTarget = "1.8" +} \ No newline at end of file diff --git a/docs/sdk/metadata/examples/kotlin/gradle/wrapper/gradle-wrapper.jar b/docs/sdk/metadata/examples/kotlin/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 000000000..e708b1c02 Binary files /dev/null and b/docs/sdk/metadata/examples/kotlin/gradle/wrapper/gradle-wrapper.jar differ diff --git a/docs/sdk/metadata/examples/kotlin/gradle/wrapper/gradle-wrapper.properties b/docs/sdk/metadata/examples/kotlin/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 000000000..f371643ee --- /dev/null +++ b/docs/sdk/metadata/examples/kotlin/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,5 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-7.0-bin.zip +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/docs/sdk/metadata/examples/kotlin/gradlew b/docs/sdk/metadata/examples/kotlin/gradlew new file mode 100755 index 000000000..4f906e0c8 --- /dev/null +++ b/docs/sdk/metadata/examples/kotlin/gradlew @@ -0,0 +1,185 @@ +#!/usr/bin/env sh + +# +# Copyright 2015 the original author or authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? 
-ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin or MSYS, switch paths to Windows format before running java +if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=`expr $i + 1` + done + case $i in + 0) set -- ;; + 1) set -- "$args0" ;; + 2) set -- "$args0" "$args1" ;; + 3) set -- "$args0" "$args1" "$args2" ;; + 4) set -- "$args0" "$args1" "$args2" "$args3" ;; + 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + 7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=`save "$@"` + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +exec "$JAVACMD" "$@" diff --git a/docs/sdk/metadata/examples/kotlin/gradlew.bat b/docs/sdk/metadata/examples/kotlin/gradlew.bat new file mode 100644 index 000000000..ac1b06f93 --- /dev/null +++ b/docs/sdk/metadata/examples/kotlin/gradlew.bat @@ -0,0 +1,89 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. 
+@rem + +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/docs/sdk/metadata/examples/kotlin/src/main/kotlin/Bech32.kt b/docs/sdk/metadata/examples/kotlin/src/main/kotlin/Bech32.kt new file mode 100644 index 000000000..85acf0bd7 --- /dev/null +++ b/docs/sdk/metadata/examples/kotlin/src/main/kotlin/Bech32.kt @@ -0,0 +1,169 @@ +package io.provenance + +import java.io.ByteArrayOutputStream + +/** Data involved with a Bech32 address */ +data class Bech32Data(val hrp: String, val data: ByteArray) { + + /** + * The encapsulated data returned as a Hexadecimal string + */ + val hexData = this.data.joinToString("") { "%02x".format(it) } + + /** + * The Bech32 encoded value of the data prefixed with the human readable portion and + * protected by an appended checksum. + */ + val address = Bech32.encode(hrp, data) + + /** + * The Bech32 Address toString prints state information for debugging purposes. + * @see address() for the bech32 encoded address string output. + */ + override fun toString(): String { + return "bech32 : ${this.address}\nhuman: ${this.hrp} \nbytes: ${this.hexData}" + /* + bech32 : provenance1gx58vp8pryh3jkvxnkvzmd0hqmqqnyqxrtvheq + human: provenance + bytes: 41A87604E1192F1959869D982DB5F706C0099006 + */ + } + + /** equals implementation for a Bech32Data object. 
*/ + override fun equals(other: Any?): Boolean { + if (this === other) return true + if (javaClass != other?.javaClass) return false + other as Bech32Data + return this.hrp == other.hrp && + this.data.contentEquals(other.data) + } + + /** equals implementation for a Bech32Data object. */ + override fun hashCode(): Int { + var result = hrp.hashCode() + result = 31 * result + this.data.contentHashCode() + return result + } +} + +/** BIP173 compliant processing functions for handling Bech32 encoding and decoding. */ +class Bech32 { + companion object { + private const val CHECKSUM_SIZE = 6 + private const val MIN_VALID_LENGTH = 8 + private const val MAX_VALID_LENGTH = 90 + private const val MIN_VALID_CODEPOINT = 33 + private const val MAX_VALID_CODEPOINT = 126 + + private const val charset = "qpzry9x8gf2tvdw0s3jn54khce6mua7l" + private val gen = intArrayOf(0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3) + + /** Decodes a Bech32 String */ + fun decode(bech32: String): Bech32Data { + require(bech32.length in MIN_VALID_LENGTH..MAX_VALID_LENGTH) { "invalid bech32 string length" } + require(bech32.toCharArray().none { c -> c.toInt() < MIN_VALID_CODEPOINT || c.toInt() > MAX_VALID_CODEPOINT }) + { "invalid character in bech32: ${bech32.toCharArray().map { c -> c.toInt() } + .filter { c -> c < MIN_VALID_CODEPOINT || c > MAX_VALID_CODEPOINT }}" } + + require(bech32 == bech32.toLowerCase() || bech32 == bech32.toUpperCase()) + { "bech32 must be either all upper or lower case" } + require(bech32.substring(1).dropLast(CHECKSUM_SIZE).contains('1')) { "invalid index of '1'" } + + val hrp = bech32.substringBeforeLast('1').toLowerCase() + val dataString = bech32.substringAfterLast('1').toLowerCase() + + require(dataString.toCharArray().all { c -> charset.contains(c) }) { "invalid data encoding character in bech32"} + + val dataBytes = dataString.map { c -> charset.indexOf(c).toByte() }.toByteArray() + val checkBytes = dataString.takeLast(CHECKSUM_SIZE).map { c -> charset.indexOf(c).toByte() }.toByteArray() + + val actualSum = checksum(hrp, dataBytes.dropLast(CHECKSUM_SIZE).toTypedArray()) + require(1 == polymod(expandHrp(hrp).plus(dataBytes.map { d -> d.toInt() }))) { "checksum failed: $checkBytes != $actualSum" } + + return Bech32Data(hrp, convertBits(dataBytes.dropLast(CHECKSUM_SIZE).toByteArray(), 5, 8, false)) + } + + /** + * Encodes the provided hrp and data to a Bech32 address string. + * @param hrp the human readable portion (prefix) to use. + * @param eightBitData an array of 8-bit encoded bytes. + */ + fun encode(hrp: String, eightBitData: ByteArray) = + encodeFiveBitData(hrp, convertBits(eightBitData, 8, 5, true)) + + /** Encodes 5-bit bytes (fiveBitData) with a given human readable portion (hrp) into a bech32 string. */ + private fun encodeFiveBitData(hrp: String, fiveBitData: ByteArray): String { + return (fiveBitData.plus(checksum(hrp, fiveBitData.toTypedArray())) + .map { b -> charset[b.toInt()] }).joinToString("", hrp + "1") + } + + /** + * ConvertBits regroups bytes with toBits set based on reading groups of bits as a continuous stream group by fromBits. + * This process is used to convert from base64 (from 8) to base32 (to 5) or the inverse. 
+ */ + private fun convertBits(data: ByteArray, fromBits: Int, toBits: Int, pad: Boolean): ByteArray { + require (fromBits in 1..8 && toBits in 1..8) { "only bit groups between 1 and 8 are supported"} + + var acc = 0 + var bits = 0 + val out = ByteArrayOutputStream(64) + val maxv = (1 shl toBits) - 1 + val maxAcc = (1 shl (fromBits + toBits - 1)) - 1 + + for (b in data) { + val value = b.toInt() and 0xff + if ((value ushr fromBits) != 0) { + throw IllegalArgumentException(String.format("Input value '%X' exceeds '%d' bit size", value, fromBits)) + } + acc = ((acc shl fromBits) or value) and maxAcc + bits += fromBits + while (bits >= toBits) { + bits -= toBits + out.write((acc ushr bits) and maxv) + } + } + if (pad) { + if (bits > 0) { + out.write((acc shl (toBits - bits)) and maxv) + } + } else if (bits >= fromBits || ((acc shl (toBits - bits)) and maxv) != 0) { + throw IllegalArgumentException("Could not convert bits, invalid padding") + } + return out.toByteArray() + } + + /** Calculates a bech32 checksum based on BIP 173 specification */ + private fun checksum(hrp: String, data: Array): ByteArray { + val values = expandHrp(hrp) + .plus(data.map { d -> d.toInt() }) + .plus(Array(6){ 0 }.toIntArray()) + + val poly = polymod(values) xor 1 + + return (0..5).map { + ((poly shr (5 * (5-it))) and 31).toByte() + }.toByteArray() + } + + /** Expands the human readable prefix per BIP173 for Checksum encoding */ + private fun expandHrp(hrp: String) = + hrp.map { c -> c.toInt() shr 5 } + .plus(0) + .plus(hrp.map { c -> c.toInt() and 31 }) + .toIntArray() + + /** Polynomial division function for checksum calculation. For details see BIP173 */ + private fun polymod(values: IntArray): Int { + var chk = 1 + return values.map { v -> + val b = chk shr 25 + chk = ((chk and 0x1ffffff) shl 5) xor v + (0..4).map { + if (((b shr it) and 1) == 1) { + chk = chk xor gen[it] + } + } + }.let { chk } + } + } +} \ No newline at end of file diff --git a/docs/sdk/metadata/examples/kotlin/src/main/kotlin/MetadataAddress.kt b/docs/sdk/metadata/examples/kotlin/src/main/kotlin/MetadataAddress.kt new file mode 100644 index 000000000..5df433a29 --- /dev/null +++ b/docs/sdk/metadata/examples/kotlin/src/main/kotlin/MetadataAddress.kt @@ -0,0 +1,162 @@ +package io.provenance + +import java.nio.ByteBuffer +import java.security.MessageDigest +import java.util.UUID + +const val PREFIX_SCOPE = "scope" +const val PREFIX_SESSION = "session" +const val PREFIX_RECORD = "record" +const val PREFIX_SCOPE_SPECIFICATION = "scopespec" +const val PREFIX_CONTRACT_SPECIFICATION = "contractspec" +const val PREFIX_RECORD_SPECIFICATION = "recspec" + +const val KEY_SCOPE: Byte = 0x00 +const val KEY_SESSION: Byte = 0x01 +const val KEY_RECORD: Byte = 0x02 +const val KEY_SCOPE_SPECIFICATION: Byte = 0x04 // Note that this is not in numerical order. +const val KEY_CONTRACT_SPECIFICATION: Byte = 0x03 +const val KEY_RECORD_SPECIFICATION: Byte = 0x05 + +/** + * This MetadataAddress class helps create ids for the various types objects stored by the metadata module. + */ +data class MetadataAddress internal constructor(val bytes: ByteArray) { + companion object { + /** Create a MetadataAddress for a Scope. */ + fun forScope(scopeUuid: UUID) = + MetadataAddress(byteArrayOf(KEY_SCOPE).plus(uuidAsByteArray(scopeUuid))) + + /** Create a MetadataAddress for a Session. 
*/ + fun forSession(scopeUuid: UUID, sessionUuid: UUID) = + MetadataAddress(byteArrayOf(KEY_SESSION).plus(uuidAsByteArray(scopeUuid)).plus(uuidAsByteArray(sessionUuid))) + + /** Create a MetadataAddress for a Record. */ + fun forRecord(scopeUuid: UUID, recordName: String): MetadataAddress { + if (recordName.isBlank()) { + throw IllegalArgumentException("Invalid recordName: cannot be empty or blank.") + } + return MetadataAddress(byteArrayOf(KEY_RECORD).plus(uuidAsByteArray(scopeUuid)).plus(asHashedBytes(recordName))) + } + + /** Create a MetadataAddress for a Scope Specification. */ + fun forScopeSpecification(scopeSpecUuid: UUID) = + MetadataAddress(byteArrayOf(KEY_SCOPE_SPECIFICATION).plus(uuidAsByteArray(scopeSpecUuid))) + + /** Create a MetadataAddress for a Contract Specification. */ + fun forContractSpecification(contractSpecUuid: UUID) = + MetadataAddress(byteArrayOf(KEY_CONTRACT_SPECIFICATION).plus(uuidAsByteArray(contractSpecUuid))) + + /** Create a MetadataAddress for a Record Specification. */ + fun forRecordSpecification(contractSpecUuid: UUID, recordSpecName: String): MetadataAddress { + if (recordSpecName.isBlank()) { + throw IllegalArgumentException("Invalid recordSpecName: cannot be empty or blank.") + } + return MetadataAddress(byteArrayOf(KEY_RECORD_SPECIFICATION).plus(uuidAsByteArray(contractSpecUuid)).plus(asHashedBytes(recordSpecName))) + } + + /** Create a MetadataAddress object from a bech32 address representation of a MetadataAddress. */ + fun fromBech32(bech32Value: String): MetadataAddress { + val (hrp, data) = Bech32.decode(bech32Value) + validateBytes(data) + val prefix = getPrefixFromKey(data[0]) + if (hrp != prefix) { + throw IllegalArgumentException("Incorrect HRP: Expected ${prefix}, Actual: ${hrp}.") + } + return MetadataAddress(data) + } + + /** Create a MetadataAddress from a ByteArray. */ + fun fromBytes(bytes: ByteArray): MetadataAddress { + validateBytes(bytes) + return MetadataAddress(bytes) + } + + /** Get the prefix that corresponds to the provided key Byte. */ + private fun getPrefixFromKey(key: Byte) = + when (key) { + KEY_SCOPE -> PREFIX_SCOPE + KEY_SESSION -> PREFIX_SESSION + KEY_RECORD -> PREFIX_RECORD + KEY_SCOPE_SPECIFICATION -> PREFIX_SCOPE_SPECIFICATION + KEY_CONTRACT_SPECIFICATION -> PREFIX_CONTRACT_SPECIFICATION + KEY_RECORD_SPECIFICATION -> PREFIX_RECORD_SPECIFICATION + else -> { + throw IllegalArgumentException("Invalid key: $key") + } + } + + /** Checks that the data has a correct key and length. Throws IllegalArgumentException if not. */ + private fun validateBytes(bytes: ByteArray) { + val expectedLength = when (bytes[0]) { + KEY_SCOPE -> 17 + KEY_SESSION -> 33 + KEY_RECORD -> 33 + KEY_SCOPE_SPECIFICATION -> 17 + KEY_CONTRACT_SPECIFICATION -> 17 + KEY_RECORD_SPECIFICATION -> 33 + else -> { + throw IllegalArgumentException("Invalid key: ${bytes[0]}") + } + } + if (expectedLength != bytes.size) { + throw IllegalArgumentException("Incorrect data length for type ${getPrefixFromKey(bytes[0])}: Expected ${expectedLength}, Actual: ${bytes.size}.") + } + } + + /** Converts a UUID to a ByteArray. */ + private fun uuidAsByteArray(uuid: UUID): ByteArray { + val b = ByteBuffer.wrap(ByteArray(16)) + b.putLong(uuid.mostSignificantBits) + b.putLong(uuid.leastSignificantBits) + return b.array() + } + + /** Converts a ByteArray to a UUID. 
*/ + private fun byteArrayAsUuid(data: ByteArray): UUID { + val uuidBytes = ByteArray(16) + if (data.size >= 16) { + data.copyInto(uuidBytes, 0, 0, 16) + } else if (data.isNotEmpty()) { + data.copyInto(uuidBytes, 0, 0, data.size) + } + val bb = ByteBuffer.wrap(uuidBytes) + val mostSig = bb.long + val leastSig = bb.long + return UUID(mostSig, leastSig) + } + + /** Hashes a string and gets the bytes desired for a MetadataAddress. */ + private fun asHashedBytes(str: String) = + MessageDigest.getInstance("SHA-256").digest(str.trim().toLowerCase().toByteArray()).copyOfRange(0, 16) + } + + /** Gets the key byte for this MetadataAddress. */ + fun getKey() = this.bytes[0] + + /** Gets the prefix string for this MetadataAddress, e.g. "scope". */ + fun getPrefix() = getPrefixFromKey(this.bytes[0]) + + /** Gets the set of bytes for the primary uuid part of this MetadataAddress as a UUID. */ + fun getPrimaryUuid() = byteArrayAsUuid(this.bytes.copyOfRange(1,17)) + + /** Gets the set of bytes for the secondary part of this MetadataAddress. */ + fun getSecondaryBytes() = if (this.bytes.size <= 17) byteArrayOf() else bytes.copyOfRange(17, this.bytes.size) + + /** returns this MetadataAddress as a bech32 address string, e.g. "scope1qzge0zaztu65tx5x5llv5xc9ztsqxlkwel" */ + override fun toString() = Bech32.encode(getPrefixFromKey(this.bytes[0]), this.bytes) + + /** hashCode implementation for a MetadataAddress. */ + override fun hashCode() = this.bytes.contentHashCode() + + /** equals implementation for a MetadataAddress. */ + override fun equals(other: Any?): Boolean { + if (this === other) { + return true + } + if (other !is MetadataAddress) { + return false + } + return this.bytes.contentEquals(other.bytes) + } +} \ No newline at end of file diff --git a/docs/sdk/metadata/examples/kotlin/src/test/kotlin/MetadataAddressTest.kt b/docs/sdk/metadata/examples/kotlin/src/test/kotlin/MetadataAddressTest.kt new file mode 100644 index 000000000..f54163c4b --- /dev/null +++ b/docs/sdk/metadata/examples/kotlin/src/test/kotlin/MetadataAddressTest.kt @@ -0,0 +1,213 @@ +package io.provenance + +import org.junit.jupiter.api.Assertions.assertArrayEquals +import org.junit.jupiter.api.Assertions.assertEquals +import org.junit.jupiter.api.Test +import java.nio.ByteBuffer +import java.util.UUID + +class MetadataAddressTest { + + // These strings come from the output of x/metadata/types/address_test.go TestGenerateExamples(). + + // Pre-selected UUID strings that go with ID strings generated from the Go code. + private val scopeUuidString = "91978ba2-5f35-459a-86a7-feca1b0512e0" + private val sessionUuidString = "5803f8bc-6067-4eb5-951f-2121671c2ec0" + private val scopeSpecUuidString = "dc83ea70-eacd-40fe-9adf-1cf6148bf8a2" + private val contractSpecUuidString = "def6bc0a-c9dd-4874-948f-5206e6060a84" + private val recordName = "recordname" + private val recordNameHashedBytes = byteArrayOfInts(234, 169, 160, 84, 154, 205, 183, 162, 227, 133, 142, 181, 183, 185, 209, 190) + + // Pre-generated ID strings created using Go code and providing the above strings. 
+ private val scopeIdString = "scope1qzge0zaztu65tx5x5llv5xc9ztsqxlkwel" + private val sessionIdString = "session1qxge0zaztu65tx5x5llv5xc9zts9sqlch3sxwn44j50jzgt8rshvqyfrjcr" + private val recordIdString = "record1q2ge0zaztu65tx5x5llv5xc9ztsw42dq2jdvmdazuwzcaddhh8gmu3mcze3" + private val scopeSpecIdString = "scopespec1qnwg86nsatx5pl56muw0v9ytlz3qu3jx6m" + private val contractSpecIdString = "contractspec1q000d0q2e8w5say53afqdesxp2zqzkr4fn" + private val recordSpecIdString = "recspec1qh00d0q2e8w5say53afqdesxp2zw42dq2jdvmdazuwzcaddhh8gmuqhez44" + + // UUID versions of the UUID strings. + private val scopeUuid: UUID = UUID.fromString(scopeUuidString) + private val sessionUuid: UUID = UUID.fromString(sessionUuidString) + private val scopeSpecUuid: UUID = UUID.fromString(scopeSpecUuidString) + private val contractSpecUuid: UUID = UUID.fromString(contractSpecUuidString) + + private fun byteArrayOfInts(vararg ints: Int) = ByteArray(ints.size) { pos -> ints[pos].toByte() } + + // Copy/Pasted out of MetadataAddress.kt so that it can be private in there. + /** Converts a UUID to a ByteArray. */ + private fun uuidAsByteArray(uuid: UUID): ByteArray { + val b = ByteBuffer.wrap(ByteArray(16)) + b.putLong(uuid.mostSignificantBits) + b.putLong(uuid.leastSignificantBits) + return b.array() + } + + @Test + fun scopeId() { + val expectedAddr = MetadataAddress.fromBech32(scopeIdString) + val expectedId = scopeIdString + val expectedKey = KEY_SCOPE + val expectedPrefix = PREFIX_SCOPE + val expectedPrimaryUuid = scopeUuid + val expectedSecondaryBytes = byteArrayOf() + + val actualAddr = MetadataAddress.forScope(scopeUuid) + val actualId = actualAddr.toString() + val actualKey = actualAddr.getKey() + val actualPrefix = actualAddr.getPrefix() + val actualPrimaryUuid = actualAddr.getPrimaryUuid() + val actualSecondaryBytes = actualAddr.getSecondaryBytes() + + val addrFromBytes = MetadataAddress.fromBytes(actualAddr.bytes) + + assertEquals(expectedKey, actualKey, "key") + assertEquals(expectedPrefix, actualPrefix, "prefix") + assertEquals(expectedPrimaryUuid, actualPrimaryUuid, "primary uuid") + assertArrayEquals(expectedSecondaryBytes, actualSecondaryBytes, "secondary bytes") + assertEquals(expectedId, actualId, "as bech32 strings") + assertEquals(expectedAddr, actualAddr, "whole metadata address") + assertEquals(expectedAddr, addrFromBytes, "address from bytes") + assertEquals(expectedAddr.hashCode(), actualAddr.hashCode(), "hash codes") + } + + @Test + fun sessionId() { + val expectedAddr = MetadataAddress.fromBech32(sessionIdString) + val expectedId = sessionIdString + val expectedKey = KEY_SESSION + val expectedPrefix = PREFIX_SESSION + val expectedPrimaryUuid = scopeUuid + val expectedSecondaryBytes = uuidAsByteArray(sessionUuid) + + val actualAddr = MetadataAddress.forSession(scopeUuid, sessionUuid) + val actualId = actualAddr.toString() + val actualKey = actualAddr.getKey() + val actualPrefix = actualAddr.getPrefix() + val actualPrimaryUuid = actualAddr.getPrimaryUuid() + val actualSecondaryBytes = actualAddr.getSecondaryBytes() + + val addrFromBytes = MetadataAddress.fromBytes(actualAddr.bytes) + + assertEquals(expectedKey, actualKey, "key") + assertEquals(expectedPrefix, actualPrefix, "prefix") + assertEquals(expectedPrimaryUuid, actualPrimaryUuid, "primary uuid") + assertArrayEquals(expectedSecondaryBytes, actualSecondaryBytes, "secondary bytes") + assertEquals(expectedId, actualId, "as bech32 strings") + assertEquals(expectedAddr, actualAddr, "whole metadata address") + assertEquals(expectedAddr, 
addrFromBytes, "address from bytes") + assertEquals(expectedAddr.hashCode(), actualAddr.hashCode(), "hash codes") + } + + @Test + fun recordId() { + val expectedAddr = MetadataAddress.fromBech32(recordIdString) + val expectedId = recordIdString + val expectedKey = KEY_RECORD + val expectedPrefix = PREFIX_RECORD + val expectedPrimaryUuid = scopeUuid + val expectedSecondaryBytes = recordNameHashedBytes + + val actualAddr = MetadataAddress.forRecord(scopeUuid, recordName) + val actualId = actualAddr.toString() + val actualKey = actualAddr.getKey() + val actualPrefix = actualAddr.getPrefix() + val actualPrimaryUuid = actualAddr.getPrimaryUuid() + val actualSecondaryBytes = actualAddr.getSecondaryBytes() + + val addrFromBytes = MetadataAddress.fromBytes(actualAddr.bytes) + + assertEquals(expectedKey, actualKey, "key") + assertEquals(expectedPrefix, actualPrefix, "prefix") + assertEquals(expectedPrimaryUuid, actualPrimaryUuid, "primary uuid") + assertArrayEquals(expectedSecondaryBytes, actualSecondaryBytes, "secondary bytes") + assertEquals(expectedId, actualId, "as bech32 strings") + assertEquals(expectedAddr, actualAddr, "whole metadata address") + assertEquals(expectedAddr, addrFromBytes, "address from bytes") + assertEquals(expectedAddr.hashCode(), actualAddr.hashCode(), "hash codes") + } + + @Test + fun scopeSpecId() { + val expectedAddr = MetadataAddress.fromBech32(scopeSpecIdString) + val expectedId = scopeSpecIdString + val expectedKey = KEY_SCOPE_SPECIFICATION + val expectedPrefix = PREFIX_SCOPE_SPECIFICATION + val expectedPrimaryUuid = scopeSpecUuid + val expectedSecondaryBytes = byteArrayOf() + + val actualAddr = MetadataAddress.forScopeSpecification(scopeSpecUuid) + val actualId = actualAddr.toString() + val actualKey = actualAddr.getKey() + val actualPrefix = actualAddr.getPrefix() + val actualPrimaryUuid = actualAddr.getPrimaryUuid() + val actualSecondaryBytes = actualAddr.getSecondaryBytes() + + val addrFromBytes = MetadataAddress.fromBytes(actualAddr.bytes) + + assertEquals(expectedKey, actualKey, "key") + assertEquals(expectedPrefix, actualPrefix, "prefix") + assertEquals(expectedPrimaryUuid, actualPrimaryUuid, "primary uuid") + assertArrayEquals(expectedSecondaryBytes, actualSecondaryBytes, "secondary bytes") + assertEquals(expectedId, actualId, "as bech32 strings") + assertEquals(expectedAddr, actualAddr, "whole metadata address") + assertEquals(expectedAddr, addrFromBytes, "address from bytes") + assertEquals(expectedAddr.hashCode(), actualAddr.hashCode(), "hash codes") + } + + @Test + fun contractSpecId() { + val expectedAddr = MetadataAddress.fromBech32(contractSpecIdString) + val expectedId = contractSpecIdString + val expectedKey = KEY_CONTRACT_SPECIFICATION + val expectedPrefix = PREFIX_CONTRACT_SPECIFICATION + val expectedPrimaryUuid = contractSpecUuid + val expectedSecondaryBytes = byteArrayOf() + + val actualAddr = MetadataAddress.forContractSpecification(contractSpecUuid) + val actualId = actualAddr.toString() + val actualKey = actualAddr.getKey() + val actualPrefix = actualAddr.getPrefix() + val actualPrimaryUuid = actualAddr.getPrimaryUuid() + val actualSecondaryBytes = actualAddr.getSecondaryBytes() + + val addrFromBytes = MetadataAddress.fromBytes(actualAddr.bytes) + + assertEquals(expectedKey, actualKey, "key") + assertEquals(expectedPrefix, actualPrefix, "prefix") + assertEquals(expectedPrimaryUuid, actualPrimaryUuid, "primary uuid") + assertArrayEquals(expectedSecondaryBytes, actualSecondaryBytes, "secondary bytes") + assertEquals(expectedId, actualId, "as 
bech32 strings") + assertEquals(expectedAddr, actualAddr, "whole metadata address") + assertEquals(expectedAddr, addrFromBytes, "address from bytes") + assertEquals(expectedAddr.hashCode(), actualAddr.hashCode(), "hash codes") + } + + @Test + fun recordSpecId() { + val expectedAddr = MetadataAddress.fromBech32(recordSpecIdString) + val expectedId = recordSpecIdString + val expectedKey = KEY_RECORD_SPECIFICATION + val expectedPrefix = PREFIX_RECORD_SPECIFICATION + val expectedPrimaryUuid = contractSpecUuid + val expectedSecondaryBytes = recordNameHashedBytes + + val actualAddr = MetadataAddress.forRecordSpecification(contractSpecUuid, recordName) + val actualId = actualAddr.toString() + val actualKey = actualAddr.getKey() + val actualPrefix = actualAddr.getPrefix() + val actualPrimaryUuid = actualAddr.getPrimaryUuid() + val actualSecondaryBytes = actualAddr.getSecondaryBytes() + + val addrFromBytes = MetadataAddress.fromBytes(actualAddr.bytes) + + assertEquals(expectedKey, actualKey, "key") + assertEquals(expectedPrefix, actualPrefix, "prefix") + assertEquals(expectedPrimaryUuid, actualPrimaryUuid, "primary uuid") + assertArrayEquals(expectedSecondaryBytes, actualSecondaryBytes, "secondary bytes") + assertEquals(expectedId, actualId, "as bech32 strings") + assertEquals(expectedAddr, actualAddr, "whole metadata address") + assertEquals(expectedAddr, addrFromBytes, "address from bytes") + assertEquals(expectedAddr.hashCode(), actualAddr.hashCode(), "hash codes") + } +} \ No newline at end of file diff --git a/docs/sdk/msgfees/01_concepts.md b/docs/sdk/msgfees/01_concepts.md new file mode 100644 index 000000000..8b52e6f70 --- /dev/null +++ b/docs/sdk/msgfees/01_concepts.md @@ -0,0 +1,95 @@ + + +# Concepts +The msg fees modules manages additional fees that can be applied to tx msgs specified through governance. + + + - [Additional Msg Fees](#additional-msg-fees) + - [Adding Custom Additional Fee from Wasm Contract](#adding-custom-additional-fee-from-wasm-contract) + - [Base Fee](#base-fee) + - [Total Fees](#total-fees) + - [Additional Fee Assessed in Base Denom i.e nhash](#additional-fee-assessed-in-base-denom-ie-nhash) + - [Authz and Wamsd Messages](#authz-and-wamsd-messages) + - [Simulation and Calculating the Additional Fee to be Paid](#simulation-and-calculating-the-additional-fee-to-be-paid) + + + +## Additional Msg Fees + +Fees is one of the most important tools available to secure a PoS network since it incentives staking and encourages spam prevention etc. + +As part of the provenance blockchain economics certain messages *may* require an additional fee to be paid in +addition to the normal gas consumption. + +Additional fees are assessed and finally consumed based on msgType of the msgs contained in the transaction, +and the fee schedule that is persisted on chain. These additional fees are created/updated/removed by governance through `AddMsgFeeProposal`, `UpdateMsgFeeProposal`, and `RemoveMsgFeeProposal` proposals. + +Additional fee can be in any *denom*. This can be split to an optional bech32 account address with basis points. + +## Adding Custom Additional Fee from Wasm Contract + +Creators of wasm contracts have the ability to dispatch an `MsgAssessCustomMsgFeeRequest` that charges a custom fee +defined by the creator of the contract. The set fee will be split between the fee module and a specified address in the +msg. [Assess Fee Specifications](09_messages.md) + +## Base Fee + +Base fee is the current fee implementation. 
Fees are paid in the base denom and are determined by the gas value passed into the Tx.
+The value collected remains the same.
+
+## Total Fees
+
+Total fees = Additional Fees (if any) + Base Fee
+Total fees continue to be passed in through the `sdk.Coins` fee field that the Tx already accepts.
+For example, if the additional fee is charged in the `usd.example` denom (assuming a marker/coin of type usd.example exists):
+```bash
+--fees 382199010nhash,99usd.example
+```
+
+## Additional Fee Assessed in Base Denom i.e nhash
+
+To preserve backwards compatibility of all invokes, clients continue to pass fees in as sdk.Coins `type Coins []Coin`. Because the code needs to distinguish between the base fee and the additional fee, the msgfees module introduces an additional param, described in the [params documentation](06_params.md), called `DefaultFloorGasPrice`, to differentiate between base fee and additional fee when the additional fee is in the same denom as the default base denom, i.e. nhash.
+
+This floor fee is charged up front by the antehandler; any remaining fee, including the additional fees, is collected
+at the end of the Tx (same as current behavior).
+
+For example:
+Additional fee = 10000nhash
+Gas = 10000
+Fee passed in = 19070000nhash
+
+Here the client passes in 10000nhash more than the required minimum (1905 * 10000 + 10000 = 19060000nhash).
+Current behavior is maintained: the tx passes, 19050000nhash is charged up front, and the 10000nhash additional fee plus the 10000nhash extra that was passed in are collected at the deliverTx stage.
+Thus, this protects against future changes such as a priority mempool while keeping behavior the same as current production.
+
+## Authz and Wamsd Messages
+
+Authz and wasmd messages are dispatched via the submessages route, so they are charged and assessed the same additional
+fee if one is set on a submessage. The caveat is that they forfeit all their fees if they fail (since there is no way of knowing upfront what
+the submessages may be).
+
+For example, if a `MsgSend` has a fee of 100usd.local and a smart contract performs 3 MsgSend operations as part of its logic, the code will expect additional fees of 300usd.local (3 msgs x 100usd.local) to be present for the Tx to be successful.
+
+## Simulation and Calculating the Additional Fee to be Paid
+
+The current simulation method looks like this:
+```kotlin
+val cosmosService = cosmos.tx.v1beta1.ServiceGrpc.newBlockingStub(channel)
+cosmosService.simulate(SimulateRequest.newBuilder().setTx(txFinal).build()).gasInfo.gasUsed
+
+```
+
+Going forward, we recommend using this method instead:
+```kotlin
+val msgFeeClient = io.provenance.msgfees.v1.QueryGrpc.newBlockingStub(channel)
+msgFeeClient.calculateTxFees(CalculateTxFeesRequest.newBuilder().setTx(txFinal).build())
+
+```
+
+or from the command line:
+
+```bash
+provenanced tx simulate
+```
diff --git a/docs/sdk/msgfees/02_state.md b/docs/sdk/msgfees/02_state.md
new file mode 100644
index 000000000..819be7a7d
--- /dev/null
+++ b/docs/sdk/msgfees/02_state.md
@@ -0,0 +1,26 @@
+
+
+# State
+
+`MsgFee` is the core record stored on the blockchain. It consists of four parts:
+ 1. the msg type url, i.e. /cosmos.bank.v1beta1.MsgSend
+ 2. minimum additional fees (can be of any denom)
+ 3. optional recipient of fee based on `recipient_basis_points`
+ 4. if a recipient is declared, they will receive their basis-points share of the fee (0-10,000)
+
+ [MsgFee proto](../../../proto/provenance/msgfees/v1/msgfees.proto#L25-L37)
+```protobuf
+message MsgFee {
+  string msg_type_url = 1;
+  // additional_fee can pay in any Coin( basically a Denom and Amount, Amount can be zero)
+  cosmos.base.v1beta1.Coin additional_fee = 2
+      [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"additional_fee\""];
+  string recipient = 3; // optional recipient address, the amount is split between recipient and fee module
+  uint32 recipient_basis_points =
+      4; // optional split of funds between the recipient and fee module defaults to 50:50 split
+}
+```
+
+This state is created via governance proposals.
diff --git a/docs/sdk/msgfees/03_start_end_block.md b/docs/sdk/msgfees/03_start_end_block.md
new file mode 100644
index 000000000..543064722
--- /dev/null
+++ b/docs/sdk/msgfees/03_start_end_block.md
@@ -0,0 +1,7 @@
+
+
+# Start and End Block
+
+The start and end block handlers are not currently used by the msgfees module.
diff --git a/docs/sdk/msgfees/04_queries.md b/docs/sdk/msgfees/04_queries.md
new file mode 100644
index 000000000..38736609d
--- /dev/null
+++ b/docs/sdk/msgfees/04_queries.md
@@ -0,0 +1,52 @@
+
+
+# MsgFees Queries
+
+
+## Msg/GenesisState
+
+GenesisState contains a set of msg fees, exported from and later imported into the store.
+[genesis.proto](../../../proto/provenance/msgfees/v1/genesis.proto?plain=1)
+
+
+## Query Request/Response Object
+Get the params for the module: [get params](../../../proto/provenance/msgfees/v1/query.proto?plain=1)
+
+[Query all msgfees in the system](../../../proto/provenance/msgfees/v1/query.proto?plain=1)
+QueryAllMsgFeesRequest/QueryAllMsgFeesResponse is the request/response pair for all messages
+which have fees associated with them.
+
+[Simulate fees (including additional fees to be paid for a Tx)](../../../proto/provenance/msgfees/v1/query.proto?plain=1)
+To simulate the fees required for a Tx, use CalculateTxFeesRequest.
+
+Request: [CalculateTxFeesRequest](../../../proto/provenance/msgfees/v1/query.proto#L59-L68)
+```protobuf
+// CalculateTxFeesRequest is the request type for the Query RPC method.
+message CalculateTxFeesRequest {
+  // tx_bytes is the transaction to simulate.
+  bytes tx_bytes = 1;
+  // default_base_denom is used to set the denom used for gas fees
+  // if not set it will default to nhash.
+  string default_base_denom = 2;
+  // gas_adjustment is the adjustment factor to be multiplied against the estimate returned by the tx simulation
+  float gas_adjustment = 3;
+}
+```
+Response: [CalculateTxFeesResponse](../../../proto/provenance/msgfees/v1/query.proto#L70-L81)
+```protobuf
+// CalculateTxFeesResponse is the response type for the Query RPC method.
+message CalculateTxFeesResponse { + // additional_fees are the amount of coins to be for addition msg fees + repeated cosmos.base.v1beta1.Coin additional_fees = 1 + [(gogoproto.nullable) = false, (gogoproto.castrepeated) = "github.com/cosmos/cosmos-sdk/types.Coins"]; + // total_fees are the total amount of fees needed for the transactions (msg fees + gas fee) + // note: the gas fee is calculated with the min gas fee param as a constant + repeated cosmos.base.v1beta1.Coin total_fees = 2 + [(gogoproto.nullable) = false, (gogoproto.castrepeated) = "github.com/cosmos/cosmos-sdk/types.Coins"]; + // estimated_gas is the amount of gas needed for the transaction + uint64 estimated_gas = 3 [(gogoproto.moretags) = "yaml:\"estimated_gas\""]; +} +``` +total fee is calculated based on `floor_gas_price` param set to 1905nhash for now. diff --git a/docs/sdk/msgfees/05_events.md b/docs/sdk/msgfees/05_events.md new file mode 100644 index 000000000..d0e7d77db --- /dev/null +++ b/docs/sdk/msgfees/05_events.md @@ -0,0 +1,58 @@ + + +# Events + +Existing fee event continue to show total fee charged + + + - [Any Tx](#any-tx) + - [Tx with Additional Fee](#tx-with-additional-fee) + - [Tx Summary Event](#tx-summary-event) + - [Add/Update/Remove Proposal](#addupdateremove-proposal) + +## Any Tx + +If a Tx was successful, or if it failed, but the min fee was charged, these two events are emitted: + +| Type | Attribute Key | Attribute Value | +| -------- |------------------|-------------------------------| +| tx | fee | total fee (coins) | +| tx | min_fee_charged | floor gas price * gas (coins) | + + +## Tx with Additional Fee + +If there are tx msgs that have additional fees, and those fees were successfully charged, a breakdown event will be emitted. + +Type: tx + +| Attribute Key | Attribute Value | +| ------------- | -------------------------------------------------------------------| +| additionalfee | additional fee charged (coins) | +| basefee | total fee - additional fee, should always cover gas costs (coins) | + +## Tx Summary Event + +If there are tx msgs that have additional fees, and those fees were successfully charged, a summary event will be emitted. + +Type: provenance.msgfees.v1.EventMsgFees + +| Type | Attribute Key | Attribute Value | +| ------------ | ------------- | --------------------------------------------------------------------------- | +| EventMsgFees | MsgFees | A JSON list of EventMsgFee entries summarizing each msg type and recipient. | + +Each `EventMsgFee` has the following fields: + +| Field Name | Field Value | +| ------------- | ------------------------------------------------------------------------------------------------------ | +| type_url | The type url for the tx msg that has a msg fee. | +| count | A count of txs with this msg type. | +| total | The total amount of additional fees for this msg type and recipient (type_url count * msg fee = total) | +| recipient | the bech32 address that the fee was sent to. An empty string indicates the module is the recipient. | + +## Add/Update/Remove Proposal + +Governance proposals events(for proposed msg fees) will continue to be emitted by cosmos sdk. 
+ (https://github.com/cosmos/cosmos-sdk/blob/master/x/gov/spec/04_events.md) diff --git a/docs/sdk/msgfees/06_params.md b/docs/sdk/msgfees/06_params.md new file mode 100644 index 000000000..b67ee742a --- /dev/null +++ b/docs/sdk/msgfees/06_params.md @@ -0,0 +1,19 @@ + + + +# Parameters + +The MsgFee module contains the following parameter: + +| Key | Type | Example | +|------------------------|----------|-----------------------------------| +| FloorGasPrice | `uint32` | `"1905"` | +| NhashPerUsdMil | `uint64` | `"14285714"` | + + + +FloorGasPrice is the value of base denom that is charged for calculating base fees, for when base fee and additional fee are charged in the base denom. + +NhashPerUsdMil is the number of nhash per usd mil \ No newline at end of file diff --git a/docs/sdk/msgfees/07_governance.md b/docs/sdk/msgfees/07_governance.md new file mode 100644 index 000000000..b5ce26e1e --- /dev/null +++ b/docs/sdk/msgfees/07_governance.md @@ -0,0 +1,95 @@ + + +# Governance Proposal Control + +The msgfee module supports addition, update, and deletion of Msg Type which are assessed fees via governance proposal. + + + - [Add MsgFee Proposal](#add-msgfee-proposal) + - [Update MsgFee Proposal](#update-msgfee-proposal) + - [Remove MsgFee Proposal](#remove-msgfee-proposal) + + + +## Add MsgFee Proposal + +AddMsgFeeProposal defines a governance proposal to create a new msgfee entry for a specific `MsgType`. + +Add proposal [AddMsgFeeProposal](../../../proto/provenance/msgfees/v1/proposals.proto#L19-L34): +```protobuf +// AddMsgFeeProposal defines a governance proposal to add additional msg based fee +message AddMsgFeeProposal { + option (gogoproto.equal) = true; + option (gogoproto.goproto_stringer) = false; + + string title = 1; + string description = 2; + + string msg_type_url = 3; + + cosmos.base.v1beta1.Coin additional_fee = 4 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "github.com/cosmos/cosmos-sdk/types.Coins", + (gogoproto.moretags) = "yaml:\"additional_fee\"" + ]; + string recipient = 5; + uint32 recipient_basis_points = 6; +} + +``` +sample command to add an additional fee locally + +```bash + ${PROVENANCE_DEV_DIR}/build/provenanced -t tx msgfees proposal add "adding" "adding bank send addition fee" 10000000000nhash \ + --msg-type=/cosmos.bank.v1beta1.MsgSend --additional-fee 99usd.local\ + --from node0 \ + --home ${PROVENANCE_DEV_DIR}/build/node0 \ + --chain-id chain-local \ + --keyring-backend test \ + --gas auto \ + --fees 250990180nhash \ + --broadcast-mode block \ + --yes \ + --testnet +``` +## Update MsgFee Proposal + +Update proposal [UpdateMsgFeeProposal](../../../proto/provenance/msgfees/v1/proposals.proto#L36-L51): +```protobuf +// UpdateMsgFeeProposal defines a governance proposal to update a current msg based fee +message UpdateMsgFeeProposal { + option (gogoproto.equal) = true; + option (gogoproto.goproto_stringer) = false; + + string title = 1; + string description = 2; + + string msg_type_url = 3; + + cosmos.base.v1beta1.Coin additional_fee = 4 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "github.com/cosmos/cosmos-sdk/types.Coins", + (gogoproto.moretags) = "yaml:\"additional_fee\"" + ]; + string recipient = 5; + uint32 recipient_basis_points = 6; +} +``` + +## Remove MsgFee Proposal + +Remove proposal [RemoveMsgFeeProposal](../../../proto/provenance/msgfees/v1/proposals.proto#L53-L62): +```protobuf +// RemoveMsgFeeProposal defines a governance proposal to delete a current msg based fee +message RemoveMsgFeeProposal { + option 
(gogoproto.equal) = true; + option (gogoproto.goproto_stringer) = false; + + string title = 1; + string description = 2; + + string msg_type_url = 3; +} +``` diff --git a/docs/sdk/msgfees/08_genesis.md b/docs/sdk/msgfees/08_genesis.md new file mode 100644 index 000000000..b60132bf6 --- /dev/null +++ b/docs/sdk/msgfees/08_genesis.md @@ -0,0 +1,11 @@ + +# MsgFees Genesis + + + +## Msg/GenesisState + +GenesisState contains a set of msg fees, exported and later imported from/to the store. +[genesis.proto](../../../proto/provenance/msgfees/v1/genesis.proto?plain=1) \ No newline at end of file diff --git a/docs/sdk/msgfees/09_messages.md b/docs/sdk/msgfees/09_messages.md new file mode 100644 index 000000000..6028e6290 --- /dev/null +++ b/docs/sdk/msgfees/09_messages.md @@ -0,0 +1,27 @@ +# Messages + +In this section we describe the messages that are used in the msgfees module. + +## MsgAssessCustomMsgFeeRequest + +A custom fee is applied when this message is broadcast. This would be used in a smart contract to charge a custom fee for the usage. + +```proto +// MsgAssessCustomMsgFeeRequest defines an sdk.Msg type +message MsgAssessCustomMsgFeeRequest { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + option (gogoproto.goproto_stringer) = true; + + string name = 1; // optional short name for custom msg fee, this will be emitted as a property of the event + cosmos.base.v1beta1.Coin amount = 2 [(gogoproto.nullable) = false]; // amount of additional fee that must be paid + string recipient = 3; // optional recipient address, the basis points amount is sent to the recipient + string from = 4; // the signer of the msg + string recipient_basis_points = 5; // optional basis points 0 - 10,000 for recipient defaults to 10,000 +} +``` + +The `amount` must be in `usd` or `nhash` else the msg will not pass validation. If the amount is specified as `usd` this will be converted +to `nhash` using the `UsdConversionRate` param. Note: `usd` and `UsdConversionRate` are specified in mils. Example: 1234 = $1.234 + +The `recipient` is a bech32 address of an account that will receive the amount calculated from the `recipient_basis_points`. If the `recipient_basis_points` is left empty the whole `amount` will be sent to the recipient. The remainder is sent the the Fee Module. \ No newline at end of file diff --git a/docs/sdk/msgfees/README.md b/docs/sdk/msgfees/README.md new file mode 100644 index 000000000..24a813f21 --- /dev/null +++ b/docs/sdk/msgfees/README.md @@ -0,0 +1,17 @@ +# `x/msgfees` + +## Overview + +The msg fees modules manages additional fees that can be applied to tx msgs specified through governance. + +## Contents + +1. **[Concepts](01_concepts.md)** +2. **[State](02_state.md)** +3. **[Start End Block](03_start_end_block.md)** +4. **[Queries](04_queries.md)** +5. **[Events](05_events.md)** +6. **[Params](06_params.md)** +7. **[Governance](07_governance.md)** +8. **[Genesis](08_genesis.md)** +9. **[Messages](09_messages.md)** \ No newline at end of file diff --git a/docs/sdk/name/01_concepts.md b/docs/sdk/name/01_concepts.md new file mode 100644 index 000000000..39f074ff0 --- /dev/null +++ b/docs/sdk/name/01_concepts.md @@ -0,0 +1,38 @@ +# Concepts + +The name service builds up a heirarchy of names similar to DNS using dot separated strings. Each level in the heirarchy +can be setup with an account that "owns" the name. This owner must sign transactions that seek to add new names under +this level. 
Names created under another name can have a new owner, thus transferring control from one account to another.
+
+## Delegating Control
+
+Every label in a name is owned by an address. Starting from the root address, each level can be configured either to allow any user to add a new child or to restrict the addition of child names to the creator. The `Restricted` flag is used to indicate the permission requirements for adding child nodes.
+
+```proto
+// NameRecord is a structure used to bind ownership of a name heirarchy to a collection of addresses
+message NameRecord {
+  option (gogoproto.goproto_stringer) = false;
+
+  // The bound name
+  string name = 1;
+  // The address the name resolves to.
+  string address = 2;
+  // Whether owner signature is required to add sub-names.
+  bool restricted = 3;
+}
+```
+
+## Normalization
+
+Name records are normalized before being processed for creation or query. Each component of the name must conform to a standard set of rules. The sha256 of the normalized value is used internally for comparison purposes.
+
+1. Names are always stored and compared using a lower case form or a hash derived from this normalized form.
+2. Unicode values that are not graphic, lower case, or digits are considered invalid.
+3. A single occurrence of the hyphen-minus character is allowed unless the value conforms to a valid UUID.
+```
+value: -
+HYPHEN-MINUS
+Unicode: U+002D, UTF-8: 2D
+```
+4. Each component of the name is restricted to a length of 2 to 32 characters (inclusive). These limits are configurable in the module [parameters](./05_params.md).
+5. A maximum of 16 components for a name (levels in the hierarchy) is also enforced and configurable in the module parameters.
+6. Leading and trailing spaces are always trimmed off of names for consistency during processing and evaluation.
\ No newline at end of file
diff --git a/docs/sdk/name/02_state.md b/docs/sdk/name/02_state.md
new file mode 100644
index 000000000..7f5f2cf11
--- /dev/null
+++ b/docs/sdk/name/02_state.md
@@ -0,0 +1,49 @@
+
+# State
+
+The name module holds a very simple state collection.
+
+
+## Name Record KV Values
+Name records are stored under a key built by concatenating the hashes of each label within the name. This approach allows all of the names in the tree under a given
+name to be quickly queried and iterated over.
+
+```
+Name: foo
+key = 2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae
+
+Name: foo.bar
+key = 2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae.fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9
+
+Name: foo.bar.baz
+key = 2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae.fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9.baa5a0964d3320fbc0c6a922140453c8513ea24ab8fd0577034804a967248096
+
+```
+
+## Address Record KV Index
+In addition to the records stored by name, an address cache is maintained for the addresses associated with each name
+record. This allows simple and fast reverse lookup queries to be performed.
+ +``` +Address: pb1tg3ktger9ttlscehl3r5j4pqw7qzmvs4qr9vpm +key = 5A2365A3232AD7F86337FC4749542077802DB215.2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae.fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9 +value = foo.bar +``` + +## Name Record + +Name records are encoded using the following protobuf type +``` +// NameRecord is a structure used to bind ownership of a name heirarchy to a collection of addresses +message NameRecord { + option (gogoproto.goproto_stringer) = false; + + // The bound name + string name = 1; + // The address the name resolved to. + string address = 2; + // Whether owner signature is required to add sub-names. + bool restricted = 3; +} +``` \ No newline at end of file diff --git a/docs/sdk/name/03_messages.md b/docs/sdk/name/03_messages.md new file mode 100644 index 000000000..b865faeca --- /dev/null +++ b/docs/sdk/name/03_messages.md @@ -0,0 +1,130 @@ +# Messages + +In this section we describe the processing of the staking messages and the corresponding updates to the state. + + + - [MsgBindNameRequest](#msgbindnamerequest) + - [MsgDeleteNameRequest](#msgdeletenamerequest) + - [MsgModifyNameRequest](#msgmodifynamerequest) + - [CreateRootNameProposal](#createrootnameproposal) + - [MsgCreateRootNameRequest](#msgcreaterootnamerequest) + +## MsgBindNameRequest + +A name record is created using the `MsgBindNameRequest` message. + +```proto +message MsgBindNameRequest { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + // The parent record to bind this name under. + NameRecord parent = 1 [(gogoproto.nullable) = false]; + // The name record to bind under the parent + NameRecord record = 2 [(gogoproto.nullable) = false]; +} +``` + +This message is expected to fail if: +- The parent name record does not exist +- The requestor does not match the owner listed on the parent record _and_ the parent record indicates creation of child records is restricted. +- The record being created is otherwise invalid due to format or contents of the name value itself + - Insuffient length of name + - Excessive length of name + - Not deriving from the parent record (targets another root) + +If successful a name record will be created as described and an address index record will be created for the address associated with the name. +## MsgDeleteNameRequest + +The delete name request method allows a name record that does not contain any children records to be removed from the system. All +associated attributes on account addresses will be deleted. + +```proto +// MsgDeleteNameRequest defines an sdk.Msg type that is used to remove an existing address/name binding. The binding +// may not have any child names currently bound for this request to be successful. All associated attributes on account addresses will be deleted. +message MsgDeleteNameRequest { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + // The parent record the record to remove is under. + NameRecord parent = 1 [(gogoproto.nullable) = false]; + // The record being removed + NameRecord record = 2 [(gogoproto.nullable) = false]; +} +``` + +This message is expected to fail if: +- Any components of the request do not pass basic integrity and format checks +- The parent name record does not exist +- The record to remove does not exist +- Any child records exist under the record being removed +- The requestor does not match the owner listed on the record. 
+ +## MsgModifyNameRequest + +A name record is modified by proposing the `MsgModifyNameRequest` message. + +```proto +// MsgModifyNameRequest defines a method that is used to update an existing address/name binding. +message MsgModifyNameRequest { + option (cosmos.msg.v1.signer) = "authority"; + + // The address signing the message + string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // The record being updated + NameRecord record = 2 [(gogoproto.nullable) = false]; +} +``` + +This message is expected to fail if: +- Any components of the request do not pass basic integrity and format checks +- The record to update does not exist +- The authority does not match the gov module or the name owner. + +If successful a name record will be updated with the new address and restriction. + +## CreateRootNameProposal + +The create root name proposal is a governance proposal that allows new root level names to be established after the genesis of the blockchain. + +```proto +message CreateRootNameProposal { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + option (gogoproto.goproto_stringer) = false; + + string title = 1; + string description = 2; + string name = 3; + string owner = 4; + bool restricted = 5; +} +``` + +This message is expected to fail if: +- The name already exists +- Insuffient length of name +- Excessive length of name + +## MsgCreateRootNameRequest + +The `MsgCreateRootNameRequest` is a governance proposal that allows new root level names to be established after the genesis of the blockchain. + +```proto +message MsgCreateRootNameRequest { + option (cosmos.msg.v1.signer) = "authority"; + + // The signing authority for the request + string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // NameRecord is a structure used to bind ownership of a name hierarchy to a collection of addresses + NameRecord record = 2; +} +``` + +This message is expected to fail if: +- The name already exists +- Insuffient length of name +- Excessive length of name +- The authority does not match the gov module. + +If successful a name record will be created with the provided address and restriction. 
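+
+To tie these messages together, below is a minimal Kotlin sketch of how a client might construct a `MsgBindNameRequest` that binds `example.pb` under the `pb` root. It assumes protobuf/gRPC stubs generated from the proto definitions above (the `io.provenance.name.v1` package and builder names are assumptions, not part of this spec); the addresses are hypothetical placeholders, and signing and broadcasting the Tx are omitted.
+
+```kotlin
+import io.provenance.name.v1.MsgBindNameRequest
+import io.provenance.name.v1.NameRecord
+
+// The existing parent record the new name is bound under.
+val parent = NameRecord.newBuilder()
+    .setName("pb")
+    .setAddress("pb1rootowner...")      // hypothetical owner of the "pb" root
+    .setRestricted(true)                // owner signature required to add sub-names
+    .build()
+
+// The child record to create; the parent owner must sign the Tx because the parent is restricted.
+val record = NameRecord.newBuilder()
+    .setName("example")
+    .setAddress("pb1newowner...")       // hypothetical address the new name resolves to
+    .setRestricted(false)
+    .build()
+
+val msg = MsgBindNameRequest.newBuilder()
+    .setParent(parent)
+    .setRecord(record)
+    .build()
+// msg would then be packed into a TxBody, signed by the required owner, and broadcast.
+```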
diff --git a/docs/sdk/name/04_events.md b/docs/sdk/name/04_events.md new file mode 100644 index 000000000..7860e0f52 --- /dev/null +++ b/docs/sdk/name/04_events.md @@ -0,0 +1,47 @@ +# Events + +The name module emits the following events: + + + - [Handlers](#handlers) + - [MsgBindNameRequest](#msgbindnamerequest) + - [MsgDeleteNameRequest](#msgdeletenamerequest) + - [MsgModifyNameRequest](#msgmodifynamerequest) + - [CreateRootNameProposal](#createrootnameproposal) + +## Handlers + +### MsgBindNameRequest + +| Type | Attribute Key | Attribute Value | +| --------------------- | --------------------- | ------------------------- | +| name_bound | name | \{NameRecord|Name\} | +| name_bound | address | \{NameRecord|Address\} | +| name_bound | restricted | \{NameRecord|Restricted\} | + + +### MsgDeleteNameRequest + +| Type | Attribute Key | Attribute Value | +| --------------------- | --------------------- | ------------------------- | +| name_unbound | name | \{NameRecord|Name\} | +| name_unbound | address | \{NameRecord|Address\} | +| name_unbound | restricted | \{NameRecord|Restricted\} | + +### MsgModifyNameRequest + +| Type | Attribute Key | Attribute Value | +| --------------------- | --------------------- | ------------------------- | +| name_modify | authority | \{String\} | +| name_modify | name | \{NameRecord|Name\} | +| name_modify | address | \{NameRecord|Address\} | +| name_modify | restricted | \{NameRecord|Restricted\} | + + +### CreateRootNameProposal + +| Type | Attribute Key | Attribute Value | +| --------------------- | --------------------- | ------------------------- | +| name_bound | name | \{NameRecord|Name\} | +| name_bound | address | \{NameRecord|Address\} | +| name_bound | restricted | \{NameRecord|Restricted\} | diff --git a/docs/sdk/name/05_params.md b/docs/sdk/name/05_params.md new file mode 100644 index 000000000..ebe80bc8e --- /dev/null +++ b/docs/sdk/name/05_params.md @@ -0,0 +1,11 @@ + +# Parameters + +The name module contains the following parameters: + +| Key | Type | Example | +|------------------------|--------|---------| +| MaxSegmentLength | uint32 | 32 | +| MinSegmentLength | uint32 | 2 | +| MaxNameLevels | uint32 | 16 | +| AllowUnrestrictedNames | bool | false | \ No newline at end of file diff --git a/docs/sdk/name/README.md b/docs/sdk/name/README.md new file mode 100644 index 000000000..467dfa5e0 --- /dev/null +++ b/docs/sdk/name/README.md @@ -0,0 +1,25 @@ +# `x/name` + +## Overview + +The name service is intended to provide a system for creating human-readable names as aliases for addresses and to imply ownership and control. These names can be used to provide a stable reference to a changing address or collection of addresses. + +One issue with a blockchain is that addresses are complex strings of characters that are difficult to type and remember. On the other hand the name service can provide a potentially shorter and easier to remember alias such as `provenance.pb` or `attribute.user.pb` to use in place of the address. + +### A Name Hierarchy + +Another challenge for users of a blockchain is establishing authority and delegating control. A specific example of this is the definition of the authoritative source of a piece of information. Where did this information come from? Who created it/vetted it? How can this control be distributed in such a way that the right people can control the information? A narrow aspect of this type of control can be satisfied through the creation of a hierarchical name system modeled after DNS. 
If the address `passport.pb` has been created and is owned by the Provenance Passport application, then `level-3.accredited.passport.pb` can be expected to be under the direct or delegated control of the passport application.
+
+## Contents
+
+1. **[Concepts](01_concepts.md)**
+2. **[State](02_state.md)**
+3. **[Messages](03_messages.md)**
+   - [MsgBindNameRequest](03_messages.md#msgbindnamerequest)
+   - [MsgDeleteNameRequest](03_messages.md#msgdeletenamerequest)
+   - [MsgModifyNameRequest](03_messages.md#msgmodifynamerequest)
+   - [CreateRootNameProposal](03_messages.md#createrootnameproposal)
+   - [MsgCreateRootNameRequest](03_messages.md#msgcreaterootnamerequest)
+4. **[Events](04_events.md)**
+   - [Handlers](04_events.md#handlers)
+5. **[Parameters](05_params.md)**
\ No newline at end of file
diff --git a/docs/sdk/oracle/01_concepts.md b/docs/sdk/oracle/01_concepts.md
new file mode 100644
index 000000000..9311268b4
--- /dev/null
+++ b/docs/sdk/oracle/01_concepts.md
@@ -0,0 +1,29 @@
+
+
+# Concepts
+
+The oracle module is very minimal, but users should understand what the `Oracle` is and how it interacts with `ICQ`.
+
+
+ - [Oracle](#oracle)
+ - [Interchain Queries (ICQ)](#interchain-queries-icq)
+
+
+---
+## Oracle
+
+The `Oracle` is a custom-built CosmWasm smart contract that the chain queries for data. Chain users can update the oracle's address with a proposal.
+
+## Interchain Queries (ICQ)
+
+`ICQ` is heavily leveraged in order to allow one Provenance Blockchain to query another Provenance Blockchain's `Oracle`. This module acts as the `Controller` and also as the receiver of queries routed by the `Host` in the `ICQ` realm.
+
+When a user intends to query another chain, they initiate the process by submitting a query through a transaction on the `ICQ Controller`. This `Controller` delivers the query from the transaction to the `ICQ Host` module of the destination chain via `IBC`. Subsequently, the received query is routed by the `ICQ Host` to this module. Upon receipt, the module queries the `Oracle` using the provided input, and the resulting information is then transmitted back to the `ICQ Controller` in the form of an `ACK` message.
+
+Because responses arrive in the form of an `ACK`, queries operate asynchronously. Consequently, the results will not be immediately accessible, and the user must wait for an event emitted with the response. For additional details, you can refer to the [Async ICQ Module](https://github.com/cosmos/ibc-apps/tree/main/modules/async-icq) developed by strangelove-ventures.
+
+### Note
+
+For `ICQ` to function correctly, it is essential to establish an `unordered channel` connecting the two chains. This channel should be configured using the `oracle` and `icqhost` ports on the `ICQ Controller` and `ICQ Host`, respectively. The `version` should be designated as `icq-1`. Moreover, it is crucial to ensure that the `HostEnabled` parameter is set to `true`, and that the `AllowQueries` parameter includes the path `"/provenance.oracle.v1.Query/Oracle"`.
diff --git a/docs/sdk/oracle/02_state.md b/docs/sdk/oracle/02_state.md
new file mode 100644
index 000000000..a2933086f
--- /dev/null
+++ b/docs/sdk/oracle/02_state.md
@@ -0,0 +1,26 @@
+
+
+# State
+
+The oracle module manages the address of the Oracle and the ICQ state.
+
+
+ - [Oracle](#oracle)
+ - [IBC](#ibc)
+
+
+---
+## Oracle
+
+The `Oracle` is a CosmWasm smart contract that the module forwards its queries to and relays responses from.
Users can manipulate this state by submitting a update oracle proposal. + +* Oracle `0x01 -> []byte{}` + +--- +## IBC + +`IBC` communication exists between the `oracle` and `icqhost` modules. The `oracle` module tracks its channel's `port` in state. + +* Port `0x02 -> []byte{}` diff --git a/docs/sdk/oracle/03_messages.md b/docs/sdk/oracle/03_messages.md new file mode 100644 index 000000000..8eb34eaa0 --- /dev/null +++ b/docs/sdk/oracle/03_messages.md @@ -0,0 +1,46 @@ + + +# Messages + +In this section we describe the processing of the oracle messages and their corresponding updates to the state. + + + - [Msg/UpdateOracleRequest](#msgupdateoraclerequest) + - [Msg/SendQueryOracleRequest](#msgsendqueryoraclerequest) + + +--- +## Msg/UpdateOracleRequest + +The oracle's address is modified by proposing the `MsgUpdateOracleRequest` message. + +### Request + ++++ https://github.com/provenance-io/provenance/blob/65865991f93e2c1a7647e29be11f6527f49616e6/proto/provenance/oracle/v1/tx.proto#L37-L46 + +### Response + ++++ https://github.com/provenance-io/provenance/blob/65865991f93e2c1a7647e29be11f6527f49616e6/proto/provenance/oracle/v1/tx.proto#L48-L49 + +The message will fail under the following conditions: +* The authority does not match the gov module. +* The new address does not pass basic integrity and format checks. + +## Msg/SendQueryOracleRequest + +Sends a query to another chain's `Oracle` using `ICQ`. + +### Request + ++++ https://github.com/provenance-io/provenance/blob/65865991f93e2c1a7647e29be11f6527f49616e6/proto/provenance/oracle/v1/tx.proto#L21-L29 + +### Response + ++++ https://github.com/provenance-io/provenance/blob/65865991f93e2c1a7647e29be11f6527f49616e6/proto/provenance/oracle/v1/tx.proto#L31-L35 + +The message will fail under the following conditions: +* The authority does not pass basic integrity and format checks. +* The query does not have the correct format. +* The channel is invalid or does not pass basic integrity and format checks. diff --git a/docs/sdk/oracle/04_queries.md b/docs/sdk/oracle/04_queries.md new file mode 100644 index 000000000..c41647b12 --- /dev/null +++ b/docs/sdk/oracle/04_queries.md @@ -0,0 +1,38 @@ + + +# Queries + +In this section we describe the queries available for looking up oracle information. + + + - [Query Oracle Address](#query-oracle-address) + - [Query Oracle](#query-oracle) + +--- +## Query Oracle Address +The `QueryOracleAddress` query is used to obtain the address of the module's oracle. + +### Request + ++++ https://github.com/provenance-io/provenance/blob/5afab1b1797b0071cf6a19ea5928c5b8f8831329/proto/provenance/oracle/v1/query.proto#L26-L27 + +### Response + ++++ https://github.com/provenance-io/provenance/blob/5afab1b1797b0071cf6a19ea5928c5b8f8831329/proto/provenance/oracle/v1/query.proto#L29-L33 + + +--- +## Query Oracle +The `QueryOracle` query forwards a query to the module's oracle. + +### Request + ++++ https://github.com/provenance-io/provenance/blob/5afab1b1797b0071cf6a19ea5928c5b8f8831329/proto/provenance/oracle/v1/query.proto#L35-L39 + +### Response + ++++ https://github.com/provenance-io/provenance/blob/5afab1b1797b0071cf6a19ea5928c5b8f8831329/proto/provenance/oracle/v1/query.proto#L41-L45 + +The data from the `query` field is a `CosmWasm query` forwarded to the `oracle`. 
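+
+As a rough illustration of the shape of such a query, the sketch below sends a contract-specific JSON payload through the `QueryOracle` endpoint over gRPC, following the same blocking-stub pattern used elsewhere in these docs. The `io.provenance.oracle.v1` stub names, the `{"query_version": {}}` payload, and the response field are assumptions made for illustration; the actual JSON schema is defined by whatever CosmWasm contract is registered as the oracle.
+
+```kotlin
+import com.google.protobuf.ByteString
+
+// channel is an existing gRPC ManagedChannel to a Provenance node.
+val oracleClient = io.provenance.oracle.v1.QueryGrpc.newBlockingStub(channel)
+
+// Contract-specific JSON query; {"query_version": {}} is a hypothetical example payload.
+val request = io.provenance.oracle.v1.QueryOracleRequest.newBuilder()
+    .setQuery(ByteString.copyFromUtf8("""{"query_version": {}}"""))
+    .build()
+
+// The response carries the oracle's JSON answer as bytes (field name assumed).
+val response = oracleClient.oracle(request)
+println(response.data.toStringUtf8())
+```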
diff --git a/docs/sdk/oracle/05_events.md b/docs/sdk/oracle/05_events.md new file mode 100644 index 000000000..736ff720c --- /dev/null +++ b/docs/sdk/oracle/05_events.md @@ -0,0 +1,45 @@ + + +# Events + +The oracle module emits the following events: + + + - [EventOracleQuerySuccess](#eventoraclequerysuccess) + - [EventOracleQueryError](#eventoraclequeryerror) + - [EventOracleQueryTimeout](#eventoraclequerytimeout) + + +--- +## EventOracleQuerySuccess + +This event is emitted when an `ICQ` response is received from an `ACK` and is successful. + +| Type | Attribute Key | Attribute Value | +| ------------------ | ------------- | ----------------------------------- | +| OracleQuerySuccess | channel | Channel the ICQ request was sent on | +| OracleQuerySuccess | sequence_id | Sequence ID of the ICQ request | +| OracleQuerySuccess | result | Query data obtained from oracle | + +--- +## EventOracleQueryError + +This event is emitted when an `ICQ` response is received from an `ACK` and contains an error. + +| Type | Attribute Key | Attribute Value | +| ---------------- | ------------- | ----------------------------------- | +| OracleQueryError | channel | Channel the ICQ request was sent on | +| OracleQueryError | sequence_id | Sequence ID of the ICQ request | +| OracleQueryError | error | Error received from the module | + +--- +## EventOracleQueryTimeout + +This event is emitted when an `ICQ` request results in a `Timeout`. + +| Type | Attribute Key | Attribute Value | +| ------------------ | ------------- | ----------------------------------- | +| OracleQueryTimeout | channel | Channel the ICQ request was sent on | +| OracleQueryTimeout | sequence_id | Sequence ID of the ICQ request | diff --git a/docs/sdk/oracle/06_genesis.md b/docs/sdk/oracle/06_genesis.md new file mode 100644 index 000000000..7d5e744a0 --- /dev/null +++ b/docs/sdk/oracle/06_genesis.md @@ -0,0 +1,18 @@ + + +# Oracle Genesis + +In this section we describe the processing of the oracle messages and the corresponding updates to the state. + + + - [Msg/GenesisState](#msggenesisstate) + + +--- +## Msg/GenesisState + +The GenesisState encompasses the upcoming sequence ID for an ICQ packet, the associated parameters, the designated port ID for the module, and the oracle address. These values are both extracted for export and imported for storage within the store. + ++++ https://github.com/provenance-io/provenance/blob/ba0b65c54f61f99c951fe4694271847dbad0fb00/proto/provenance/oracle/v1/genesis.proto#L11-L24 diff --git a/docs/sdk/oracle/README.md b/docs/sdk/oracle/README.md new file mode 100644 index 000000000..f0b267283 --- /dev/null +++ b/docs/sdk/oracle/README.md @@ -0,0 +1,20 @@ +# `x/oracle` + +## Overview + +The oracle module provides the Provenance Blockchain with the capability to dynamically expose query endpoints through Interchain Queries (ICQ) + +One challenge that the Provenance Blockchain faces is supporting each Provenance Blockchain Zone with a unique set of queries. It is not feasible to create an evolving set of queries for each chain. Furthermore, it is not desirable for other parties to request Provenance to build these endpoints for them and then upgrade. This module resolves these issues by enabling Provenance Blockchain zones to manage their own oracle. 
+ +## Acknowledgements + +We appreciate the substantial contributions made by Strangelove Ventures and Quasar Finance through their work on the [Async ICQ Module](https://github.com/cosmos/ibc-apps/tree/main/modules/async-icq) and [Interchain Query Demo](https://github.com/quasar-finance/interchain-query-demo). These resources were of paramount importance in informing the development of our oracle module. + +## Contents + +1. **[Concepts](01_concepts.md)** +2. **[State](02_state.md)** +3. **[Messages](03_messages.md)** +4. **[Queries](04_queries.md)** +5. **[Events](05_events.md)** +6. **[Genesis](06_genesis.md)** \ No newline at end of file diff --git a/docs/sdk/reward/01_concepts.md b/docs/sdk/reward/01_concepts.md new file mode 100644 index 000000000..6456bf62d --- /dev/null +++ b/docs/sdk/reward/01_concepts.md @@ -0,0 +1,35 @@ + + +# Concepts + + + - [Reward Program](#reward-program) + - [Qualifying Actions and Eligibility Criteria](#qualifying-actions-and-eligibility-criteria) + - [Claim Period](#claim-period) + - [Reward Claim](#reward-claim) + - [Rollover](#rollover) + - [Refunding](#refunding) + +## Reward Program +Reward Programs are configurable campaigns that encourage users to participate in the Provenance Blockchain. Entities interested in creating a Reward Program will supply their new program with funds, set the duration of their program, and provide the participation requirements. + +## Qualifying Actions and Eligibility Criteria +A `Qualifying Action` is one or more transactions that a user performs on the Provenance Blockchain that has been listed within the `Reward Program`. These actions are then evaluated against a set of criteria that are also defined within the `Reward Program` known as `Eligiblity Criteria`. Users become participants in the Reward Program by performing a `Qualifying Action` and meeting all conditions specified by its `Eligiblity Criteria`. + +## Claim Period +A `Reward Program` is split into one or more time intervals known as `Claim Periods`. Each of these `Claim Periods` gets an equal portion of the `Reward Program Reward Pool` known as the `Claim Period Reward Pool`. Users can participate within these `Claim Periods` and are rewarded for their actions. + +## Reward Claim +When a user participates in a `Reward Program` they are granted one or more shares of the `Claim Period Reward Pool`. Once the`Claim Period` ends, the participant will be able to claim their reward by performing a claim transaction. The participant's reward is proportional to their activity compared to everyone else within a `Claim Period`. Users must claim their rewards before the `Reward Program` expires. Additionally, users will be limited to `max_reward_per_address` of the `Claim Period Reward Pool`. + +**Reward For Claim Period** + +$$\left( ClaimPeriodRewardPool \right) \times \left( EarnedShares \over ClaimPeriodShares \right) $$ + +## Rollover +It is possible that not all of the `Claim Period Reward Pool` will be distributed. This can happen when there is not enough activity, and participants are gated by the `max_reward_per_address`. The `Reward Program` will attempt to move these funds into a `Rollover Claim Period`. A `Rollover Claim Period` behaves exactly like any other `Claim Period`, but it is not guaranteed to have an equal portion of the original `Reward Program Reward Pool`. A `Reward Program` may run up to `max_rollover_claim_periods`, but is not guaranteed to run any of them. 
This is dependent on user activity, the `program_end_time_max` field, and the `minimum_rollover_amount` field. Currently, the `minimum_rollover_amount` is set to 10% of the `Claim Period Reward Pool`. + +## Refunding +When a `Reward Program` ends it gives all participants `expiration_offset` seconds to claim their rewards. After `expiration_offset` seconds the `Reward Program` expires and prevents participants from claiming. The unclaimed rewards and any funds still remaining within the `Reward Program Reward Pool` will be given back to the creator. diff --git a/docs/sdk/reward/02_state.md b/docs/sdk/reward/02_state.md new file mode 100644 index 000000000..be3d433fb --- /dev/null +++ b/docs/sdk/reward/02_state.md @@ -0,0 +1,83 @@ + + +# State + +The rewards module manages the state of every reward program and each of its participants. + + + - [Reward Program](#reward-program) + - [Claim Period Reward Distribution](#claim-period-reward-distribution) + - [Reward Account State](#reward-account-state) + - [Action Counter](#action-counter) + - [Qualifying Actions](#qualifying-actions) + - [Action Delegate](#action-delegate) + - [Action Transfer](#action-transfer) + - [Action Vote](#action-vote) + +--- +## Reward Program + +A `RewardProgram` is the main data structure used by the Active Participation and Engagement (APE) module. It keeps track of the state, balances, qualifying actions, timers, and counters for a single Reward Program. Every Reward Program gets its own unique identifier that we track within the store. + +* Reward Program: `0x01 | RewardProgram ID (8 bytes) -> ProtocolBuffers(RewardProgram)` +* Reward Program ID: `0x02 -> uint64(RewardProgramID)` + ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/reward.proto#L12-L73 + +--- +## Claim Period Reward Distribution + +A `ClaimPeriodRewardDistribution` is created for each claim period of every `RewardProgram`. Its purpose is to track live, claim-period-specific information. Examples of this include the total number of granted shares in the claim period, the sum of all its rewards given out as claims, and the amount of reward allocated to it from the `RewardProgram`. + +* Claim Period Reward Distribution: `0x03 | Reward Program ID (8 bytes) | Claim Period ID (8 bytes) -> ProtocolBuffers(ClaimPeriodRewardDistribution)` + ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/reward.proto#L75-L92 + +--- +## Reward Account State + +The purpose of `RewardAccountState` is to track state at the address level of a claim period. It counts the number of claim period shares the user obtained, the status of their `RewardClaim`, and other stateful information that assists the system in properly granting rewards.
+ +* AccountStateAddressLookupKeyPrefix: `0x04 | Account Address (n bytes, with the address length being stored in the first byte {int64(address[1:2][0])}) | Reward Program ID (8 bytes) | Claim Period ID (8 bytes) -> ProtocolBuffers(RewardAccountState)` +* AccountStateKeyPrefix: `0x05 | Reward Program ID (8 bytes) | Claim Period ID (8 bytes) | Account Address (n bytes, with the address length being stored in the first byte {int64(address[1:2][0])}) -> ProtocolBuffers(RewardAccountState)` + ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/reward.proto#L94-L123 + +### Action Counter + +`ActionCounter` tracks the number of times an action has been performed. + ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/reward.proto#L190-L199 + +--- +## Qualifying Actions + +A list of one or more actions that a user can perform to attempt to participate in a `RewardProgram`. In order to be considered a participant and granted a share, all the `EligiblityCriteria` on the action must be met. Each action has its own `EligiblityCriteria`, which is independently evaluated against system state and `RewardAccountState` for that user. Each `Qualifying Action` is evaluated independently, thus it is possible for a user to earn more than one reward for a single action. + ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/reward.proto#L125-L141 + +### Action Delegate + +`ActionDelegate` is when a user performs a delegation. + ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/reward.proto#L143-L162 + +The triggering account must have a total delegation amount within the bands of [`minimum_delegation_amount`,`maximum_delegation_amount`]. Additionally, the validator they are staking to must be within the [`minimum_active_stake_percentile`,`maximum_active_stake_percentile`] power percentile. If both of these criteria are met, then the delegation is considered successful. The `minimum_actions` and `maximum_actions` fields are the number of successful delegations that must be performed. Once all these conditions are met, the user will receive a share. + +### Action Transfer + +`ActionTransfer` is when a user transfers coins. + ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/reward.proto#L164-L175 + +If the triggering account has delegated at least the `minimum_delegation_amount`, then the transfer action will be considered successful. The `minimum_actions` and `maximum_actions` fields are the number of successful transfers that must be performed. When all these conditions are met, the user will receive a share. + +### Action Vote + +`ActionVote` is when a user votes on a proposal. + ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/reward.proto#L177-L188 + +If the triggering account has delegated at least the `minimum_delegation_amount`, then the vote action will be considered successful. The `minimum_actions` and `maximum_actions` fields are the number of successful votes that must be performed. When all these conditions are met, the user will receive a share.
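+As a rough illustration of the lookup key layout above, the following sketch assembles the `0x04` address-lookup key by hand. It assumes big-endian encoding for the 8-byte IDs (a common choice so keys iterate in order); the module's actual key utilities may differ.
+
+```go
+package main
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+// accountStateAddressLookupKey builds: 0x04 | address length (1 byte) | address |
+// reward program ID (8 bytes) | claim period ID (8 bytes).
+func accountStateAddressLookupKey(addr []byte, rewardProgramID, claimPeriodID uint64) []byte {
+	key := make([]byte, 0, 2+len(addr)+16)
+	key = append(key, 0x04)            // AccountStateAddressLookupKeyPrefix
+	key = append(key, byte(len(addr))) // address length stored in the first byte
+	key = append(key, addr...)         // account address bytes
+
+	id := make([]byte, 8)
+	binary.BigEndian.PutUint64(id, rewardProgramID)
+	key = append(key, id...)
+	binary.BigEndian.PutUint64(id, claimPeriodID)
+	key = append(key, id...)
+	return key
+}
+
+func main() {
+	addr := []byte("example-address-bytes")
+	fmt.Printf("% x\n", accountStateAddressLookupKey(addr, 1, 3))
+}
+```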
diff --git a/docs/sdk/reward/03_state_transitions.md b/docs/sdk/reward/03_state_transitions.md new file mode 100644 index 000000000..08b57d499 --- /dev/null +++ b/docs/sdk/reward/03_state_transitions.md @@ -0,0 +1,72 @@ + + +# State Transitions + +This document describes the state transition operations involved in the rewards module. + + + - [Reward Programs](#reward-programs) + - [Pending ](#pending) + - [Started ](#started) + - [Finished ](#finished) + - [Expired](#expired) + - [Reward Claims](#reward-claims) + - [Unclaimable](#unclaimable) + - [Claimable](#claimable) + - [Claimed](#claimed) + - [Expired](#expired) + + + +## Reward Programs +State transition for Reward Programs happen on `BeginBlock` and make use of the `BlockTime` attribute. + +A Reward Program can be `Pending`, `Started`, `Finished`, or `Expired`. A Reward Program will move through all these states, and will initially be in the `Pending` state. + +#### Note +A Reward Program creator may end a Reward Program early while it's in `Pending` or `Started` state. A Reward Program in the `Pending` state will be deleted and not progress through all the states. Any program that is ended after it's in the `Started` state will transition to the `Finished` state on the next `BeginBlock`. + +### Pending +Reward program has *not* started. + +#### Note +A user may force a Reward Program in this state to end with the `end-reward-program` transaction. In this case the Reward Program will be deleted and not progress. + +### Started +The Reward Program has started, and users can participate by performing qualifying actions. Participants can claim their rewards at the end of the claim period that the qualifying action was performed in. + +#### Note +A user may force a Reward Program in this state to end with the `end-reward-program` transaction. The Reward Program will transition to the `Finished` state on the next `BeginBlock`. + +### Finished +The Reward Program has ended, and participants can no longer make qualifying actions. Participants have a limited amount of time to collect their remaining rewards. + +### Expired +Reward program has passed its expiration date, and participants can no longer claim rewards. The remaining balance and any unclaimed rewards will be returned to the creator. + +
+![Reward Program state transitions](./diagrams/reward-program/RewardProgram.png)
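+The transitions above are driven purely by `BeginBlock` and the block time. Below is a simplified sketch of that decision; field names are abbreviated, and the real module also weighs claim periods, the remaining pool balance, and `program_end_time_max` as described in the Begin Blocker section.
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+type programState string
+
+const (
+	statePending  programState = "PENDING"
+	stateStarted  programState = "STARTED"
+	stateFinished programState = "FINISHED"
+	stateExpired  programState = "EXPIRED"
+)
+
+// nextState applies the BeginBlock rules sketched in this section. programOver stands
+// in for "out of claim periods, ended early, or past program_end_time_max".
+func nextState(state programState, blockTime, startTime, finishTime time.Time, expirationOffset time.Duration, programOver bool) programState {
+	switch state {
+	case statePending:
+		if !blockTime.Before(startTime) { // BlockTime >= program start time
+			return stateStarted
+		}
+	case stateStarted:
+		if programOver {
+			return stateFinished
+		}
+	case stateFinished:
+		if !blockTime.Before(finishTime.Add(expirationOffset)) { // finish time + offset <= BlockTime
+			return stateExpired
+		}
+	}
+	return state
+}
+
+func main() {
+	now := time.Now()
+	fmt.Println(nextState(statePending, now, now.Add(-time.Hour), time.Time{}, 0, false)) // STARTED
+}
+```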
+ +## Reward Claims +State transitions for a Reward Claim happen on `BeginBlock` and on claim transactions. + +A Reward Claim can be `Unclaimable`, `Claimable`, `Claimed`, or `Expired`. A Reward Claim will always start as `Unclaimable` and eventually become `Claimable`. If a participant claims their reward then the Reward Claim will become `Claimed`, otherwise it will timeout and enter the `Expired` state where they can no longer claim it. + +### Unclaimable +The reward has been granted to a participant, but it cannot be claimed until the current claim period ends. + +### Claimable +The reward has been granted to the participant, and it's claimable by the participant via a transaction. If the reward is not claimed it will eventually expire. + +### Claimed +The reward has been granted and received by the participant. A reward cannot be claimed more than once. + +### Expired +The reward has been cleaned up and the participant can no longer claim it. The funds attached to the reward claim are refunded back to the program creator. + +
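+When a claim is paid out, the amount follows the claim period formula from the Concepts section, capped at `max_reward_per_address`. A rough sketch with plain integer math (the on-chain implementation presumably uses the SDK's big-integer types to avoid overflow):
+
+```go
+package main
+
+import "fmt"
+
+// claimPeriodReward = pool * earnedShares / totalShares, capped at maxPerAddress.
+// Amounts are in the smallest denomination (e.g. nhash).
+func claimPeriodReward(pool, earnedShares, totalShares, maxPerAddress uint64) uint64 {
+	if totalShares == 0 || earnedShares == 0 {
+		return 0
+	}
+	reward := pool * earnedShares / totalShares
+	if reward > maxPerAddress {
+		reward = maxPerAddress
+	}
+	return reward
+}
+
+func main() {
+	// 1,000,000 nhash pool, 3 of 10 shares earned, 250,000 nhash cap per address.
+	fmt.Println(claimPeriodReward(1_000_000, 3, 10, 250_000)) // 250000
+}
+```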
+![Reward Claim state transitions](./diagrams/reward-claim/RewardClaim.png)
diff --git a/docs/sdk/reward/04_messages.md b/docs/sdk/reward/04_messages.md new file mode 100644 index 000000000..4420e004f --- /dev/null +++ b/docs/sdk/reward/04_messages.md @@ -0,0 +1,79 @@ + + +# Messages + +In this section we describe the processing of the reward messages and the corresponding updates to the state. + + + - [Msg/CreateRewardProgramRequest](#msgcreaterewardprogramrequest) + - [Msg/EndRewardProgramRequest](#msgendrewardprogramrequest) + - [Msg/ClaimRewardRequest](#msgclaimrewardrequest) + - [Msg/ClaimAllRewardsRequest](#msgclaimallrewardsrequest) + + +## Msg/CreateRewardProgramRequest + +Creates a Reward Program that users can participate in. + +### Request ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/tx.proto#L40-L72 + +### Response ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/tx.proto#L74-L78 + +The message will fail under the following conditions: +* The program start time is at the current block time or after +* The requester is unable to send the reward pool amount to module +* The title is empty or greater than 140 characters +* The description is empty or greater than 10000 characters +* The distribute from address is an invalid bech32 address +* The total reward pool amount is not positive +* The claim periods field is set to less than 1 +* The denominations are not in nhash +* There are no qualifying actions +* The qualifying actions are not valid + +## Msg/EndRewardProgramRequest + +Ends a Reward Program that is in either the PENDING or STARTED state. + +### Request ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/tx.proto#L80-L89 + +### Response ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/tx.proto#L91-L92 + +The message will fail under the following conditions: +* The Reward Program does not end +* The Reward Program is not in PENDING or STARTED state +* The Reward Program owner does not match the specified address + +## Msg/ClaimRewardRequest + +Allows a participant to claim all their rewards for all past claim periods on a reward program. + +### Request ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/tx.proto#L94-L100 + +### Response ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/tx.proto#L102-L107 + +The message will fail under the following conditions: +* The Reward Program does not exist +* The Reward Program is expired +* The Reward Address does not exist + +## Msg/ClaimAllRewardsRequest + +Allows a participant to claim all their rewards for all past claim periods on all reward programs. 
+ +### Request ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/tx.proto#L109-L113 + +### Response ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/tx.proto#L115-L122 + +The message will fail under the following conditions: +* The Reward Address does not exist diff --git a/docs/sdk/reward/05_queries.md b/docs/sdk/reward/05_queries.md new file mode 100644 index 000000000..17ea01778 --- /dev/null +++ b/docs/sdk/reward/05_queries.md @@ -0,0 +1,88 @@ + + +# Rewards Queries +In this section we describe the queries available for looking up rewards information. + + + - [Query Reward Program By ID](#query-reward-program-by-id) + - [Query Reward Programs](#query-reward-programs) + - [Query Claim Period Reward Distribution By ID](#query-claim-period-reward-distribution-by-id) + - [Query Claim Period Reward Distributions](#query-claim-period-reward-distributions) + - [Query Rewards By Address](#query-rewards-by-address) + + +--- +## Query Reward Program By ID +The `QueryRewardProgramByID` query is used to obtain the content of a specific Reward Program. + +### Request ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/query.proto#L47-L51 + +The `id` is the unique identifier for the Reward Program. + +### Response ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/query.proto#L53-L57 + + +--- +## Query Reward Programs +The `QueryRewardPrograms` query is used to obtain the content of all Reward Programs matching the supplied `query_type`. + +### Request ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/query.proto#L59-L80 + +The `query_type` is used to filter on the Reward Program state. The following are a list of `query_types`. +* ALL - All Reward Programs will be returned. +* PENDING - All Reward Programs that are in the `PENDING` state will be returned. +* ACTIVE - All Reward Programs that are in the `STARTED` state will be returned. +* OUTSTANDING - All Reward Programs that are either in the `PENDING` or `STARTED` state will be returned. +* FINISHED - All Reward Programs that are in the `FINISHED` or `EXPIRED` state will be returned. + +### Response ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/query.proto#L82-L88 + + +--- +## Query Claim Period Reward Distribution By ID +The `QueryClaimPeriodRewardDistributionByID` query is used to obtain the content of a specific `Claim Period Reward Distribution`. + +### Request ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/query.proto#L104-L110 + +The `reward_id` is a unique identifier for the Reward Program and the `claim_id` is a unique identifier for the Reward Program's Claim Period. + +### Response ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/query.proto#L112-L116 + + +--- +## Query Claim Period Reward Distributions +The `QueryClaimPeriodRewardDistributions` query is used to obtain the content of all `Claim Period Reward Distributions` matching the supplied `query_type`. 
+ +### Request ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/query.proto#L90-L94 + +The `pagination` field is used to help limit the number of results. + +### Response ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/query.proto#L96-L102 + + +--- +## Query Rewards By Address +The `QueryRewardsByAddress` query is used to obtain the status of the address' Reward Claims. + +### Request ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/query.proto#L118-L126 + +The `address` field is the bech32 address of the user to list Reward Claims for. The `claim_status` is used to filter on the Reward Claim. The following are a list of `claim_status`. +* ALL - All Reward Claims are returned. +* UNCLAIMABLE - All Reward Claims that are not yet eligible to be claimed. +* CLAIMABLE - All Reward Claims that are still eligible to be claimed. +* CLAIMED - All Reward Claims that have been claimed. +* EXPIRED - All Reward Claims that have expired. + +### Response ++++ https://github.com/provenance-io/provenance/blob/243a89c76378bb5af8a8017e099ee04ac22e99ce/proto/provenance/reward/v1/query.proto#L128-L136 diff --git a/docs/sdk/reward/06_events.md b/docs/sdk/reward/06_events.md new file mode 100644 index 000000000..3190cb00b --- /dev/null +++ b/docs/sdk/reward/06_events.md @@ -0,0 +1,87 @@ + + +# Events + +The rewards module emits the following events: + + + - [Reward Program Created](#reward-program-created) + - [Reward Program Started](#reward-program-started) + - [Reward Program Finished](#reward-program-finished) + - [Reward Program Expired](#reward-program-expired) + - [Reward Program Ended](#reward-program-ended) + - [Claim Rewards](#claim-rewards) + - [Claim All Rewards](#claim-all-rewards) + + +--- +## Reward Program Created + +Fires when a reward program is created with the Create Reward Program Msg. + +| Type | Attribute Key | Attribute Value | +| ---------------------- | --------------------- | ------------------------- | +| RewardProgramCreated | reward_program_created| \{ID string\} | + +--- +## Reward Program Started + +Fires when a reward program transitions to the STARTED state. + +| Type | Attribute Key | Attribute Value | +| ---------------------- | --------------------- | ------------------------- | +| RewardProgramStarted | reward_program_id | \{ID string\} | + +--- +## Reward Program Finished + +Fires when a reward program transitions to the FINISHED state. + +| Type | Attribute Key | Attribute Value | +| ---------------------- | --------------------- | ------------------------- | +| RewardProgramFinished | reward_program_id | \{ID string\} | + +--- +## Reward Program Expired + +Fires when a reward program transitions to the EXPIRED state. + +| Type | Attribute Key | Attribute Value | +| ---------------------- | --------------------- | ------------------------- | +| RewardProgramExpired | reward_program_id | \{ID string\} | + +--- +## Reward Program Ended + +Fires when a reward program is ended with the End Reward Program Msg. + +| Type | Attribute Key | Attribute Value | +| ---------------------- | --------------------- | ------------------------- | +| RewardProgramEnded | reward_program_id | \{ID string\} | + +--- +## Claim Rewards + +Fires when a participant claims a reward claim with Claim Reward Msg. 
+ +| Type | Attribute Key | Attribute Value | +| ---------------------- | --------------------- | ------------------------- | +| ClaimRewards | reward_program_id | \{ID string\} | +| ClaimRewards | rewards_claim_address | \{bech32address string\} | + +This event will not fire if the user has no claims or if they have already claimed all their rewards. + +--- +## Claim All Rewards + +Fires when a participant claims all their reward claims with Claim Reward Msg. + +| Type | Attribute Key | Attribute Value | +| ---------------------- | --------------------- | ------------------------- | +| ClaimAllRewards | rewards_claim_address | \{bech32address string\} | + +This event will not fire if the user has no claims or if they have already claimed all their rewards. + +--- diff --git a/docs/sdk/reward/07_begin_and_end_blocker.md b/docs/sdk/reward/07_begin_and_end_blocker.md new file mode 100644 index 000000000..4764d3f82 --- /dev/null +++ b/docs/sdk/reward/07_begin_and_end_blocker.md @@ -0,0 +1,30 @@ + + +# Begin Blocker +The `BeginBlocker` abci call is invoked on the beginning of each block. The newly created block's `BlockTime` field is used to update the `Reward Programs` and `Reward Claims`. + +## State Machine Update +The following conditional logic is evaluated to help a `Reward Program` transition between states: +1. Starts a `Reward Program` if the `BlockTime` >= `program_start_time`. +2. Evaluates if `BlockTime` >= `claim_period_end_time`, and if it evaluates to true the `Reward Program` will *attempt* to progress to the next claim period. +3. The Reward Program will successfully progress to the next claim period, if all of the following criteria are true: + 1. `remaining_pool_balance` >= `minimum_rollover_amount` + 2. `BlockTime` < `program_end_time_max` +4. If either of the previously mentioned criteria is not met, then the `Reward Program` will end. +5. A completed `Reward Program` will then expire after `reward_claim_expiration_offset` seconds from its completion time. +6. All expired `Reward Program` will return any unused funds to the reward creator and expire any unclaimed rewards. + +# End Blocker +The `EndBlocker` abci call is ran at the end of each block. The `EventManager` is monitored and `Qualifying Actions` are deduced from newly created events and prior internal state. + +## Qualifying Action Detection +The following is logic is used to detect `Qualifying Actions` and grant shares: +1. The `EventManager` is utilized to traverse the events from the newly created block. +2. Using the `QualifyingAction` types defined in each active `RewardProgram`, the module attempts to build actions using the event and any prior events. +3. Each detected action will then be evaluated and only deemed a `QualifyingAction` if it matches the `Evaluation Criteria`. +4. One or more shares will be granted to the participant performing the `QualifyingAction`. +5. Participants can then claim these shares once the claim period that they were earned in completes. +6. Detects qualifying events based on events in EventManager for reward programs currently running. +7. Gives out shares for a running rewards program for that claim period for a given address performing qualifying events. diff --git a/docs/sdk/reward/08_genesis.md b/docs/sdk/reward/08_genesis.md new file mode 100644 index 000000000..7b6c97e8c --- /dev/null +++ b/docs/sdk/reward/08_genesis.md @@ -0,0 +1,13 @@ + + +# Reward Genesis + +In this section we describe the processing of the reward messages and the corresponding updates to the state. 
+ + +## Msg/GenesisState +GenesisState contains a list of reward programs, claim period reward distributions, and reward account states. These are exported and later imported from/to the store. + ++++ https://github.com/provenance-io/provenance/blob/ccaef3a7024f0ccd73d175465e91577373127858/proto/provenance/reward/v1/genesis.proto#L13-L22 diff --git a/docs/sdk/reward/README.md b/docs/sdk/reward/README.md new file mode 100644 index 000000000..9a69632a3 --- /dev/null +++ b/docs/sdk/reward/README.md @@ -0,0 +1,16 @@ +# `x/rewards` + +## Overview + +The purpose of the rewards module, also known as 'Active Participation and Engagement (APE) module', is to engage and reward users for Provenance Blockchain activity. The Provenance Blockchain Foundation, or any other interested party, can create and customize their own campaigns known as Reward Programs. Provenance Blockchain users can then participate in these programs by performing the actions defined on the Reward Program. Participants will be granted shares for each qualifying action and can claim them for the respective Reward Program's reward. + +## Contents + +1. **[Concepts](01_concepts.md)** +2. **[State](02_state.md)** +3. **[State transitions](03_state_transitions.md)** +4. **[Messages](04_messages.md)** +5. **[Queries](05_queries.md)** +6. **[Events](06_events.md)** +7. **[Begin and End Blocker](07_begin_and_end_blocker.md)** +8. **[Genesis](08_genesis.md)** \ No newline at end of file diff --git a/docs/sdk/reward/diagrams/reward-claim/RewardClaim.png b/docs/sdk/reward/diagrams/reward-claim/RewardClaim.png new file mode 100644 index 000000000..2d946467d Binary files /dev/null and b/docs/sdk/reward/diagrams/reward-claim/RewardClaim.png differ diff --git a/docs/sdk/reward/diagrams/reward-claim/reward-claim.plantuml b/docs/sdk/reward/diagrams/reward-claim/reward-claim.plantuml new file mode 100644 index 000000000..659aca0d6 --- /dev/null +++ b/docs/sdk/reward/diagrams/reward-claim/reward-claim.plantuml @@ -0,0 +1,22 @@ +@startuml RewardClaim + +skinparam linetype ortho +skinparam SequenceMessageAlign center +hide empty description +state c <> +state Unclaimable +state Claimable +state Claimed +state Expired + +[*] -down-> Unclaimable : [Participant engages in qualifying action] +Unclaimable -down-> Claimable : [Claim period ends] +Claimable -down-> c +c -down-> Expired : [End time + Offset <= Block Time] +c -down-> Claimed : [Participant claims transaction] + +@enduml \ No newline at end of file diff --git a/docs/sdk/reward/diagrams/reward-program/RewardProgram.png b/docs/sdk/reward/diagrams/reward-program/RewardProgram.png new file mode 100644 index 000000000..3e5d7ae59 Binary files /dev/null and b/docs/sdk/reward/diagrams/reward-program/RewardProgram.png differ diff --git a/docs/sdk/reward/diagrams/reward-program/reward-program.plantuml b/docs/sdk/reward/diagrams/reward-program/reward-program.plantuml new file mode 100644 index 000000000..7dcfb549f --- /dev/null +++ b/docs/sdk/reward/diagrams/reward-program/reward-program.plantuml @@ -0,0 +1,21 @@ +@startuml RewardProgram + +skinparam linetype ortho +skinparam SequenceMessageAlign center +hide empty description + +state Pending +state Started +state Finished +state Expired + +[*] -down-> Pending +Finished -down-> Expired : [End Time + Offset time <= Block Time] +Pending -down-> Started : [Start Time <= Block Time] +Started -down-> Finished : [End Time <= Block Time OR\nRemaining Balance <= Minimum Rollover] + +@enduml \ No newline at end of file diff --git 
a/docs/sdk/trigger/01_concepts.md b/docs/sdk/trigger/01_concepts.md new file mode 100644 index 000000000..e46fd5457 --- /dev/null +++ b/docs/sdk/trigger/01_concepts.md @@ -0,0 +1,51 @@ + + +# Concepts + +The trigger module allows users to delay the execution of a message until an event is detected. Users should have a strong understanding of what a `Trigger`, `Event`, `Queued Trigger` are, and how `Payment` works before using this module. + + + - [Trigger](#trigger) + - [Actions](#actions) + - [Gas Payment](#gas-payment) + - [Block Event](#block-event) + - [Transaction Event](#transaction-event) + - [Block Height Events](#block-height-events) + - [Block Time Event](#block-time-event) + - [Queued Trigger](#queued-trigger) + + + +## Trigger + +A `Trigger` is an address owned object that registers to a `Block Event`, and then proceeds to fire off its `Actions` when that `Block Event` has been detected by the system. A `Trigger` is single-shot, and it will automatically be destroyed after its `Block Event` has been detected. + +## Actions + +`Actions` are one or more messages that should be invoked. Every `Action` follows the same rules as a sdk message and requires purchased gas to run. See the `Gas Payment` section for more information. + +## Gas Payment + +Gas is vital in running the `Actions`, and in order to simplify the system as much as possible we leave it up to the user to calculate gas usage. When a user creates a `Trigger` they are required to purchase gas for the transaction AND the `Actions`. The remaining gas that is not used by the creation transaction will be rolled into a gas meter for the `Actions`. These `Actions` will only run and update state if their is enough allocated gas. + +## Block Event + +A `Block Event` is a blanket term that refers to events that occur during the creation of a block. The `Trigger` module currently supports `Transaction Events`, `Block Height Events`, and `Block Time Events`. + +### Transaction Event + +These type of events refer to the `ABCI Events` that are emitted by the `DeliverTx` transactions. An `ABCI Event` must have the same `Type` and `Attributes` as the user defined `Transaction Event` for the event criteria to be met. A user defined `Attribute` with an empty `Value` will always match as long as the `Attribute Name` field matches. + +### Block Height Events + +These type of events refer to the `Block Height` on a newly created block. The `Block Height` must be greater than or equal to the defined value for the event criteria to be met. + +### Block Time Event + +These type of events refer to the `Block Time` on a newly created block. The `Block Time` must be greater than or equal to the defined value for the event criteria to be met. + +## Queued Trigger + +The `Queued Trigger` is a `Trigger` that is ready to have its actions be executed at a future block. diff --git a/docs/sdk/trigger/02_state.md b/docs/sdk/trigger/02_state.md new file mode 100644 index 000000000..f5164d389 --- /dev/null +++ b/docs/sdk/trigger/02_state.md @@ -0,0 +1,70 @@ + + +# State + +The trigger module manages the state of every trigger. + +--- + + - [Trigger](#trigger) + - [TriggerEventI](#triggereventi) + - [BlockHeightEvent](#blockheightevent) + - [BlockTimeEvent](#blocktimeevent) + - [TransactionEvent](#transactionevent) + - [Queue](#queue) + + + +## Trigger + +A `Trigger` is the main data structure used by the module. It keeps track of the owner, event, and actions for a single `Trigger`. 
Every `Trigger` gets its own unique identifier, and a unique entry within the `Event Listener` and `Gas Limit` tables. The `Event Listener` table allows the event detection system to quickly filter applicable `Triggers` by name and type. A trigger can vary in size making it difficult to calculate gas usage on store, thus we opted to store remaining transaction gas in the `Gas Limit` table. It gives us a predictable way to calculate and store remaining gas. + +The excess gas on a MsgCreateTrigger transaction will be used for the `Trigger's` `Gas Limit` table. The maximum `Gas Limit` for a `Trigger` is `2000000`. + +* Trigger: `0x01 | Trigger ID (8 bytes) -> ProtocolBuffers(Trigger)` +* Trigger ID: `0x05 -> uint64(TriggerID)` +* Event Listener: `0x02 | Event Type (32 bytes) | Order (8 bytes) -> []byte{}` +* Gas Limit: `0x04 | Trigger ID (8 bytes) -> uint64(GasLimit)` + ++++ https://github.com/provenance-io/provenance/blob/bda28e5f58a4a58e8fef21141400ad362b84518b/proto/provenance/trigger/v1/trigger.proto#L13-L26 + +### TriggerEventI + +A `Trigger` must have an event that implements the `TriggerEventI` interface. Currently, the system supports `BlockHeightEvent`, `BlockTimeEvent`, and `TransactionEvent`. + +#### BlockHeightEvent + +The `BlockHeightEvent` allows the user to configure their `Trigger` to fire when the current block's `Block Height` is greater than or equal to the defined one. + ++++ https://github.com/provenance-io/provenance/blob/bda28e5f58a4a58e8fef21141400ad362b84518b/proto/provenance/trigger/v1/trigger.proto#L41-L49 + +#### BlockTimeEvent + +The `BlockTimeEvent` allows the user to configure their `Trigger` to fire when the current block's `Block Time` is greater than or equal to the defined one. + ++++ https://github.com/provenance-io/provenance/blob/bda28e5f58a4a58e8fef21141400ad362b84518b/proto/provenance/trigger/v1/trigger.proto#L51-L59 + +#### TransactionEvent + +The `TransactionEvent` allows the user to configure their `Trigger` to fire when a transaction event matching the user defined one has been emitted. + ++++ https://github.com/provenance-io/provenance/blob/bda28e5f58a4a58e8fef21141400ad362b84518b/proto/provenance/trigger/v1/trigger.proto#L61-L71 + +##### Attribute + +The `Attribute` is used by the `TransactionEvent` to allow the user to configure which attributes must be present on the transaction event. An `Attribute` with an empty `value` will only require the `name` to match. + ++++ https://github.com/provenance-io/provenance/blob/bda28e5f58a4a58e8fef21141400ad362b84518b/proto/provenance/trigger/v1/trigger.proto#L73-L82 + +--- +## Queue + +The `Queue` is an internal structure that we use to store and throttle the execution of `Triggers` on the `BeginBlock`. We store each `Trigger` as a `QueuedTrigger`, and then manipulate the `Queue Start Index` and `Queue Length` whenever we add or remove from the `Queue`. When we add to the `Queue`, the new element is added to the `QueueStartIndex` + `Length`. The `QueueLength` is then incremented by one. When we dequeue from the Queue, the `QueueStartIndex` will be incremented by 1 and the `QueueLength` is decremented by 1. We also ensure the key of the dequeued element is removed. 
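+A toy sketch of that index bookkeeping is shown below; a map stands in for the KV-store, and the real keeper persists each value under the keys listed after the sketch.
+
+```go
+package main
+
+import "fmt"
+
+// queue mimics the QueueStartIndex / QueueLength bookkeeping described above.
+type queue struct {
+	startIndex uint64
+	length     uint64
+	items      map[uint64]string // QueuedTrigger placeholder
+}
+
+func (q *queue) enqueue(item string) {
+	q.items[q.startIndex+q.length] = item // new element goes to start index + length
+	q.length++
+}
+
+func (q *queue) dequeue() (string, bool) {
+	if q.length == 0 {
+		return "", false
+	}
+	item := q.items[q.startIndex]
+	delete(q.items, q.startIndex) // the dequeued element's key is removed
+	q.startIndex++
+	q.length--
+	return item, true
+}
+
+func main() {
+	q := &queue{items: map[uint64]string{}}
+	q.enqueue("trigger-1")
+	q.enqueue("trigger-2")
+	fmt.Println(q.dequeue()) // trigger-1 true
+}
+```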
+ +* Queue Item: `0x03 | Queue Index (8 bytes) -> ProtocolBuffers(QueuedTrigger)` +* Queue Start Index: `0x06 -> uint64(QueueStartIndex)` +* Queue Length: `0x07 -> uint64(QueueLength)` + ++++ https://github.com/provenance-io/provenance/blob/bda28e5f58a4a58e8fef21141400ad362b84518b/proto/provenance/trigger/v1/trigger.proto#L28-L39 diff --git a/docs/sdk/trigger/03_messages.md b/docs/sdk/trigger/03_messages.md new file mode 100644 index 000000000..3f5f4feb8 --- /dev/null +++ b/docs/sdk/trigger/03_messages.md @@ -0,0 +1,47 @@ + + +# Messages + +In this section we describe the processing of the trigger messages and the corresponding updates to the state. + + + - [Msg/CreateTriggerRequest](#msgcreatetriggerrequest) + - [Msg/DestroyTriggerRequest](#msgdestroytriggerrequest) + + +## Msg/CreateTriggerRequest + +Creates a `Trigger` that will fire when its event has been detected. If the message has more than one signer, then the newly created `Trigger` will designate the first signer as the owner. + +### Request + ++++ https://github.com/provenance-io/provenance/blob/bda28e5f58a4a58e8fef21141400ad362b84518b/proto/provenance/trigger/v1/tx.proto#L20-L31 + +### Response + ++++ https://github.com/provenance-io/provenance/blob/bda28e5f58a4a58e8fef21141400ad362b84518b/proto/provenance/trigger/v1/tx.proto#L33-L37 + +The message will fail under the following conditions: +* The authority is an invalid bech32 address +* The event does not implement `TriggerEventI` +* The actions list is empty +* At least one action is not a valid `sdk.Msg` +* The signers on one or more actions aren't in the set of the request's signers. + +## Msg/DestroyTriggerRequest + +Destroys a `Trigger` that has been created and is still registered. + +### Request + ++++ https://github.com/provenance-io/provenance/blob/bda28e5f58a4a58e8fef21141400ad362b84518b/proto/provenance/trigger/v1/tx.proto#L39-L48 + +### Response + ++++ https://github.com/provenance-io/provenance/blob/bda28e5f58a4a58e8fef21141400ad362b84518b/proto/provenance/trigger/v1/tx.proto#L50-L51 + +The message will fail under the following conditions: +* The `Trigger` does not exist +* The `Trigger` owner does not match the specified address diff --git a/docs/sdk/trigger/04_queries.md b/docs/sdk/trigger/04_queries.md new file mode 100644 index 000000000..c552fb5e7 --- /dev/null +++ b/docs/sdk/trigger/04_queries.md @@ -0,0 +1,41 @@ + + +# Trigger Queries + +In this section we describe the queries available for looking up trigger information. + + + - [Query Trigger By ID](#query-trigger-by-id) + - [Query Triggers](#query-triggers) + + +--- +## Query Trigger By ID + +The `QueryTriggerByID` query is used to obtain the content of a specific Trigger. + +### Request + ++++ https://github.com/provenance-io/provenance/blob/bda28e5f58a4a58e8fef21141400ad362b84518b/proto/provenance/trigger/v1/query.proto#L25-L29 + +The `id` is the unique identifier for the Trigger. + +### Response + ++++ https://github.com/provenance-io/provenance/blob/bda28e5f58a4a58e8fef21141400ad362b84518b/proto/provenance/trigger/v1/query.proto#L31-L35 + + +--- +## Query Triggers + +The `QueryTriggers` query is used to obtain all Triggers. 
+ +### Request + ++++ https://github.com/provenance-io/provenance/blob/bda28e5f58a4a58e8fef21141400ad362b84518b/proto/provenance/trigger/v1/query.proto#L37-L41 + +### Response + ++++ https://github.com/provenance-io/provenance/blob/bda28e5f58a4a58e8fef21141400ad362b84518b/proto/provenance/trigger/v1/query.proto#L43-L49 diff --git a/docs/sdk/trigger/05_events.md b/docs/sdk/trigger/05_events.md new file mode 100644 index 000000000..ed3b29f01 --- /dev/null +++ b/docs/sdk/trigger/05_events.md @@ -0,0 +1,49 @@ + + +# Events + +The trigger module emits the following events: + + + - [Trigger Created](#trigger-created) + - [Trigger Destroyed](#trigger-destroyed) + - [Trigger Detected](#trigger-detected) + - [Trigger Executed](#trigger-executed) + +--- +## Trigger Created + +Fires when a trigger is created with the CreateTriggerMsg. + +| Type | Attribute Key | Attribute Value | +| -------------- | ------------- | ----------------------------- | +| TriggerCreated | trigger_id | The ID of the created trigger | + +--- +## Trigger Destroyed + +Fires when a trigger is destroyed with the DestroyTriggerMsg. + +| Type | Attribute Key | Attribute Value | +| ---------------- | ------------- | ------------------------------------- | +| TriggerDestroyed | trigger_id | The ID of the trigger being destroyed | +--- +## Trigger Detected + +Fires when a trigger's event is detected in the EndBlocker. + +| Type | Attribute Key | Attribute Value | +| --------------- | ------------- | ------------------------------------ | +| TriggerDetected | trigger_id | The ID of the trigger being detected | +--- +## Trigger Executed + +Fires when a trigger's actions are executed in the BeginBlocker. + +| Type | Attribute Key | Attribute Value | +| --------------- | ------------- | ------------------------------------------------------------- | +| TriggerExecuted | trigger_id | The ID of the trigger being executed | +| TriggerExecuted | owner | The sdk.Address of the trigger's owner | +| TriggerExecuted | success | A boolean indicating if all the actions successfully executed | diff --git a/docs/sdk/trigger/06_begin_and_end_blocker.md b/docs/sdk/trigger/06_begin_and_end_blocker.md new file mode 100644 index 000000000..ca4cd1431 --- /dev/null +++ b/docs/sdk/trigger/06_begin_and_end_blocker.md @@ -0,0 +1,41 @@ + + + + - [Begin Blocker](#begin-blocker) + - [Trigger Execution](#trigger-execution) + - [End Blocker](#end-blocker) + - [Block Event Detection](#block-event-detection) + +# Begin Blocker + +The `BeginBlocker` abci call is invoked on the beginning of each block. `Triggers` will be dequeued and ran. + +## Trigger Execution + +The following steps are performed on each `BeginBlocker`: +2. A `Trigger` is removed from the `Queue`. +3. The `Gas Limit` for the `Trigger` is retrieved from the store. +4. A `GasMeter` is created for the `Trigger`. +5. An `Action` on the `Trigger` is ran updating and verifying gas usage against the `GasMeter` +6. The events for the `Action` are emitted. +7. Step 5 is repeated until no more `Actions` exist for the trigger. +8. Step 1 is repeated until the `Queue` is empty or the `throttling limit` has been reached. + +### Note + +We have implemented a `throttling limit` within the module's `BeginBlocker`, effectively enforcing a maximum of 5 actions and a gas limit of 2,000,000 per `BeginBlock`. + +# End Blocker + +The `EndBlocker` abci call is ran at the end of each block. The `EventManager`, `BlockHeight`, and `BlockTime` are monitored and used to detect `Trigger` activation. 
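+Conceptually the detection boils down to three checks, sketched below under the assumption that attribute values compare as plain strings; the keeper's actual matching code differs in detail.
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// attribute mirrors the name/value pairs a TransactionEvent requires;
+// an empty Value only requires the Name to be present on the emitted event.
+type attribute struct{ Name, Value string }
+
+// matchesTxEvent: the emitted ABCI event must have the same type and contain
+// every required attribute (with a matching value, unless the value is empty).
+func matchesTxEvent(wantType string, want []attribute, gotType string, got map[string]string) bool {
+	if wantType != gotType {
+		return false
+	}
+	for _, a := range want {
+		v, ok := got[a.Name]
+		if !ok || (a.Value != "" && a.Value != v) {
+			return false
+		}
+	}
+	return true
+}
+
+// Height and time criteria are met once the block reaches the defined value.
+func blockHeightReached(defined, current uint64) bool  { return current >= defined }
+func blockTimeReached(defined, current time.Time) bool { return !current.Before(defined) }
+
+func main() {
+	emitted := map[string]string{"recipient": "pb1example", "amount": "100nhash"}
+	fmt.Println(matchesTxEvent("transfer", []attribute{{Name: "recipient"}}, "transfer", emitted)) // true
+	fmt.Println(blockHeightReached(100, 120))                                                      // true
+	fmt.Println(blockTimeReached(time.Unix(0, 0), time.Now()))                                     // true
+}
+```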
+ +## Block Event Detection + +The following logic is used to detect the activation of a `Trigger`: +1. The `EventManager` is utilized to traverse the transaction events from the newly created block. +2. The `Event Listener` table filters for `Triggers` containing a `TransactionEvent` matching the transaction event types and containing the defined `Attributes`. +3. The `Event Listener` table filters for `Triggers` containing a `BlockHeightEvent` whose height is less than or equal to the current `BlockHeight`. +4. The `Event Listener` table filters for `Triggers` containing a `BlockTimeEvent` whose time is less than or equal to the current `BlockTime`. +5. These `Triggers` are then unregistered and added to the `Queue`. diff --git a/docs/sdk/trigger/07_genesis.md b/docs/sdk/trigger/07_genesis.md new file mode 100644 index 000000000..474396146 --- /dev/null +++ b/docs/sdk/trigger/07_genesis.md @@ -0,0 +1,14 @@ + + +# Trigger Genesis + +In this section we describe the trigger module's genesis state and how it is exported and imported. + + +## Msg/GenesisState + +GenesisState contains a list of triggers, queued triggers, and gas limits. It also tracks the trigger ID and the queue start index. These are exported and later imported from/to the store. + ++++ https://github.com/provenance-io/provenance/blob/bda28e5f58a4a58e8fef21141400ad362b84518b/proto/provenance/trigger/v1/genesis.proto#L11-L30 diff --git a/docs/sdk/trigger/README.md b/docs/sdk/trigger/README.md new file mode 100644 index 000000000..d27e075fa --- /dev/null +++ b/docs/sdk/trigger/README.md @@ -0,0 +1,15 @@ +# `x/trigger` + +## Overview + +The Trigger module provides Provenance Blockchain users with the capability to schedule the execution of specific transactions to occur automatically after a predetermined event. This powerful feature creates a more sophisticated and responsive system that allows users to seamlessly react to Provenance Blockchain events and send transactions in response. + +## Contents + +1. **[Concepts](01_concepts.md)** +2. **[State](02_state.md)** +3. **[Messages](03_messages.md)** +4. **[Queries](04_queries.md)** +5. **[Events](05_events.md)** +6. **[Begin and End Blocker](06_begin_and_end_blocker.md)** +7.
**[Genesis](07_genesis.md)** \ No newline at end of file diff --git a/sidebars.js b/sidebars.js index 7afaef43f..36c2a99f0 100644 --- a/sidebars.js +++ b/sidebars.js @@ -495,6 +495,14 @@ const sidebars = { 'pb/faq/transactions-error-codes', ], }, + { + type: 'category', + label: 'SDK', + className: 'section-heading', + collapsible: true, + collapsed: true, + items: [{ type: 'autogenerated', dirName: 'sdk' }], + }, { type: 'category', label: 'License', diff --git a/sync_script.sh b/sync_script.sh new file mode 100755 index 000000000..60871e8c2 --- /dev/null +++ b/sync_script.sh @@ -0,0 +1,88 @@ +#!/bin/bash + +set -e -o pipefail + +# Set the remote repository URL to clone from +REMOTE_REPO_URL="https://github.com/provenance-io/provenance.git" + +# Store the current working directory in WORK_DIR +WORK_DIR=$(pwd) + +# Remove any existing 'prov-sdk' directory and clone the remote repository +rm -rf ./prov-sdk +git clone "$REMOTE_REPO_URL" prov-sdk + +# Read the versions from a JSON file and remove the 'v' prefix +VERSIONS=($(jq -r '.[]' versions.json)) + +VERSIONS+=("main") + +# Iterate over each version +for version in "${VERSIONS[@]}"; do + echo "$version" + if [ "$version" == "main" ]; then + branch="main" # Set the branch to 'main' + version_directory="" # For 'main', the version directory is empty + else + version="${version#v}" # Remove the 'v' prefix from the version number + branch="release/v$version.x" # Determine the branch name + version_directory="version-$version" # Create a directory name based on the version + fi + + # Change to the 'prov-sdk' directory + cd $WORK_DIR/prov-sdk + + # Fetch the branch from the remote repository and switch to it + git fetch origin "$branch" + git checkout "$branch" + + # Check if the branch exists in the remote repository + if ! git show-ref --verify "refs/remotes/origin/$branch" &>/dev/null; then + echo "Branch $branch does not exist in the remote repository." + continue + else + echo "Branch $branch exists, continuing..." + fi + + # build the docs + cd $WORK_DIR/prov-sdk/docs + for D in ../x/*; do + echo "Processing ${D}" + if [ -d "${D}" ]; then + DIR_NAME=$(echo $D | awk -F/ '{print $NF}') + MODDOC=sdk/$DIR_NAME + rm -rf $MODDOC + mkdir -p $MODDOC + # ibchooks doesn't have a spec folder + if [ -f "$D/README.md" ]; then + cp -r $D/README.md $MODDOC/ + fi + # The rest of the modules have a spec folder + if [ -f "$D/spec/README.md" ]; then + cp -r $D/spec/* $MODDOC/ + fi + fi + done + + # Add modules page list + cat ../x/README.md | sed 's/\/spec\//\//g' > ./sdk/README.md + + for folder in "sdk"; do + if [ "$version" == "main" ]; then + cp -r "$WORK_DIR/prov-sdk/docs/$folder" "$WORK_DIR/docs/" + else + cp -r "$WORK_DIR/prov-sdk/docs/$folder" "$WORK_DIR/versioned_docs/$version_directory/" + fi + done + if [ "$version" == "main" ]; then + cp -r "$WORK_DIR/prov-sdk/client/docs/swagger-ui/swagger.yaml" "$WORK_DIR/openapi/" + fi +done + + + +# Leave the 'prov-sdk' directory after processing +cd "$WORK_DIR" + +# Remove the 'prov-sdk' directory if needed +rm -rf ./prov-sdk diff --git a/versions.json b/versions.json new file mode 100644 index 000000000..0637a088a --- /dev/null +++ b/versions.json @@ -0,0 +1 @@ +[] \ No newline at end of file