diff --git a/.github/workflows/action-updater.yml b/.github/workflows/action-updater.yml index ca2c6ba9d..e93f79166 100644 --- a/.github/workflows/action-updater.yml +++ b/.github/workflows/action-updater.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4.1.4 + - uses: actions/checkout@v4 with: # [Required] Access token with `workflow` scope. token: ${{ secrets.ACTION_UPDATER }} diff --git a/.github/workflows/adoc-html.yml b/.github/workflows/adoc-html.yml index 72c4c80ec..1c71b531d 100644 --- a/.github/workflows/adoc-html.yml +++ b/.github/workflows/adoc-html.yml @@ -9,8 +9,8 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4.1.4 - - uses: actions/setup-node@v4.0.2 + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 with: node-version: 20 - name: Convert adoc diff --git a/.github/workflows/backport-5-0.yml b/.github/workflows/backport-5-0.yml index a895ad53e..3dffc7647 100644 --- a/.github/workflows/backport-5-0.yml +++ b/.github/workflows/backport-5-0.yml @@ -12,7 +12,7 @@ jobs: steps: - name: checkout - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4 with: fetch-depth: 0 diff --git a/.github/workflows/backport-5-1.yml b/.github/workflows/backport-5-1.yml index 42515b977..5964dfc32 100644 --- a/.github/workflows/backport-5-1.yml +++ b/.github/workflows/backport-5-1.yml @@ -12,7 +12,7 @@ jobs: steps: - name: checkout - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4 with: fetch-depth: 0 diff --git a/.github/workflows/backport-5-2.yml b/.github/workflows/backport-5-2.yml index cb57cada4..47f5aed75 100644 --- a/.github/workflows/backport-5-2.yml +++ b/.github/workflows/backport-5-2.yml @@ -12,7 +12,7 @@ jobs: steps: - name: checkout - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4 with: fetch-depth: 0 diff --git a/.github/workflows/backport-5-3.yml b/.github/workflows/backport-5-3.yml index 3a2db103e..8c281af0a 100644 --- a/.github/workflows/backport-5-3.yml +++ 
b/.github/workflows/backport-5-3.yml @@ -12,7 +12,7 @@ jobs: steps: - name: checkout - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4 with: fetch-depth: 0 diff --git a/.github/workflows/backport-5-4.yml b/.github/workflows/backport-5-4.yml index fdd62ebf9..c6ff5061a 100644 --- a/.github/workflows/backport-5-4.yml +++ b/.github/workflows/backport-5-4.yml @@ -12,7 +12,7 @@ jobs: steps: - name: checkout - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4 with: fetch-depth: 0 diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index a5ba1108c..2ad888348 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -12,7 +12,7 @@ jobs: steps: - name: checkout - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4 with: fetch-depth: 0 diff --git a/.github/workflows/forwardport.yml b/.github/workflows/forwardport.yml index d1e591d00..48f007ddc 100644 --- a/.github/workflows/forwardport.yml +++ b/.github/workflows/forwardport.yml @@ -12,7 +12,7 @@ jobs: steps: - name: checkout - uses: actions/checkout@v4.1.4 + uses: actions/checkout@v4 with: fetch-depth: 0 diff --git a/.github/workflows/to-plain-html.yml b/.github/workflows/to-plain-html.yml index b5ba50676..2320157b9 100644 --- a/.github/workflows/to-plain-html.yml +++ b/.github/workflows/to-plain-html.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4.1.4 + - uses: actions/checkout@v4 with: token: ${{ secrets.TO_HTML }} - name: Asciidoc to html diff --git a/.github/workflows/validate.yml b/.github/workflows/validate.yml index 5c1f2a4c5..870597156 100644 --- a/.github/workflows/validate.yml +++ b/.github/workflows/validate.yml @@ -13,8 +13,8 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4.1.4 - - uses: actions/setup-node@v4.0.2 + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 with: node-version: 20 - name: Check for broken internal links diff --git 
a/docs/modules/clients/pages/memcache.adoc b/docs/modules/clients/pages/memcache.adoc index b46c460e3..974afc0a8 100644 --- a/docs/modules/clients/pages/memcache.adoc +++ b/docs/modules/clients/pages/memcache.adoc @@ -1,12 +1,11 @@ = Memcache Client -NOTE: Hazelcast Memcache Client only supports ASCII protocol. Binary Protocol is not supported. - A Memcache client written in any language can talk directly to a Hazelcast cluster. No additional configuration is required. -To be able to use a Memcache client, you must enable -the Memcache client request listener service using either one of the following configuration options: +NOTE: Hazelcast Memcache Client only supports ASCII protocol. Binary Protocol is not supported. + +To be able to use a Memcache client, you must enable the Memcache client request listener service using either one of the following configuration options: 1 - Using the `network` configuration element: diff --git a/docs/modules/clusters/partials/ucn-migrate-tip.adoc b/docs/modules/clusters/partials/ucn-migrate-tip.adoc index 88b7e6ec7..3b56e1023 100644 --- a/docs/modules/clusters/partials/ucn-migrate-tip.adoc +++ b/docs/modules/clusters/partials/ucn-migrate-tip.adoc @@ -1 +1 @@ -CAUTION: {ucd} has been deprecated and will be removed in the next major version. To continue deploying your user code after this time, {open-source-product-name} users can either upgrade to {enterprise-product-name}, or add their resources to the Hazelcast member class paths. Hazelcast recommends that {enterprise-product-name} users migrate their user code to use {ucn}. For further information on migrating from {ucd} to {ucn}, see the xref:clusters:ucn-migrate-ucd.adoc[] topic. \ No newline at end of file +CAUTION: {ucd} has been deprecated and will be removed in the next major version. 
To continue deploying your user code after this time, {open-source-product-name} users can either upgrade to {enterprise-product-name}, or add their resources to the Hazelcast member class paths. Hazelcast recommends that {enterprise-product-name} users migrate their user code to use {ucn} for all purposes other than Jet stream processing. For further information on migrating from {ucd} to {ucn}, see xref:clusters:ucn-migrate-ucd.adoc[]. \ No newline at end of file diff --git a/docs/modules/data-structures/pages/map-config.adoc b/docs/modules/data-structures/pages/map-config.adoc index bee7e009c..4b24f5c87 100644 --- a/docs/modules/data-structures/pages/map-config.adoc +++ b/docs/modules/data-structures/pages/map-config.adoc @@ -4,6 +4,7 @@ {description} +[[map-configuration-defaults]] == Hazelcast Map Configuration Defaults The `hazelcast.xml`/`hazelcast.yaml` configuration included with your Hazelcast distribution includes the following default settings for maps. @@ -33,9 +34,22 @@ For details on map backups, refer to xref:backing-up-maps.adoc[]. For details on in-memory format, refer to xref:setting-data-format.adoc[]. -== Modifying the Default Configuration +== The Default (Fallback) Map Configuration +When a map is created, if the map name matches an entry in the `hazelcast.xml`/`hazelcast.yaml` file, the values in the matching entry are used to overwrite the initial values +discussed in the <> section. -You can create a default configuration for all maps for your environment by modifying the map configuration block named "default" in your `hazelcast.xml`/`hazelcast.yaml` file. In the following example, we set expiration timers for map entries. Map entries that are idle for an hour will be marked as eligible for removal if the cluster begins to run out of memory. Any map entry older than six hours will be marked as eligible for removal. +Maps that do not have any configuration defined use the default configuration. 
If you want to set a configuration that is valid for all maps, you can name your configuration as `default`. A user-defined default configuration applies to every map that does not have a specific custom map configuration defined with the map’s name. You can also use wildcards to associate your configuration with multiple map names. See the link:https://docs.hazelcast.com/hazelcast/5.5/configuration/using-wildcards[configuration documentation] for more information about wildcards. + +When a map name does not match any entry in the `hazelcast.xml`/`hazelcast.yaml` file then: + +- If the `default` map configuration exists, the values under this entry are used to overwrite initial values. Therefore, `default` serves as a fallback. + +- If a `default` map configuration does not exist, the map is created with initial values as discussed in <>. + + +== Modifying the Default (Fallback) Configuration + +In the following example, we set expiration timers for dynamically created maps that lack a named configuration block. Map entries that are idle for an hour will be marked as eligible for removal if the cluster begins to run out of memory. Any map entry older than six hours will be marked as eligible for removal. For more on entry expiration, go to xref:managing-map-memory.adoc[Managing Map Memory]. diff --git a/docs/modules/integrate/pages/feature-engineering-with-feast.adoc b/docs/modules/integrate/pages/feature-engineering-with-feast.adoc index 20caff7d1..11681984a 100644 --- a/docs/modules/integrate/pages/feature-engineering-with-feast.adoc +++ b/docs/modules/integrate/pages/feature-engineering-with-feast.adoc @@ -16,7 +16,7 @@ image:ROOT:feast_batch.png[Feast batch wokflow] You will need the following ready before starting the tutorial: -* Hazelcast CLC. 
link:https://docs.hazelcast.com/clc/latest/install-clc[Installation instructions] +* Hazelcast CLC (see link:https://docs.hazelcast.com/clc/latest/install-clc[Install CLC]) * A recent version of Docker and Docker Compose To set up your project, complete the following steps: diff --git a/docs/modules/integrate/pages/integrate-with-feast.adoc b/docs/modules/integrate/pages/integrate-with-feast.adoc index 6dfe43837..143eac346 100644 --- a/docs/modules/integrate/pages/integrate-with-feast.adoc +++ b/docs/modules/integrate/pages/integrate-with-feast.adoc @@ -114,5 +114,5 @@ To use Feast with Hazelcast, you must do the following: You can also work through the following tutorials: -* Get Started with Feature Store -* Feature Compute and Transformation +* xref:integrate:feature-engineering-with-feast.adoc[Get started with Feast feature engineering] +* xref:integrate:streaming-features-with-feast.adoc[Get started with Feast streaming] diff --git a/docs/modules/integrate/pages/reliable-topic-connector.adoc b/docs/modules/integrate/pages/reliable-topic-connector.adoc index 916fa4d40..e603c5bd7 100644 --- a/docs/modules/integrate/pages/reliable-topic-connector.adoc +++ b/docs/modules/integrate/pages/reliable-topic-connector.adoc @@ -5,8 +5,7 @@ used as a data sink within a pipeline. == Installing the Connector -The map connector is included in the full and slim -distributions of Hazelcast. +This connector is included in the full and slim distributions of Hazelcast. 
== Permissions [.enterprise]*{enterprise-product-name}* diff --git a/docs/modules/integrate/pages/streaming-features-with-feast.adoc b/docs/modules/integrate/pages/streaming-features-with-feast.adoc index 8b33311a4..a6fa4f333 100644 --- a/docs/modules/integrate/pages/streaming-features-with-feast.adoc +++ b/docs/modules/integrate/pages/streaming-features-with-feast.adoc @@ -15,7 +15,7 @@ image:ROOT:feast_streaming.png[Feast streaming wokflow] You will need the following ready before starting the tutorial: -* Hazelcast CLC - link:https://docs.hazelcast.com/clc/latest/install-clc[Installation instructions] +* Hazelcast CLC (see link:https://docs.hazelcast.com/clc/latest/install-clc[Install CLC]) * A recent version of Docker and Docker Compose To set up your project, complete the following steps: @@ -349,6 +349,7 @@ Outputs something similar to: ] } ---- + == Summary In this tutorial, you learned how to set up a feature engineering project that uses Hazelcast as the online store. diff --git a/docs/modules/mapstore/pages/configuring-a-generic-maploader.adoc b/docs/modules/mapstore/pages/configuring-a-generic-maploader.adoc index 29f7d15ff..4752a4731 100644 --- a/docs/modules/mapstore/pages/configuring-a-generic-maploader.adoc +++ b/docs/modules/mapstore/pages/configuring-a-generic-maploader.adoc @@ -1,16 +1,37 @@ -= Using the Generic MapLoader += Using the generic MapLoader :description: With the xref:working-with-external-data.adoc#options[generic MapLoader], you can configure a map to cache data from an external system. This topic includes an example of how to configure a map with a generic MapLoader that connects to a MySQL database. :page-beta: false {description} +NOTE: The objects created in the distributed map are stored as GenericRecord. You can use the `type-name` property to store the data in a POJO (Plain Old Java Object). 
+ For a list of all supported external systems, including databases, see available xref:external-data-stores:external-data-stores.adoc#connectors[data connection types]. -== Before you Begin +== Before you begin You need a xref:external-data-stores:external-data-stores.adoc[data connection] that's configured on all cluster members. -== Quickstart Configuration +== Add dependencies + +If you are using a Hazelcast JAR file, you need to ensure the following is added to your classpath: + +[source,xml] +---- + + com.hazelcast + hazelcast-sql + + + + com.hazelcast + hazelcast-mapstore + +---- + +NOTE: If you are using the slim distribution, you need to add `hazelcast-mapstore`. If you are using MongoDb, you also need to add `hazelcast-jet-mongodb`. + +== Quickstart configuration This example shows a basic map configuration that uses a data connection called `my-mysql-database`. See xref:data-structures:map.adoc[] for the details of other properties that you can include in your map configuration. @@ -70,11 +91,11 @@ instance().getConfig().addMapConfig(mapConfig); <2> The name of your data connection. [[mapping]] -== SQL Mapping for the Generic MapLoader +== SQL mapping for the generic MapLoader -When you configure a map with the generic MapLoader, Hazelcast creates a xref:sql:mapping-to-jdbc.adoc[SQL mapping with the JDBC connector]. The name of the mapping is the same name as your map prefixed with `__map-store.`. This mapping is used to read data from the external system, and it is removed whenever the configured map is removed. You can also configure this SQL mapping, using <>. +When you configure a map with the generic MapLoader, Hazelcast creates a xref:sql:mapping-to-jdbc.adoc[SQL mapping with the JDBC connector]. The name of the mapping is the same name as your map prefixed with `__map-store.`. This mapping is used to read data from the external system, and is removed whenever the configured map is removed. You can also configure this SQL mapping, using <>. 
-== Configuration Properties for the Generic MapLoader +== Configuration properties for the generic MapLoader These configuration properties allow you to configure the generic MapLoader and its SQL mapping. @@ -373,26 +394,82 @@ mapConfig.setMapStoreConfig(mapStoreConfig); -- ==== +|[[columns]]`type-name` +|The type name of the compact GenericRecord. Use this property to map your record to an existing domain class. + +| +The name of the map. +| + +[tabs] +==== +XML:: ++ +-- +[source,xml] +---- + + + + com.hazelcast.mapstore.GenericMapStore + + my-mysql-database + org.example.Person + + + +---- +-- +YAML:: ++ +-- +[source,yaml] +---- +hazelcast: + map: + mymapname: + map-store: + enabled: true + class-name: com.hazelcast.mapstore.GenericMapStore + properties: + data-connection-ref: my-mysql-database + type-name: org.example.Person +---- +-- +Java:: ++ +-- +[source,java] +---- +MapConfig mapConfig = new MapConfig("myMapName"); + +MapStoreConfig mapStoreConfig = new MapStoreConfig(); +mapStoreConfig.setClassName("com.hazelcast.mapstore.GenericMapStore"); +mapStoreConfig.setProperty("data-connection-ref", "my-mysql-database"); +mapStoreConfig.setProperty("type-name", "org.example.Person"); + +mapConfig.setMapStoreConfig(mapStoreConfig); +---- +-- +==== + |=== == Supported backends -GenericMapStore needs a SQL Connector that supports `SELECT`, `UPDATE`, `SINK INTO` and `DELETE` statements. +The generic MapStore needs a SQL Connector that supports `SELECT`, `UPDATE`, `SINK INTO` and `DELETE` statements. Officially supported connectors: -- JDBC Connector - * supports MySQL, PostgreSQL. - * requires JDBC driver on the classpath -- MongoDB Connector - * make sure you have `hazelcast-jet-mongodb` artifact included on the classpath. +- MySQL, PostgreSQL, Microsoft SQL Server, Oracle (it uses JDBC SQL Connector). +- MongoDB (make sure you have `hazelcast-jet-mongodb` artifact included on the classpath). 
-== Related Resources +== Related resources - To monitor MapStores for each loaded entry, use the `EntryLoadedListener` interface. See the xref:events:object-events.adoc#listening-for-map-events[Listening for Map Events section] to learn how you can catch entry-based events. - xref:mapstore-triggers.adoc[]. -== Next Steps +== Next steps -See the MapStore xref:configuration-guide.adoc[configuration guide] for details about configuration options, including caching behaviors. +See the xref:configuration-guide.adoc[MapStore configuration guide] for details about configuration options, including caching behaviors. diff --git a/docs/modules/mapstore/pages/configuring-a-generic-mapstore.adoc b/docs/modules/mapstore/pages/configuring-a-generic-mapstore.adoc index e36b33f78..77f146d94 100644 --- a/docs/modules/mapstore/pages/configuring-a-generic-mapstore.adoc +++ b/docs/modules/mapstore/pages/configuring-a-generic-mapstore.adoc @@ -1,16 +1,37 @@ -= Using the Generic MapStore += Using the generic MapStore :description: With the xref:working-with-external-data.adoc#options[generic MapStore], you can configure a map to cache data from and write data back to an external system. This topic includes an example of how to configure a map with a generic MapStore that connects to a MySQL database. :page-beta: false {description} +NOTE: The objects created in the distributed map are stored as GenericRecord. You can use the `type-name` property to store the data in a POJO (Plain Old Java Object). + For a list of all supported external systems, including databases, see available xref:external-data-stores:external-data-stores.adoc#connectors[data connection types]. -== Before you Begin +== Before you begin You need a xref:external-data-stores:external-data-stores.adoc[data connection] that's configured on all cluster members. 
-== Quickstart Configuration +== Add dependencies + +If you are using a Hazelcast JAR file, you need to ensure the following is added to your classpath: + +[source,xml] +---- + + com.hazelcast + hazelcast-sql + + + + com.hazelcast + hazelcast-mapstore + +---- + +NOTE: If you are using the slim distribution, you need to add `hazelcast-mapstore`. If you are using MongoDb, you also need to add `hazelcast-jet-mongodb`. + +== Quickstart configuration This example shows a basic map configuration that uses a data connection called `my-mysql-database`. See xref:data-structures:map.adoc[] for the details of other properties that you include in your map configuration. @@ -70,11 +91,11 @@ instance().getConfig().addMapConfig(mapConfig); <2> The name of your data connection. [[mapping]] -== SQL Mapping for the Generic MapStore +== SQL mapping for the generic MapStore -When you configure a map with the generic MapStore, Hazelcast creates a xref:sql:mapping-to-jdbc.adoc[SQL mapping with the JDBC connector]. The name of the mapping is the same name as your map prefixed with `__map-store.`. This mapping is used to read data from or write data to the external system and it is removed whenever the configured map is removed. You can also configure this SQL mapping, using <>. +When you configure a map with the generic MapStore, Hazelcast creates a xref:sql:mapping-to-jdbc.adoc[SQL mapping with the JDBC connector]. The name of the mapping is the same name as your map prefixed with `__map-store.`. This mapping is used to read data from or write data to the external system and is removed whenever the configured map is removed. You can also configure this SQL mapping, using <>. -== Configuration Properties for the Generic MapStore +== Configuration properties for the generic MapStore These configuration properties allow you to configure the generic MapStore and its SQL mapping. 
@@ -373,18 +394,77 @@ mapConfig.setMapStoreConfig(mapStoreConfig); -- ==== +|[[columns]]`type-name` +|The type name of the compact GenericRecord. Use this property to map your record to an existing domain class. + +| +The name of the map. +| + +[tabs] +==== +XML:: ++ +-- +[source,xml] +---- + + + + com.hazelcast.mapstore.GenericMapStore + + my-mysql-database + org.example.Person + + + +---- +-- +YAML:: ++ +-- +[source,yaml] +---- +hazelcast: + map: + mymapname: + map-store: + enabled: true + class-name: com.hazelcast.mapstore.GenericMapStore + properties: + data-connection-ref: my-mysql-database + type-name: org.example.Person +---- +-- +Java:: ++ +-- +[source,java] +---- +MapConfig mapConfig = new MapConfig("myMapName"); + +MapStoreConfig mapStoreConfig = new MapStoreConfig(); +mapStoreConfig.setClassName("com.hazelcast.mapstore.GenericMapStore"); +mapStoreConfig.setProperty("data-connection-ref", "my-mysql-database"); +mapStoreConfig.setProperty("type-name", "org.example.Person"); + +mapConfig.setMapStoreConfig(mapStoreConfig); +---- +-- +==== + |=== == Supported backends -You can use any database as the MapStore backend as long as you have its Hazelcast SQL Connector on the classpath. +The generic MapStore needs a SQL Connector that supports `SELECT`, `UPDATE`, `SINK INTO` and `DELETE` statements. -Officially supported backend databases: +Officially supported connectors: - MySQL, PostgreSQL, Microsoft SQL Server, Oracle (it uses JDBC SQL Connector). - MongoDB (make sure you have `hazelcast-jet-mongodb` artifact included on the classpath). -== Related Resources +== Related resources - To monitor MapStores for each loaded entry, use the `EntryLoadedListener` interface. See the xref:events:object-events.adoc#listening-for-map-events[Listening for Map Events section] to learn how you can catch entry-based events. 
@@ -392,4 +472,4 @@ Officially supported backend databases: == Next Steps -See the MapStore xref:configuration-guide.adoc[configuration guide] for details about configuration options, including caching behaviors. +See the xref:configuration-guide.adoc[MapStore configuration guide] for details about configuration options, including caching behaviors. diff --git a/docs/modules/pipelines/pages/cdc-join.adoc b/docs/modules/pipelines/pages/cdc-join.adoc index c287bbb5b..d5fca7f64 100644 --- a/docs/modules/pipelines/pages/cdc-join.adoc +++ b/docs/modules/pipelines/pages/cdc-join.adoc @@ -622,8 +622,6 @@ You should see the following jars: . Enable user code deployment: + -include::clusters:partial$ucn-migrate-tip.adoc[] -+ Due to the type of sink we are using in our pipeline we need to make some extra changes in order for the Hazelcast cluster to be aware of the custom classes we have defined. + diff --git a/docs/modules/wan/pages/configuring-for-map-and-cache.adoc b/docs/modules/wan/pages/configuring-for-map-and-cache.adoc index a96cae6b2..691e107b9 100644 --- a/docs/modules/wan/pages/configuring-for-map-and-cache.adoc +++ b/docs/modules/wan/pages/configuring-for-map-and-cache.adoc @@ -88,7 +88,7 @@ the target map if it does not exist in the target map. * `HigherHitsMergePolicy`: Incoming entry merges from the source map to the target map if the source entry has more hits than the target one. * `PassThroughMergePolicy`: Incoming entry merges from the source map to -the target map unless the incoming entry is not null. +the target map unless the incoming entry is null. * `ExpirationTimeMergePolicy`: Incoming entry merges from the source map to the target map if the source entry will expire later than the destination entry. Please note that this merge policy can only be used when the clusters' clocks are in sync. @@ -169,7 +169,7 @@ the target cache if it does not exist in the target cache. 
* `HigherHitsMergePolicy`: Incoming entry merges from the source cache to the target cache if the source entry has more hits than the target one. * `PassThroughMergePolicy`: Incoming entry merges from the source cache to -the target cache unless the incoming entry is not null. +the target cache unless the incoming entry is null. * `ExpirationTimeMergePolicy`: Incoming entry merges from the source cache to the target cache if the source entry will expire later than the destination entry. Please note that this merge policy can only be used when the clusters' clocks are in sync.